def main(logger):

    config = _init_config()
    registry = CollectorRegistry()

    for metric in COLLECTED_METRICS:
        registry.register(metric)

    job = _prometheus_job(config.kubernetes_namespace)
    prometheus_shutdown = partial(push_to_gateway,
                                  config.prometheus_pushgateway, job, registry)
    register_shutdown(prometheus_shutdown, "Pushing metrics")

    Session = _init_db(config)
    accounts_session = Session()
    hosts_session = Session()
    misc_session = Session()
    # All three sessions come from the same factory, so the registered dispose
    # callbacks all target the same engine and close its pooled connections.
    register_shutdown(accounts_session.get_bind().dispose, "Closing database")
    register_shutdown(hosts_session.get_bind().dispose, "Closing database")
    register_shutdown(misc_session.get_bind().dispose, "Closing database")

    event_producer = EventProducer(config)
    register_shutdown(event_producer.close, "Closing producer")

    shutdown_handler = ShutdownHandler()
    shutdown_handler.register()

    with multi_session_guard([accounts_session, hosts_session, misc_session]):
        run(config, logger, accounts_session, hosts_session, misc_session,
            event_producer, shutdown_handler)
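
# register_shutdown and ShutdownHandler are defined elsewhere in the project; the
# sketch below only illustrates the pattern used above (collect cleanup callbacks,
# flip a flag on SIGTERM/SIGINT) and is an assumption, not the actual implementation.
import signal

_SHUTDOWN_CALLBACKS = []

def register_shutdown(callback, message):
    # Remember a cleanup step together with the message to log when it runs.
    _SHUTDOWN_CALLBACKS.append((callback, message))

def run_shutdown_callbacks(logger):
    # Run the cleanup steps in reverse registration order, last registered first.
    for callback, message in reversed(_SHUTDOWN_CALLBACKS):
        logger.info(message)
        callback()

class ShutdownHandler:
    def __init__(self):
        self._shutdown_requested = False

    def register(self):
        # Translate termination signals into a flag the main loop can poll.
        signal.signal(signal.SIGTERM, self._handle)
        signal.signal(signal.SIGINT, self._handle)

    def _handle(self, signum, frame):
        self._shutdown_requested = True

    def shut_down(self):
        return self._shutdown_requested
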
def main():
    application = create_app(RuntimeEnvironment.SERVICE)
    config = application.config["INVENTORY_CONFIG"]
    start_http_server(config.metrics_port)

    topic_to_handler = {
        config.host_ingress_topic: add_host,
        config.system_profile_topic: update_system_profile
    }

    consumer = KafkaConsumer(
        config.kafka_consumer_topic,
        group_id=config.host_ingress_consumer_group,
        bootstrap_servers=config.bootstrap_servers,
        api_version=(0, 10, 1),
        value_deserializer=lambda m: m.decode(),
        **config.kafka_consumer,
    )
    # Closing with autocommit=True commits processed offsets so messages are not
    # re-consumed after a restart.
    consumer_shutdown = partial(consumer.close, autocommit=True)
    register_shutdown(consumer_shutdown, "Closing consumer")

    event_producer = EventProducer(config)
    register_shutdown(event_producer.close, "Closing producer")

    shutdown_handler = ShutdownHandler()
    shutdown_handler.register()

    message_handler = partial(
        handle_message,
        message_operation=topic_to_handler[config.kafka_consumer_topic])

    event_loop(consumer, application, event_producer, message_handler,
               shutdown_handler.shut_down)
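
# event_loop is not shown in these snippets. Below is a minimal sketch, assuming it
# simply drains the consumer until shutdown is requested; the handler signature
# (raw message value plus the event producer) is an assumption, and the real loop
# may batch, commit offsets, and record metrics differently.
def event_loop(consumer, application, event_producer, message_handler, shutdown_check):
    with application.app_context():
        while not shutdown_check():
            # poll() returns {TopicPartition: [ConsumerRecord, ...]}
            for records in consumer.poll(timeout_ms=1000).values():
                for record in records:
                    message_handler(record.value, event_producer)
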
def main(logger):

    config = _init_config()
    registry = CollectorRegistry()

    for metric in COLLECTED_METRICS:
        registry.register(metric)

    job = _prometheus_job(config.kubernetes_namespace)
    prometheus_shutdown = partial(push_to_gateway, config.prometheus_pushgateway, job, registry)
    register_shutdown(prometheus_shutdown, "Pushing metrics")

    Session = _init_db(config)
    session = Session()
    register_shutdown(session.get_bind().dispose, "Closing database")

    shutdown_handler = ShutdownHandler()
    shutdown_handler.register()

    with session_guard(session):
        run(config, logger, session, shutdown_handler)
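
# session_guard and multi_session_guard are not shown here; a plausible sketch,
# assuming they commit on success, roll back on any exception, and always close
# the wrapped SQLAlchemy session(s). The project's real helpers may differ.
from contextlib import contextmanager

@contextmanager
def session_guard(session):
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()

@contextmanager
def multi_session_guard(sessions):
    # Same idea for several sessions driven by one unit of work.
    try:
        yield sessions
        for session in sessions:
            session.commit()
    except Exception:
        for session in sessions:
            session.rollback()
        raise
    finally:
        for session in sessions:
            session.close()
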
def main(logger):
    application = create_app(RuntimeEnvironment.JOB)
    config = application.config["INVENTORY_CONFIG"]
    Session = _init_db(config)
    session = Session()
    # TODO: Metrics
    # start_http_server(config.metrics_port)

    consumer = KafkaConsumer(
        bootstrap_servers=config.bootstrap_servers,
        api_version=(0, 10, 1),
        value_deserializer=lambda m: m.decode(),
        **config.events_kafka_consumer,
    )

    register_shutdown(session.get_bind().dispose, "Closing database")
    consumer_shutdown = partial(consumer.close, autocommit=True)
    register_shutdown(consumer_shutdown, "Closing consumer")
    event_producer = EventProducer(config)
    register_shutdown(event_producer.close, "Closing producer")
    shutdown_handler = ShutdownHandler()
    shutdown_handler.register()

    with session_guard(session):
        run(config, logger, session, consumer, event_producer, shutdown_handler)
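
# _init_db is also defined elsewhere; a minimal sketch, assuming it builds a
# SQLAlchemy engine from the configured database URI (the db_uri, db_pool_size and
# db_pool_timeout settings that create_app below also uses) and returns a session
# factory.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

def _init_db(config):
    engine = create_engine(config.db_uri,
                           pool_size=config.db_pool_size,
                           pool_timeout=config.db_pool_timeout)
    # Sessions created from this factory share the engine, which is why the callers
    # above register engine.dispose (via session.get_bind()) as a shutdown step.
    return sessionmaker(bind=engine)
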
def create_app(runtime_environment):
    connexion_options = {"swagger_ui": True}
    # This feels like a hack but it is needed.  The logging configuration
    # needs to be set up before the Flask app is initialized.
    configure_logging()

    app_config = Config(runtime_environment)
    app_config.log_configuration()

    connexion_app = connexion.App("inventory",
                                  specification_dir="./swagger/",
                                  options=connexion_options)

    # Read the swagger.yml file to configure the endpoints
    parser = ResolvingParser(SPECIFICATION_FILE, resolve_types=RESOLVE_FILES)
    parser.parse()

    for api_url in app_config.api_urls:
        if api_url:
            connexion_app.add_api(
                parser.specification,
                arguments={"title": "RestyResolver Example"},
                resolver=RestyResolver("api"),
                validate_responses=True,
                strict_validation=True,
                base_path=api_url,
            )
            logger.info("Listening on API: %s", api_url)

    # Add an error handler that will convert our top level exceptions
    # into error responses
    connexion_app.add_error_handler(InventoryException, render_exception)

    flask_app = connexion_app.app

    flask_app.config["SQLALCHEMY_ECHO"] = False
    flask_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    flask_app.config["SQLALCHEMY_DATABASE_URI"] = app_config.db_uri
    flask_app.config["SQLALCHEMY_POOL_SIZE"] = app_config.db_pool_size
    flask_app.config["SQLALCHEMY_POOL_TIMEOUT"] = app_config.db_pool_timeout

    flask_app.config["INVENTORY_CONFIG"] = app_config

    db.init_app(flask_app)

    register_shutdown(db.get_engine(flask_app).dispose, "Closing database")

    flask_app.register_blueprint(monitoring_blueprint,
                                 url_prefix=app_config.mgmt_url_path_prefix)

    @flask_app.before_request
    def set_request_id():
        threadctx.request_id = request.headers.get(REQUEST_ID_HEADER,
                                                   UNKNOWN_REQUEST_ID_VALUE)

    if runtime_environment.event_producer_enabled:
        flask_app.event_producer = EventProducer(app_config)
        register_shutdown(flask_app.event_producer.close,
                          "Closing EventProducer")
    else:
        logger.warning(
            "WARNING: The event producer has been disabled.  "
            "Message queue based event notifications will not be sent.")

    payload_tracker_producer = None
    if not runtime_environment.payload_tracker_enabled:
        # If we are running in "testing" mode, then inject the NullProducer.
        payload_tracker_producer = payload_tracker.NullProducer()

        logger.warning(
            "WARNING: Using the NullProducer for the payload tracker producer.  "
            "No payload tracker events will be sent to the payload tracker.")

    payload_tracker.init_payload_tracker(app_config,
                                         producer=payload_tracker_producer)

    # HTTP request metrics
    if runtime_environment.metrics_endpoint_enabled:
        PrometheusMetrics(
            flask_app,
            defaults_prefix="inventory",
            group_by="url_rule",
            path=None,
            excluded_paths=[
                "^/metrics$", "^/health$", "^/version$", r"^/favicon\.ico$"
            ],
        )

    # initialize metrics to zero
    initialize_metrics(app_config)

    return flask_app
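
# A hedged usage example of create_app: serving the API with Flask's development
# server. RuntimeEnvironment.SERVICE is the value used in the snippets above and may
# not be the right one for the web API; in production the app would normally run
# under a WSGI server such as gunicorn, and the port here is arbitrary.
if __name__ == "__main__":
    application = create_app(RuntimeEnvironment.SERVICE)
    application.run(host="0.0.0.0", port=8080)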