def update_system_profile(input_host, identity, staleness_offset, fields):
    """Update the system profile of an already-registered host.

    Looks the host up by id when ``input_host.id`` is set, otherwise by its
    canonical facts, then replaces its system profile with the provided facts.

    :param input_host: host object carrying ``system_profile_facts`` and either
        an ``id`` or ``canonical_facts`` used to locate the existing row
    :param identity: caller identity used to scope the host lookup
    :param staleness_offset: passed through to ``serialize_host``
    :param fields: field selection passed through to ``serialize_host``
    :returns: tuple of (serialized host, host id, insights_id, AddHostResult.updated)
    :raises InventoryException: when no system profile data was provided, or
        when no matching host exists
    """
    if not input_host.system_profile_facts:
        raise InventoryException(
            title="Invalid request",
            detail="Cannot update System Profile, since no System Profile data was provided.",
        )

    with session_guard(db.session):
        if input_host.id:
            existing_host = find_existing_host_by_id(identity, input_host.id)
        else:
            existing_host = find_existing_host(identity, input_host.canonical_facts)

        # Guard clause: fail fast when the lookup found nothing.
        if not existing_host:
            raise InventoryException(
                title="Invalid request",
                detail="Could not find an existing host with the provided facts.",
            )

        logger.debug("Updating system profile on an existing host")
        # Lazy %-formatting: the host repr is only computed when DEBUG is enabled
        # (the original f-string evaluated it unconditionally).
        logger.debug("existing host = %s", existing_host)

        existing_host.update_system_profile(input_host.system_profile_facts)
        db.session.commit()

        metrics.update_host_count.inc()
        logger.debug("Updated system profile for host:%s", existing_host)

        output_host = serialize_host(existing_host, staleness_offset, fields)
        insights_id = existing_host.canonical_facts.get("insights_id")
        return output_host, existing_host.id, insights_id, AddHostResult.updated
def main(logger):
    """Job entry point: set up metrics push, database session, and event
    producer (each with a registered shutdown hook), then run the job inside
    a session guard."""
    config = _init_config()

    # Collect metrics into a dedicated registry and push them on shutdown.
    registry = CollectorRegistry()
    for collected_metric in COLLECTED_METRICS:
        registry.register(collected_metric)
    prometheus_job = _prometheus_job(config.kubernetes_namespace)
    prometheus_shutdown = partial(
        push_to_gateway, config.prometheus_pushgateway, prometheus_job, registry
    )
    register_shutdown(prometheus_shutdown, "Pushing metrics")

    session_factory = _init_db(config)
    db_session = session_factory()
    register_shutdown(db_session.get_bind().dispose, "Closing database")

    event_producer = EventProducer(config)
    register_shutdown(event_producer.close, "Closing producer")

    shutdown_handler = ShutdownHandler()
    shutdown_handler.register()

    with session_guard(db_session):
        run(config, logger, db_session, event_producer, shutdown_handler)
def main(logger):
    """Consumer entry point: build the app config, open a database session
    and a Kafka consumer, register shutdown hooks for each resource, then
    run the consume loop inside a session guard."""
    application = create_app(RuntimeEnvironment.JOB)
    config = application.config["INVENTORY_CONFIG"]

    session_factory = _init_db(config)
    db_session = session_factory()

    # TODO: Metrics
    # start_http_server(config.metrics_port)

    consumer = KafkaConsumer(
        bootstrap_servers=config.bootstrap_servers,
        api_version=(0, 10, 1),
        value_deserializer=lambda m: m.decode(),
        **config.events_kafka_consumer,
    )

    # Shutdown hooks run in registration order: database, consumer, producer.
    register_shutdown(db_session.get_bind().dispose, "Closing database")
    consumer_shutdown = partial(consumer.close, autocommit=True)
    register_shutdown(consumer_shutdown, "Closing consumer")

    event_producer = EventProducer(config)
    register_shutdown(event_producer.close, "Closing producer")

    shutdown_handler = ShutdownHandler()
    shutdown_handler.register()

    with session_guard(db_session):
        run(config, logger, db_session, consumer, event_producer, shutdown_handler)
def delete_hosts(select_query, event_producer, chunk_size, interrupt=lambda: False):
    """Delete every host matched by *select_query*, one chunk at a time.

    Generator: yields ``(host_id, host_deleted)`` after each deletion attempt.
    After every yield the *interrupt* callable is consulted; a truthy result
    stops the generator early. The outer loop re-counts the query so that
    rows deleted in one pass shrink the next pass until none remain.
    """
    with session_guard(select_query.session):
        # Keep draining chunks until the query matches nothing.
        while select_query.count():
            for candidate in select_query.limit(chunk_size):
                # Capture the id before deletion invalidates the row.
                candidate_id = candidate.id
                with delete_host_processing_time.time():
                    was_deleted = _delete_host(
                        select_query.session, event_producer, candidate
                    )
                yield candidate_id, was_deleted
                if interrupt():
                    return
def msg_handler(parsed):
    """Handle one parsed message: load the referenced host and replace its
    system profile with the message payload. Logs and returns early when the
    id is missing or no matching host exists."""
    host_id = parsed["id"]
    threadctx.request_id = parsed["request_id"]

    # Guard: a null id means the upstream message was malformed.
    if not host_id:
        logger.error("ID is null, something went wrong.")
        return

    with session_guard(db.session):
        host = Host.query.get(host_id)
        if host is None:
            logger.error("Host with id [%s] not found!", host_id)
            return

        logger.info(
            "Processing message id=%s request_id=%s", host_id, parsed["request_id"]
        )
        profile = SystemProfileSchema(strict=True).load(parsed["system_profile"]).data
        host._update_system_profile(profile)
def main(logger):
    """Job entry point: run the task inside a session guard, then — whether
    it succeeded or not — flush pending work and push collected metrics to
    the Prometheus pushgateway."""
    config = _init_config()
    init_tasks(config)

    registry = CollectorRegistry()
    for collected_metric in COLLECTED_METRICS:
        registry.register(collected_metric)

    session_factory = _init_db(config)
    db_session = session_factory()

    try:
        with session_guard(db_session):
            run(config, logger, db_session)
    finally:
        # Always flush and report metrics, even on failure.
        flush()
        prometheus_job = _prometheus_job(config.kubernetes_namespace)
        push_to_gateway(config.prometheus_pushgateway, prometheus_job, registry)
def main(logger):
    """Job entry point: run the task with an event producer inside a session
    guard; on the way out, push metrics first and close the producer last —
    the nested finally guarantees the close even when the push fails."""
    config = _init_config()

    registry = CollectorRegistry()
    for collected_metric in COLLECTED_METRICS:
        registry.register(collected_metric)

    session_factory = _init_db(config)
    db_session = session_factory()

    event_producer = EventProducer(config)
    try:
        with session_guard(db_session):
            run(config, logger, db_session, event_producer)
    finally:
        try:
            prometheus_job = _prometheus_job(config.kubernetes_namespace)
            push_to_gateway(config.prometheus_pushgateway, prometheus_job, registry)
        finally:
            # Closed unconditionally, even if the metrics push raised.
            event_producer.close()
def add_host(input_host, staleness_offset, update_system_profile=True, fields=DEFAULT_FIELDS):
    """
    Add or update a host

    Required parameters:
     - at least one of the canonical facts fields is required
     - account number
    """
    with session_guard(db.session):
        existing_host = find_existing_host(input_host.account, input_host.canonical_facts)

        # No match on canonical facts: this is a brand-new host.
        if not existing_host:
            return create_new_host(input_host, staleness_offset, fields)

        return update_existing_host(
            existing_host, input_host, staleness_offset, update_system_profile, fields
        )
def main(config_name):
    """Job entry point: run the task inside a session guard; any exception is
    logged (top-level boundary) rather than propagated, and metrics are always
    flushed and pushed to the gateway on the way out."""
    config = _init_config(config_name)
    init_tasks(config)

    registry = CollectorRegistry()
    for collected_metric in COLLECTED_METRICS:
        registry.register(collected_metric)

    session_factory = _init_db(config)
    db_session = session_factory()

    try:
        with session_guard(db_session):
            run(config, db_session)
    except Exception as exception:
        # Top-level boundary: record the failure instead of crashing the job.
        logger = get_logger(LOGGER_NAME)
        logger.exception(exception)
    finally:
        flush()
        prometheus_job = _prometheus_job(config.kubernetes_namespace)
        push_to_gateway(config.prometheus_pushgateway, prometheus_job, registry)