Example #1
0
def synchronize_hosts(select_query,
                      event_producer,
                      chunk_size,
                      config,
                      interrupt=lambda: False):
    """Re-emit an "updated" event for every host matched by *select_query*.

    Hosts are processed in chunks of *chunk_size* using keyset pagination
    on ``Host.id``; the Kafka producer is flushed after each chunk to pace
    production. Yields each synchronized host's id.

    :param select_query: SQLAlchemy query selecting the hosts to sync
    :param event_producer: producer whose ``write_event`` publishes events
    :param chunk_size: number of hosts fetched (and flushed) per batch
    :param config: app config used to build staleness timestamps
    :param interrupt: callable checked between chunks; return True to stop
    :raises KafkaTimeoutError: if a chunk cannot be flushed within 300s
    """
    query = select_query.order_by(Host.id)
    host_list = query.limit(chunk_size).all()

    while host_list and not interrupt():
        for host in host_list:
            serialized_host = serialize_host(host,
                                             Timestamps.from_config(config),
                                             EGRESS_HOST_FIELDS)
            event = build_event(EventType.updated, serialized_host)
            insights_id = host.canonical_facts.get("insights_id")
            headers = message_headers(EventType.updated, insights_id)
            # in case of a failed update event, event_producer logs the message.
            event_producer.write_event(event, str(host.id), headers,
                                       Topic.events)
            synchronize_host_count.inc()

            yield host.id

        try:
            # pace the events production speed as flush completes sending all buffered records.
            event_producer._kafka_producer.flush(300)
        except KafkaTimeoutError as error:
            # Chain the original exception so the root cause and its
            # traceback are preserved for callers/logs.
            raise KafkaTimeoutError(
                f"KafkaTimeoutError: failure to flush {chunk_size} records within 300 seconds"
            ) from error

        # load next chunk using keyset pagination
        host_list = query.filter(
            Host.id > host_list[-1].id).limit(chunk_size).all()
Example #2
0
def update_system_profile(host_data, platform_metadata):
    """Update an existing host's system profile from *host_data*.

    Deserializes the payload with the limited host schema, updates the
    profile through the host repository under a mock identity built from
    the host's account, and records the outcome on the payload tracker.

    :param host_data: dict payload containing the host ``id`` and profile
    :param platform_metadata: per-message platform metadata (currently unused here)
    :return: tuple ``(output_host, host_id, insights_id, update_result)``
    :raises ValidationException: on schema validation failure (metric bumped)
    :raises InventoryException: on inventory-level failure (logged)
    :raises OperationalError: on DB access failure (logged)
    """
    payload_tracker = get_payload_tracker(request_id=threadctx.request_id)

    with PayloadTrackerProcessingContext(
            payload_tracker,
            processing_status_message="updating host system profile",
            current_operation="updating host system profile",
    ) as payload_tracker_processing_ctx:

        try:
            input_host = deserialize_host(host_data, schema=LimitedHostSchema)
            input_host.id = host_data.get("id")
            staleness_timestamps = Timestamps.from_config(inventory_config())
            identity = create_mock_identity_with_account(input_host.account)
            output_host, host_id, insights_id, update_result = host_repository.update_system_profile(
                input_host, identity, staleness_timestamps, EGRESS_HOST_FIELDS)
            log_update_system_profile_success(logger, output_host)
            payload_tracker_processing_ctx.inventory_id = output_host["id"]
            return output_host, host_id, insights_id, update_result
        except ValidationException:
            metrics.update_system_profile_failure.labels(
                "ValidationException").inc()
            raise
        except InventoryException:
            log_update_system_profile_failure(logger, host_data)
            raise
        except OperationalError as oe:
            log_db_access_failure(logger, f"Could not access DB {str(oe)}",
                                  host_data)
            # Bare re-raise preserves the original traceback exactly;
            # `raise oe` would append this handler's frame to it.
            raise
        except Exception:
            logger.exception("Error while updating host system profile",
                             extra={"host": host_data})
            metrics.update_system_profile_failure.labels("Exception").inc()
            raise
def add_host(host_data):
    """Add or update a host from the *host_data* payload.

    Deserializes the payload, persists it through the host repository and
    returns the serialized host plus the add/update result. Failures bump
    the ``add_host_failure`` metric (labelled by exception kind and
    reporter) and re-raise.
    """
    payload_tracker = get_payload_tracker(request_id=threadctx.request_id)

    with PayloadTrackerProcessingContext(
            payload_tracker, processing_status_message="adding/updating host"
    ) as payload_tracker_processing_ctx:

        try:
            parsed_host = deserialize_host(host_data)
            timestamps = Timestamps.from_config(inventory_config())
            logger.info(
                "Attempting to add host",
                extra={
                    "input_host": {
                        "account": parsed_host.account,
                        "display_name": parsed_host.display_name,
                        "canonical_facts": parsed_host.canonical_facts,
                        "reporter": parsed_host.reporter,
                        "stale_timestamp":
                        parsed_host.stale_timestamp.isoformat(),
                        "tags": parsed_host.tags,
                    }
                },
            )
            output_host, add_results = host_repository.add_host(
                parsed_host, timestamps, fields=EGRESS_HOST_FIELDS)
            # created vs updated
            metrics.add_host_success.labels(
                add_results.name, host_data.get("reporter", "null")).inc()
            # log all the incoming host data except facts and system_profile b/c they can be quite large
            loggable_host = {
                field: output_host[field]
                for field in output_host
                if field not in ("facts", "system_profile")
            }
            logger.info("Host %s", add_results.name,
                        extra={"host": loggable_host})
            payload_tracker_processing_ctx.inventory_id = output_host["id"]
            return (output_host, add_results)
        except InventoryException:
            logger.exception("Error adding host ", extra={"host": host_data})
            metrics.add_host_failure.labels(
                "InventoryException",
                host_data.get("reporter", "null")).inc()
            raise
        except Exception:
            logger.exception("Error while adding host",
                             extra={"host": host_data})
            metrics.add_host_failure.labels(
                "Exception", host_data.get("reporter", "null")).inc()
            raise
    def _update_host(self, added_host_index, new_id, new_modified_on):
        """Rewrite a previously added host's id and modified_on in the DB,
        then refresh the cached wrapper in ``self.added_hosts``."""
        previous_id = self.added_hosts[added_host_index].id

        host_record = db.session.query(Host).get(previous_id)
        host_record.id = new_id
        host_record.modified_on = new_modified_on
        db.session.add(host_record)

        timestamps = Timestamps.from_config(
            self.app.config["INVENTORY_CONFIG"])
        self.added_hosts[added_host_index] = HostWrapper(
            serialize_host(host_record, timestamps))
Example #5
0
def add_host(host_data, platform_metadata):
    """Add or update a host from *host_data* under the caller's identity.

    Resolves the identity from the payload/platform metadata, injects the
    owner id for system identities, then persists the host through the
    host repository and records the outcome on the payload tracker.

    :param host_data: dict payload describing the host
    :param platform_metadata: per-message platform metadata used to derive identity
    :return: tuple ``(output_host, host_id, insights_id, add_result)``
    :raises ValidationException: on schema validation failure (metric bumped)
    :raises InventoryException: on inventory-level failure (logged)
    :raises OperationalError: on DB access failure (logged)
    """
    payload_tracker = get_payload_tracker(request_id=threadctx.request_id)

    with PayloadTrackerProcessingContext(
            payload_tracker,
            processing_status_message="adding/updating host",
            current_operation="adding/updating host"
    ) as payload_tracker_processing_ctx:

        try:
            identity = _get_identity(host_data, platform_metadata)
            # basic-auth does not need owner_id
            if identity.identity_type == IdentityType.SYSTEM:
                host_data = _set_owner(host_data, identity)

            input_host = deserialize_host(host_data)
            staleness_timestamps = Timestamps.from_config(inventory_config())
            log_add_host_attempt(logger, input_host)
            output_host, host_id, insights_id, add_result = host_repository.add_host(
                input_host,
                identity,
                staleness_timestamps,
                fields=EGRESS_HOST_FIELDS)
            log_add_update_host_succeeded(logger, add_result, host_data,
                                          output_host)
            payload_tracker_processing_ctx.inventory_id = output_host["id"]
            return output_host, host_id, insights_id, add_result
        except ValidationException:
            metrics.add_host_failure.labels("ValidationException",
                                            host_data.get("reporter",
                                                          "null")).inc()
            raise
        except InventoryException as ie:
            log_add_host_failure(logger, str(ie.detail), host_data)
            raise
        except OperationalError as oe:
            log_db_access_failure(logger, f"Could not access DB {str(oe)}",
                                  host_data)
            # Bare re-raise preserves the original traceback exactly;
            # `raise oe` would append this handler's frame to it.
            raise
        except Exception:
            logger.exception("Error while adding host",
                             extra={"host": host_data})
            metrics.add_host_failure.labels("Exception",
                                            host_data.get("reporter",
                                                          "null")).inc()
            raise
def add_host(host_data):
    """Add or update a host from the *host_data* payload and return the
    serialized host together with the add/update result."""
    payload_tracker = get_payload_tracker(payload_id=threadctx.request_id)

    with PayloadTrackerProcessingContext(
            payload_tracker, processing_status_message="adding/updating host"
    ) as payload_tracker_processing_ctx:

        try:
            logger.info("Attempting to add host...")
            parsed_host = deserialize_host(host_data)
            timestamps = Timestamps.from_config(inventory_config())
            output_host, add_results = host_repository.add_host(
                parsed_host, timestamps, fields=EGRESS_HOST_FIELDS)
            # created vs updated
            metrics.add_host_success.labels(
                add_results.name, host_data.get("reporter", "null")).inc()
            # This definitely needs to be more specific (added vs updated?)
            logger.info("Host added")
            payload_tracker_processing_ctx.inventory_id = output_host["id"]
            return (output_host, add_results)
        except InventoryException:
            logger.exception("Error adding host ", extra={"host": host_data})
            metrics.add_host_failure.labels(
                "InventoryException",
                host_data.get("reporter", "null")).inc()
            raise
        except Exception:
            logger.exception("Error while adding host",
                             extra={"host": host_data})
            metrics.add_host_failure.labels(
                "Exception", host_data.get("reporter", "null")).inc()
            raise
Example #7
0
def staleness_timestamps():
    """Return staleness Timestamps built from the current inventory config."""
    return Timestamps.from_config(inventory_config())
    # NOTE(review): everything below is unreachable — it follows the
    # return statement above. It reads like the body of a separate
    # host-lookup CLI script (it references an `args` namespace and
    # pretty-prints serialized hosts) that was merged into this function
    # by mistake. Confirm intent and relocate it to its own function, or
    # delete it.
    # print(query_results)
    if args.id:
        host_id_list = [args.id]
        print("looking up host using id")
        query_results = Host.query.filter(Host.id.in_(host_id_list)).all()
    elif args.hostname:
        # matches either the display_name or the canonical fqdn fact
        print("looking up host using display_name, fqdn")
        query_results = Host.query.filter(
            Host.display_name.comparator.contains(args.hostname)
            | Host.canonical_facts["fqdn"].astext.contains(args.hostname)).all(
            )
    elif args.insights_id:
        print("looking up host using insights_id")
        query_results = Host.query.filter(
            Host.canonical_facts.comparator.contains(
                {"insights_id": args.insights_id})).all()
    elif args.account_number:
        query_results = Host.query.filter(
            Host.account == args.account_number).all()

    # NOTE(review): query_results is unbound if none of the branches above
    # match — presumably the CLI enforces one of the arguments; verify.
    staleness_timestamps = Timestamps.from_config(inventory_config())
    json_host_list = [
        serialize_host(host, staleness_timestamps) for host in query_results
    ]

    if args.no_pp:
        print(json_host_list)
    else:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(json_host_list)
def serialize_db_host(host, inventory_config):
    """Serialize a DB *host* row with staleness offsets derived from
    *inventory_config* and wrap the result in a HostWrapper."""
    offsets = Timestamps.from_config(inventory_config)
    return HostWrapper(serialize_host(host, offsets))