def test_ensure_version_higher(self, caplog, pg_db_conn):
    """Test schema version check when the database schema version is higher than the minimal required"""
    utils.CFG.minimal_schema = 0
    with caplog.at_level(logging.INFO):
        utils.ensure_minimal_schema_version()
    assert f"minimal required: {utils.CFG.minimal_schema}, OK" in caplog.messages[0]
Example #2
def main():
    """Sets up and run whole application"""
    # Set up endpoint for prometheus monitoring
    init_logging()
    ensure_minimal_schema_version()
    LOGGER.info("Using BOOTSTRAP_SERVERS: %s", mqueue.BOOTSTRAP_SERVERS)
    LOGGER.info("Using GROUP_ID: %s", mqueue.GROUP_ID)
    LOGGER.info("Using TOPICS: %s", ", ".join(kafka_evaluator_topic))
    LOGGER.info("Opening port [%s] for prometheus", prometheus_port)
    start_http_server(int(prometheus_port))
    MAIN_LOOP.run_until_complete(run())
Example #3
    def test_ensure_version_lower(self, caplog, pg_db_conn):
        """Test schema version check when the database schema version is lower than the minimal required"""
        utils.CFG.minimal_schema = sys.maxsize

        # replace sleep so the retry loop breaks out immediately in the test suite
        def sleep_break(_):
            raise TestException("OK")

        utils.sleep = sleep_break
        with pytest.raises(TestException):
            with caplog.at_level(logging.INFO):
                utils.ensure_minimal_schema_version()

        assert f"minimal required: {utils.CFG.minimal_schema}, waiting..." in caplog.messages[
            0]
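The two tests above constrain the behaviour of utils.ensure_minimal_schema_version only indirectly: it logs "... minimal required: X, OK" when the database schema is new enough, and logs "... minimal required: X, waiting..." and sleeps before retrying otherwise. A minimal sketch consistent with those expectations follows; get_current_schema_version and RETRY_INTERVAL are assumed names for illustration, not the project's actual API:

def ensure_minimal_schema_version():
    """Block until the database schema version reaches CFG.minimal_schema (sketch only)."""
    while True:
        current = get_current_schema_version()  # assumed helper reading the deployed schema version
        if current >= CFG.minimal_schema:
            LOGGER.info("Current schema version: %s, minimal required: %s, OK",
                        current, CFG.minimal_schema)
            return
        LOGGER.info("Current schema version: %s, minimal required: %s, waiting...",
                    current, CFG.minimal_schema)
        sleep(RETRY_INTERVAL)  # module-level sleep, monkeypatched by the test above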
Example #4
def main():
    """Creates schleduler, fills it up with tasks and runs it"""
    init_logging()
    ensure_minimal_schema_version()

    scheduler = BlockingScheduler()

    for job_info in JOBS:
        job_name, interval = job_info.split(':')
        job = import_job(job_name)
        if job:
            LOGGER.info('Adding job: %s, cadence each %s minutes', job_name, interval)
            scheduler.add_job(job.run, IntervalTrigger(minutes=int(interval)))
        else:
            LOGGER.error('Couldn\'t find job data for job: %s', job_name)
    scheduler.start()
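The loop above expects every JOBS entry to be a "job_name:interval_minutes" string. For illustration only, with made-up job names:

# hypothetical values; the real list comes from the service configuration
JOBS = ["cleanup_job:60", "metrics_job:1440"]
# "cleanup_job:60" -> import_job("cleanup_job"), scheduled to run every 60 minutes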
Example #5
def main():
    """Main VMaaS listener entrypoint."""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    ensure_minimal_schema_version()
    LOGGER.info("Starting VMaaS sync service.")
    with DatabasePool(1):
        app = ServerApplication()
        app.listen(8000)

        def terminate(*_):
            """Trigger shutdown."""
            LOGGER.info("Signal received, stopping application.")
            IOLoop.instance().add_callback_from_signal(app.stop)

        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
        for sig in signals:
            signal.signal(sig, terminate)

        app.start()
    LOGGER.info("Shutting down.")
Example #6
def main():
    # pylint: disable=too-many-branches, too-many-statements
    """Application entrypoint"""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    ensure_minimal_schema_version()
    LOGGER.info('Starting advisor listener.')

    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
    executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

    def process_message(msg):
        """Message processing logic"""
        PROCESS_MESSAGES.inc()
        LOGGER.debug('Message from topic %s, body: %s', msg.topic, msg.value)

        try:
            msg_dict = json.loads(msg.value.decode('utf8'))
        except json.decoder.JSONDecodeError:
            MESSAGE_PARSE_ERROR.inc()
            LOGGER.exception('Unable to parse message: ')
            return

        send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                    msg_dict['input'], 'processing',
                                    'Starting advisor evaluation')

        if not validate_kafka_msg(msg_dict, REQUIRED_MESSAGE_FIELDS):
            INVALID_INSIGHTS_ACC.inc()
            send_msg_to_payload_tracker(
                PAYLOAD_TRACKER_PRODUCER, msg_dict['input'], 'error',
                'Skipped advisor result due to message coming from non-insights account.'
            )
            LOGGER.debug(
                'Skipped advisor result due to message coming from non-insights account.')
            return
        identity = get_identity(
            msg_dict['input']['platform_metadata']['b64_identity'])
        if identity is None:
            INVALID_IDENTITY.inc()
            send_msg_to_payload_tracker(
                PAYLOAD_TRACKER_PRODUCER, msg_dict['input'], 'error',
                'Skipped advisor result due to invalid identity header.')
            LOGGER.debug(
                'Skipped advisor result due to invalid identity header.')
            return
        if not is_entitled_insights(identity, allow_missing_section=True):
            MISSING_INSIGHTS_ENTITLEMENT.inc()
            send_msg_to_payload_tracker(
                PAYLOAD_TRACKER_PRODUCER, msg_dict['input'], 'error',
                'Skipped advisor result due to missing insights entitlement.')
            LOGGER.debug(
                'Skipped advisor result due to missing insights entitlement.')
            return

        # TODO: insert system into database if it's 1st upload, shall we update last seen?
        system_data = {
            'rh_account': msg_dict['input']['host']['account'],
            'display_name': msg_dict['input']['host']['display_name'],
            'inventory_id': msg_dict['input']['host']['id'],
            'stale_timestamp': msg_dict['input']['host']['stale_timestamp'],
            'stale_warning_timestamp': msg_dict['input']['host']['stale_warning_timestamp'],
            'culled_timestamp': msg_dict['input']['host']['culled_timestamp']
        }

        LOGGER.info("Evaluating rule hits for inventory_id: %s",
                    system_data["inventory_id"])

        rule_hits = {}

        reports = msg_dict['results']['reports']
        for report in reports:
            if 'cves' in report['details']:
                rule = report['rule_id']
                if rule in RULE_BLACKLIST:
                    # TODO: remove this once CVE_2017_5753_4_cpu_kernel and CVE_2017_5715_cpu_virt are merged
                    continue
                if rule not in RULES_CACHE:
                    db_import_rule(rule,
                                   list(report['details']['cves'].keys()))
                for cve in report['details']['cves']:
                    if cve not in CVES_CACHE:
                        db_import_cve(cve)
                    if not report['details']['cves'][cve]:
                        # False in the CVE dict indicates a failed rule
                        rule_hits[CVES_CACHE[cve]] = {
                            'id': RULES_CACHE[rule],
                            'details': json.dumps(report['details']),
                            'cve_name': cve
                        }
                    else:
                        # any other (truthy) value is the reason the rule is mitigated
                        rule_hits[CVES_CACHE[cve]] = {
                            'id': RULES_CACHE[rule],
                            'mitigation_reason': report['details']['cves'][cve]
                        }

        success = False
        try:
            success = db_import_system(system_data, rule_hits, loop)
        except DatabaseError:
            # re-raise so the exception does not get lost after the finally block runs
            raise
        finally:
            LOGGER.debug("Finished evaluating rule hits for inventory_id: %s",
                         system_data["inventory_id"])
            if success:
                send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                            msg_dict['input'], 'success')
            else:
                send_msg_to_payload_tracker(
                    PAYLOAD_TRACKER_PRODUCER, msg_dict['input'], 'error',
                    'Error importing system to vulnerability')

    with DatabasePool(WORKER_THREADS):
        db_init_caches()

        ADVISOR_QUEUE.listen(process_message)

        loop.run_forever()

        LOGGER.info('Shutting down.')
        executor.shutdown()
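For clarity, rule_hits in the advisor example above is keyed by the CVE's database id from CVES_CACHE, and each value holds the rule's database id plus either the failed-rule details or the mitigation reason. With invented ids and CVE names, one possible value is:

# illustrative only; ids and CVE names are made up
rule_hits = {
    101: {  # CVES_CACHE['CVE-2021-0001'], rule failed
        'id': 7,  # RULES_CACHE[rule]
        'details': '{"cves": {"CVE-2021-0001": false}}',
        'cve_name': 'CVE-2021-0001'
    },
    102: {  # CVES_CACHE['CVE-2021-0002'], rule mitigated
        'id': 7,
        'mitigation_reason': 'SELinux prevents this issue'
    }
}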
Example #7
def main():  # pylint: disable=too-many-statements
    """Main kafka listener entrypoint."""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    ensure_minimal_schema_version()
    LOGGER.info("Starting upload listener.")

    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
    executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

    def process_message(msg):  # pylint: disable=too-many-return-statements,too-many-branches
        """Message processing logic"""
        PROCESS_MESSAGES.inc()
        LOGGER.debug('Received message from topic %s: %s', msg.topic,
                     msg.value)

        try:
            msg_dict = json.loads(msg.value.decode("utf8"))
        except json.decoder.JSONDecodeError:
            MESSAGE_PARSE_ERROR.inc()
            LOGGER.exception("Unable to parse message: ")
            return
        FailedCache.process_failed_cache(FailedCache.upload_cache, executor,
                                         process_upload, loop)
        FailedCache.process_failed_cache(FailedCache.delete_cache, executor,
                                         process_delete, loop)

        if msg.topic == mqueue.EVENTS_TOPIC:
            if msg_dict.get("type", "") in ["created", "updated"]:
                if not validate_kafka_msg(
                        msg_dict, REQUIRED_CREATED_UPDATED_MESSAGE_FIELDS):
                    SKIPPED_MESSAGES.inc()
                    return
                if msg_dict.get("platform_metadata"):
                    if not validate_kafka_msg(msg_dict,
                                              REQUIRED_UPLOAD_MESSAGE_FIELDS):
                        SKIPPED_MESSAGES.inc()
                        return
                    LOGGER.info(
                        "Received created/updated msg, inventory_id: %s, type: %s",
                        msg_dict["host"]["id"], msg_dict["type"])
                    # send message to payload tracker
                    send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                                msg_dict,
                                                'received',
                                                loop=loop)
                    # process only system uploads from insights entitled accounts
                    identity = get_identity(
                        msg_dict["platform_metadata"]["b64_identity"])
                    if identity is None:
                        INVALID_IDENTITY.inc()
                        error_msg = "Skipped upload due to invalid identity header."
                        LOGGER.warning(error_msg)
                        send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                                    msg_dict,
                                                    'error',
                                                    status_msg=error_msg,
                                                    loop=loop)
                        return
                    if not is_entitled_insights(identity,
                                                allow_missing_section=True):
                        MISSING_INSIGHTS_ENTITLEMENT.inc()
                        error_msg = "Skipped upload due to missing insights entitlement."
                        LOGGER.debug(error_msg)
                        send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                                    msg_dict,
                                                    'error',
                                                    status_msg=error_msg,
                                                    loop=loop)
                        return
                    process_func = process_upload
                else:
                    # display name change messages don't have a platform_metadata section,
                    # so identity cannot be validated and the payload cannot be tracked;
                    # only display name changes are supported
                    LOGGER.info("Received update event msg, inventory_id: %s",
                                msg_dict["host"]["id"])
                    process_func = process_update
            elif msg_dict.get("type", "") == "delete":
                if not validate_kafka_msg(msg_dict,
                                          REQUIRED_DELETE_MESSAGE_FIELDS):
                    SKIPPED_MESSAGES.inc()
                    return
                LOGGER.info("Received delete msg, inventory_id: %s",
                            msg_dict["id"])
                process_func = process_delete
            else:
                UNKNOWN_EVENT_TYPE.inc()
                LOGGER.error("Received unknown event type: %s",
                             msg_dict['type'])
                return
        else:
            UNKNOWN_TOPIC.inc()
            LOGGER.error("Received message on unsupported topic: %s",
                         msg.topic)
            return

        future = executor.submit(process_func, msg_dict, loop=loop)
        future.add_done_callback(on_thread_done)

    with DatabasePool(WORKER_THREADS):
        # prepare repo name to id cache
        db_init_repo_cache()
        LISTENER_QUEUE.listen(process_message)

        # wait until loop is stopped from terminate callback
        loop.run_forever()

        LOGGER.info("Shutting down.")
        executor.shutdown()
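Both listener examples submit work through a BoundedExecutor and attach on_thread_done to every future, but the callback itself is not part of these snippets. A minimal sketch of what such a done-callback is assumed to do, so that worker exceptions are not silently dropped:

def on_thread_done(future):
    """Log any exception raised inside a worker thread (sketch, not the project's implementation)."""
    try:
        future.result()
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception("Future %s raised an exception: ", future)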