Example #1
def main():
    """Main kafka listener entrypoint."""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    init_db()
    LOGGER.info("Starting upload listener.")
    # get DB connection
    conn = DatabaseHandler.get_connection()

    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
    executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

    def process_message(msg):
        """Message processing logic"""
        PROCESS_MESSAGES.inc()
        LOGGER.info('Received message from topic %s: %s', msg.topic, msg.value)

        try:
            msg_dict = json.loads(msg.value.decode("utf8"))
        except json.decoder.JSONDecodeError:
            MESSAGE_PARSE_ERROR.inc()
            LOGGER.exception("Unable to parse message: ")
            return

        if msg.topic == mqueue.UPLOAD_TOPIC:
            process_func = process_upload
        elif msg.topic == mqueue.EVENTS_TOPIC:
            if msg_dict['type'] == 'delete':
                process_func = process_delete
            else:
                UNKNOWN_EVENT_TYPE.inc()
                LOGGER.error("Received unknown event type: %s",
                             msg_dict['type'])
                return
        else:
            UNKNOWN_TOPIC.inc()
            LOGGER.error("Received message on unsupported topic: %s",
                         msg.topic)
            return

        if 'id' not in msg_dict or msg_dict["id"] is None:
            MISSING_ID.inc()
            LOGGER.warning(
                "Unable to process message, inventory ID is missing.")
            return

        future = executor.submit(process_func, msg_dict, conn, loop=loop)
        future.add_done_callback(on_thread_done)

    LISTENER_QUEUE.listen(process_message)

    # wait until loop is stopped from terminate callback
    loop.run_forever()

    LOGGER.info("Shutting down.")
    executor.shutdown()
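The signal handlers registered above schedule a terminate coroutine on the event loop, but its body is not part of this excerpt. A minimal sketch, assuming its only job is to cancel outstanding work and stop the loop so that run_forever() returns and the shutdown code after it runs:

import asyncio
import logging

LOGGER = logging.getLogger(__name__)


async def terminate(sig, loop):
    """Hypothetical sketch: cancel outstanding tasks and stop the event loop."""
    LOGGER.info("Received signal %s, shutting down.", sig.name)
    tasks = [task for task in asyncio.all_tasks() if task is not asyncio.current_task()]
    for task in tasks:
        task.cancel()
    await asyncio.gather(*tasks, return_exceptions=True)
    loop.stop()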
    def test_executor_single(self):
        """Test executor as single task"""
        executor = BoundedExecutor(1)
        number = 1

        future = executor.submit(TestBoundedExecutor._executor_func_mock,
                                 number)
        res = future.result()

        assert res == number
        executor.shutdown()
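Every example on this page pushes work through BoundedExecutor, whose definition is not shown. A minimal sketch consistent with how it is used here (constructor takes a queue bound plus max_workers, submit() blocks once the bound is reached, shutdown() drains the pool); treating the bound as the number of in-flight tasks is an assumption:

from concurrent.futures import ThreadPoolExecutor
from threading import BoundedSemaphore


class BoundedExecutor:
    """Hypothetical sketch: thread pool whose submit() blocks once `bound` slots are taken."""

    def __init__(self, bound, max_workers=1):
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        self.semaphore = BoundedSemaphore(bound)

    def submit(self, func, *args, **kwargs):
        self.semaphore.acquire()  # blocks the caller while the queue is full
        try:
            future = self.executor.submit(func, *args, **kwargs)
        except Exception:
            self.semaphore.release()
            raise
        future.add_done_callback(lambda _: self.semaphore.release())
        return future

    def shutdown(self, wait=True):
        self.executor.shutdown(wait=wait)

With bound=1 this sketch also reproduces the behaviour the tests below rely on: a second submit() does not return until the first task has finished.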
def run():
    """Application entrypoint"""
    LOGGER.info("Started cacheman job.")

    conn = get_conn()
    cur = conn.cursor()

    current_cache = {}
    cur.execute("""SELECT rh_account_id, cve_id, systems_affected, systems_status_divergent
                   FROM cve_account_cache""")
    for rh_account_id, cve_id, systems_affected, systems_status_divergent in cur.fetchall():
        current_cache.setdefault(rh_account_id, {})[cve_id] = (systems_affected, systems_status_divergent)

    cur.execute("""SELECT sp.rh_account_id, a.name, a.cve_cache_from,
                          GREATEST(MAX(sp.last_evaluation), MAX(sp.advisor_evaluated), MAX(sp.when_deleted),
                                   MAX(a.last_status_change)) AS last_system_change,
                          a.cve_cache_keepalive,
                          COUNT(*) AS total_systems
                   FROM system_platform sp INNER JOIN
                        rh_account a on sp.rh_account_id = a.id
                   GROUP BY sp.rh_account_id, a.name, a.cve_cache_from, a.cve_cache_keepalive
                   HAVING COUNT(*) >= %s""", (CFG.cache_minimal_account_systems,))
    accounts = [(account_id, account_name, cve_cache_from, last_system_change, cve_cache_keepalive)
                for account_id, account_name, cve_cache_from, last_system_change, cve_cache_keepalive, _ in cur.fetchall()
                if validate_cve_cache_keepalive(cve_cache_keepalive, 2)]
    LOGGER.info("Accounts with enabled cache: %s", len(accounts))
    accounts_to_refresh = [account for account in accounts if account[3] and (not account[2] or account[3] > account[2])]
    LOGGER.info("Accounts requiring cache refresh: %s", len(accounts_to_refresh))

    # Process accounts in parallel
    with DatabasePool(CACHE_WORKERS):
        executor = BoundedExecutor(CACHE_WORKERS, max_workers=CACHE_WORKERS)
        futures = []
        for account_id, account_name, _, _, _ in accounts_to_refresh:
            futures.append(executor.submit(_materialize_account_cache, account_id, account_name, current_cache))
        for future in futures:
            future.result()
        executor.shutdown()
    # Pop out cached accounts after all workers are done
    for account_id, _, _, _, _ in accounts:
        current_cache.pop(account_id, None)

    LOGGER.info("Accounts to disable cache: %s", len(current_cache))
    for account_id in current_cache:
        cur.execute("""DELETE FROM cve_account_cache WHERE rh_account_id = %s""", (account_id,))
        cur.execute("""UPDATE rh_account SET cve_cache_from = NULL WHERE id = %s""", (account_id,))
        conn.commit()

    cur.close()
    conn.close()
    LOGGER.info("Finished cacheman job.")
Example #4
    def run(self):
        """
        This method evaluates incoming system package profiles using VMaaS
        :return:
        """

        loop = asyncio.get_event_loop()
        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
        for sig in signals:
            loop.add_signal_handler(
                sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
        executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

        # pylint: disable=too-many-branches
        def process_message(message):
            """Message procession logic"""
            try:
                msg_dict = json.loads(message.value.decode('utf-8'))
                FailedCache.process_failed_cache(FailedCache.upload_cache, executor, self.process_upload_or_re_evaluate, loop)
            except json.decoder.JSONDecodeError:
                MESSAGE_PARSE_ERROR.inc()
                LOGGER.exception("Unable to parse message: ")
                return
            if message.topic in kafka_evaluator_topic:
                if 'type' not in msg_dict:
                    LOGGER.error("Received message is missing type field: %s", msg_dict)
                    return
                if msg_dict['type'] in ['upload_new_file', 're-evaluate_system']:
                    process_func = self.process_upload_or_re_evaluate
                    if msg_dict['type'] == 'upload_new_file':
                        send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER, msg_dict, 'processing',
                                                    status_msg='Scheduled for evaluation', loop=loop)
                else:
                    UNKNOWN_MSG.inc()
                    LOGGER.error("Received unknown message type: %s", msg_dict['type'])
                    return

                future = executor.submit(process_func, msg_dict, loop=loop)
                future.add_done_callback(on_thread_done)
            else:
                UNKNOWN_TOPIC.inc()
                LOGGER.error("Received message on unsupported topic: %s", message.topic)

        with DatabasePool(WORKER_THREADS):
            CONSUMER_QUEUE.listen(process_message)

            # wait until loop is stopped from terminate callback
            loop.run_forever()

            LOGGER.info("Shutting down.")
            executor.shutdown()
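Futures submitted by these listeners get on_thread_done attached as a done callback. Its implementation is not included in the excerpts; in this pattern it typically just surfaces worker-thread exceptions, e.g. a sketch like:

import logging

LOGGER = logging.getLogger(__name__)


def on_thread_done(future):
    """Hypothetical sketch: log any exception raised in the worker so it is not silently dropped."""
    try:
        future.result()
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception("Worker future failed: ")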
Example #5
    def run(self):
        """
        This method evaluates incoming system package profiles using VMaaS
        :return:
        """

        loop = asyncio.get_event_loop()
        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
        for sig in signals:
            loop.add_signal_handler(
                sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
        executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

        # pylint: disable=too-many-branches
        def process_message(message):
            """Message procession logic"""
            if message.topic in kafka_evaluator_topic:
                msg_dict = json.loads(message.value.decode('utf-8'))
                if 'type' not in msg_dict:
                    LOGGER.error("Received message is missing type field: %s",
                                 msg_dict)
                    return
                if msg_dict['type'] in [
                        'upload_new_file', 're-evaluate_system'
                ]:
                    process_func = self.process_upload_or_re_evaluate
                else:
                    UNKNOWN_MSG.inc()
                    LOGGER.error("Received unknown message type: %s",
                                 msg_dict['type'])
                    return

                future = executor.submit(process_func, msg_dict, loop=loop)
                future.add_done_callback(on_thread_done)
            else:
                UNKNOWN_TOPIC.inc()
                LOGGER.error("Received message on unsupported topic: %s",
                             message.topic)

        with DatabasePool(WORKER_THREADS):
            CONSUMER_QUEUE.listen(process_message)

            # wait until loop is stopped from terminate callback
            loop.run_forever()

            LOGGER.info("Shutting down.")
            executor.shutdown()
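The run() methods wrap their consume loop in `with DatabasePool(WORKER_THREADS):` so each worker thread can obtain a database connection. The class itself is not shown; a minimal sketch built on psycopg2's ThreadedConnectionPool, with the DSN handling as a placeholder assumption:

import os

from psycopg2.pool import ThreadedConnectionPool


class DatabasePool:
    """Hypothetical sketch: process-wide connection pool sized for the worker threads."""

    pool = None

    def __init__(self, size):
        self.size = size

    def __enter__(self):
        # DATABASE_DSN is a placeholder; the real service builds its DSN from config
        DatabasePool.pool = ThreadedConnectionPool(1, self.size,
                                                   dsn=os.environ.get("DATABASE_DSN", ""))
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        DatabasePool.pool.closeall()
        DatabasePool.pool = None
        return False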
    def test_executor_blocking(self, caplog):
        """Test executor if is blocking"""
        executor = BoundedExecutor(1)
        number = 1
        with caplog.at_level(log.INFO):
            future1 = executor.submit(TestBoundedExecutor._executor_func_mock,
                                      number)
            number = 2
            future2 = executor.submit(TestBoundedExecutor._executor_func_mock,
                                      number)
            res1 = future1.result()
            res2 = future2.result()

        assert res1 == 1
        assert res2 == 2

        assert "1" in caplog.messages[0]
        assert "2" in caplog.messages[1]
Example #7
def main():
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

    bench = Benchmark()

    def process(msg):
        executor.submit(process_msg, msg, bench)

    reader = MQReader(LISTENER_TOPIC)
    reader.listen(process)

    loop.run_forever()
    LOGGER.info("Shutting down.")
def main():  # pylint: disable=too-many-statements
    """Main kafka listener entrypoint."""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    LOGGER.info("Starting upload listener.")

    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
    executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

    def process_message(msg):  # pylint: disable=too-many-return-statements,too-many-branches
        """Message processing logic"""
        PROCESS_MESSAGES.inc()
        LOGGER.info('Received message from topic %s: %s', msg.topic, msg.value)

        try:
            msg_dict = json.loads(msg.value.decode("utf8"))
        except json.decoder.JSONDecodeError:
            MESSAGE_PARSE_ERROR.inc()
            LOGGER.exception("Unable to parse message: ")
            return
        FailedCache.process_failed_cache(FailedCache.upload_cache, executor,
                                         process_upload, loop)
        FailedCache.process_failed_cache(FailedCache.delete_cache, executor,
                                         process_delete, loop)

        if msg.topic == mqueue.UPLOAD_TOPIC:
            if not validate_msg(msg_dict, "upload",
                                REQUIRED_UPLOAD_MESSAGE_FIELDS):
                return
            # send message to payload tracker
            send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                        msg_dict,
                                        'received',
                                        loop=loop)
            # process only archives from smart_management accounts
            identity = get_identity(
                msg_dict["platform_metadata"]["b64_identity"])
            if identity is None:
                INVALID_IDENTITY.inc()
                error_msg = "Skipped upload due to invalid identity header."
                LOGGER.warning(error_msg)
                send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                            msg_dict,
                                            'error',
                                            status_msg=error_msg,
                                            loop=loop)
                return
            if not is_entitled_smart_management(identity,
                                                allow_missing_section=True):
                MISSING_SMART_MANAGEMENT.inc()
                error_msg = "Skipped upload due to missing smart_management entitlement."
                LOGGER.debug(error_msg)
                send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                            msg_dict,
                                            'error',
                                            status_msg=error_msg,
                                            loop=loop)
                return
            process_func = process_upload
        elif msg.topic == mqueue.EVENTS_TOPIC:
            if not validate_msg(msg_dict, "event",
                                REQUIRED_EVENT_MESSAGE_FIELDS):
                return
            if msg_dict['type'] == 'delete':
                process_func = process_delete
            else:
                UNKNOWN_EVENT_TYPE.inc()
                LOGGER.error("Received unknown event type: %s",
                             msg_dict['type'])
                return
        else:
            UNKNOWN_TOPIC.inc()
            LOGGER.error("Received message on unsupported topic: %s",
                         msg.topic)
            return

        future = executor.submit(process_func, msg_dict, loop=loop)
        future.add_done_callback(on_thread_done)

    with DatabasePool(WORKER_THREADS):
        # prepare repo name to id cache
        db_init_repo_cache()
        LISTENER_QUEUE.listen(process_message)

        # wait until loop is stopped from terminate callback
        loop.run_forever()

        LOGGER.info("Shutting down.")
        executor.shutdown()
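This variant guards each topic with validate_msg(msg_dict, ..., REQUIRED_*_MESSAGE_FIELDS) before doing any work. The validator is not shown; a minimal sketch assuming a flat check of top-level keys (the real helper may also walk nested fields):

import logging

LOGGER = logging.getLogger(__name__)


def validate_msg(msg_dict, msg_type, required_fields):
    """Hypothetical sketch: reject messages that are missing any required field."""
    missing = [field for field in required_fields if field not in msg_dict]
    if missing:
        LOGGER.warning("Skipped %s message, missing fields: %s", msg_type, missing)
        return False
    return True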
def main():
    """Main kafka listener entrypoint."""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    LOGGER.info("Starting upload listener.")

    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
    executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

    def process_message(msg):
        """Message processing logic"""
        PROCESS_MESSAGES.inc()
        LOGGER.info('Received message from topic %s: %s', msg.topic, msg.value)

        try:
            msg_dict = json.loads(msg.value.decode("utf8"))
        except json.decoder.JSONDecodeError:
            MESSAGE_PARSE_ERROR.inc()
            LOGGER.exception("Unable to parse message: ")
            return

        if msg.topic == mqueue.UPLOAD_TOPIC:
            # process only archives from smart_management accounts
            identity = get_identity(msg_dict.get("b64_identity", ""))
            if identity is None:
                INVALID_IDENTITY.inc()
                LOGGER.warning(
                    "Skipped upload due to invalid identity header.")
                return
            if not is_entitled_smart_management(identity,
                                                allow_missing_section=True):
                MISSING_SMART_MANAGEMENT.inc()
                LOGGER.debug(
                    "Skipped upload due to missing smart_management entitlement."
                )
                return
            process_func = process_upload
        elif msg.topic == mqueue.EVENTS_TOPIC:
            if msg_dict['type'] == 'delete':
                process_func = process_delete
            else:
                UNKNOWN_EVENT_TYPE.inc()
                LOGGER.error("Received unknown event type: %s",
                             msg_dict['type'])
                return
        else:
            UNKNOWN_TOPIC.inc()
            LOGGER.error("Received message on unsupported topic: %s",
                         msg.topic)
            return

        if 'id' not in msg_dict or msg_dict["id"] is None:
            MISSING_ID.inc()
            LOGGER.warning(
                "Unable to process message, inventory ID is missing.")
            return

        future = executor.submit(process_func, msg_dict, loop=loop)
        future.add_done_callback(on_thread_done)

    with DatabasePool(WORKER_THREADS):
        # prepare repo name to id cache
        db_init_repo_cache()
        LISTENER_QUEUE.listen(process_message)

        # wait until loop is stopped from terminate callback
        loop.run_forever()

        LOGGER.info("Shutting down.")
        executor.shutdown()
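The upload branch decodes the b64_identity header with get_identity and then checks the smart_management entitlement. Neither helper is included in the excerpt; a sketch of the usual decoding, with the exact location of the entitlements section treated as an assumption:

import base64
import json


def get_identity(b64_identity):
    """Hypothetical sketch: decode the base64 identity header, or return None if malformed."""
    try:
        return json.loads(base64.b64decode(b64_identity))['identity']
    except Exception:  # pylint: disable=broad-except
        return None


def is_entitled_smart_management(identity, allow_missing_section=False):
    """Hypothetical sketch: check the smart_management entitlement flag."""
    entitlements = identity.get('entitlements')
    if entitlements is None:
        return allow_missing_section
    return entitlements.get('smart_management', {}).get('is_entitled', False)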
def set_listener_ctx():
    """Setup upload listener context"""
    ListenerCtx.loop = asyncio.get_event_loop()
    ListenerCtx.executor = BoundedExecutor(CFG.max_queue_size,
                                           max_workers=WORKER_THREADS)
def main():
    """Application entrypoint"""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    LOGGER.info('Starting advisor listener.')

    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
    executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

    def process_message(msg):
        """Message processing logic"""
        PROCESS_MESSAGES.inc()
        LOGGER.info('Received message on topic %s', msg.topic)
        LOGGER.debug('Message body: %s', msg.value)

        try:
            msg_dict = json.loads(msg.value.decode('utf8'))
        except json.decoder.JSONDecodeError:
            MESSAGE_PARSE_ERROR.inc()
            LOGGER.exception('Unable to parse message: ')
            return

        identity = get_identity(
            msg_dict['input']['platform_metadata']['b64_identity'])
        if identity is None:
            INVALID_IDENTITY.inc()
            LOGGER.warning(
                'Skipped advisor result due to invalid identity header.')
            return
        if not is_entitled_insights(identity, allow_missing_section=True):
            MISSING_INSIGHTS_ENTITLEMENT.inc()
            LOGGER.debug(
                'Skipped advisor result due to missing insights entitlement.')
            return

        # TODO: insert system into database if it's 1st upload, shall we update last seen?
        system_data = {
            'rh_account': msg_dict['input']['host']['account'],
            'display_name': msg_dict['input']['host']['display_name'],
            'inventory_id': msg_dict['input']['host']['id']
        }

        rule_hits = {}

        reports = msg_dict['results']['reports']
        for report in reports:
            if 'cves' in report['details']:
                rule = report['rule_id']
                if rule in RULE_BLACKLIST:
                    # TODO: remove this once CVE_2017_5753_4_cpu_kernel and CVE_2017_5715_cpu_virt are merged
                    continue
                if rule not in RULES_CACHE:
                    db_import_rule(rule,
                                   list(report['details']['cves'].keys()))
                for cve in report['details']['cves']:
                    if cve not in CVES_CACHE:
                        db_import_cve(cve)
                    if not report['details']['cves'][cve]:
                        # False in the CVE dict indicates failed rule
                        rule_hits[CVES_CACHE[cve]] = {
                            'id': RULES_CACHE[rule],
                            'details': json.dumps(report['details'])
                        }

        db_import_system(system_data, rule_hits)

    with DatabasePool(WORKER_THREADS):
        db_init_caches()

        ADVISOR_QUEUE.listen(process_message)

        loop.run_forever()

        LOGGER.info('Shutting down.')
        executor.shutdown()
def main():
    # pylint: disable=too-many-branches, too-many-statements
    """Application entrypoint"""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    ensure_minimal_schema_version()
    LOGGER.info('Starting advisor listener.')

    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
    executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

    def process_message(msg):
        """Message processing logic"""
        PROCESS_MESSAGES.inc()
        LOGGER.debug('Message from topic %s, body: %s', msg.topic, msg.value)

        try:
            msg_dict = json.loads(msg.value.decode('utf8'))
        except json.decoder.JSONDecodeError:
            MESSAGE_PARSE_ERROR.inc()
            LOGGER.exception('Unable to parse message: ')
            return

        send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                    msg_dict['input'], 'processing',
                                    'Starting advisor evaluation')

        if not validate_kafka_msg(msg_dict, REQUIRED_MESSAGE_FIELDS):
            INVALID_INSIGHTS_ACC.inc()
            send_msg_to_payload_tracker(
                PAYLOAD_TRACKER_PRODUCER, msg_dict['input'], 'error',
                'Skipped advisor result due to message coming from non-insights account.'
            )
            LOGGER.debug(
                'Skipped advisor result due to message coming from non-insights account.'
            )
            return
        identity = get_identity(
            msg_dict['input']['platform_metadata']['b64_identity'])
        if identity is None:
            INVALID_IDENTITY.inc()
            send_msg_to_payload_tracker(
                PAYLOAD_TRACKER_PRODUCER, msg_dict['input'], 'error',
                'Skipped advisor result due to invalid identity header.')
            LOGGER.debug(
                'Skipped advisor result due to invalid identity header.')
            return
        if not is_entitled_insights(identity, allow_missing_section=True):
            MISSING_INSIGHTS_ENTITLEMENT.inc()
            send_msg_to_payload_tracker(
                PAYLOAD_TRACKER_PRODUCER, msg_dict['input'], 'error',
                'Skipped advisor result due to missing insights entitlement.')
            LOGGER.debug(
                'Skipped advisor result due to missing insights entitlement.')
            return

        # TODO: insert system into database if it's 1st upload, shall we update last seen?
        system_data = {
            'rh_account': msg_dict['input']['host']['account'],
            'display_name': msg_dict['input']['host']['display_name'],
            'inventory_id': msg_dict['input']['host']['id'],
            'stale_timestamp': msg_dict['input']['host']['stale_timestamp'],
            'stale_warning_timestamp': msg_dict['input']['host']['stale_warning_timestamp'],
            'culled_timestamp': msg_dict['input']['host']['culled_timestamp']
        }

        LOGGER.info("Evaluating rule hits for inventory_id: %s",
                    system_data["inventory_id"])

        rule_hits = {}

        reports = msg_dict['results']['reports']
        for report in reports:
            if 'cves' in report['details']:
                rule = report['rule_id']
                if rule in RULE_BLACKLIST:
                    # TODO: remove this once CVE_2017_5753_4_cpu_kernel and CVE_2017_5715_cpu_virt are merged
                    continue
                if rule not in RULES_CACHE:
                    db_import_rule(rule,
                                   list(report['details']['cves'].keys()))
                for cve in report['details']['cves']:
                    if cve not in CVES_CACHE:
                        db_import_cve(cve)
                    if not report['details']['cves'][cve]:
                        # False in the CVE dict indicates failed rule
                        rule_hits[CVES_CACHE[cve]] = {
                            'id': RULES_CACHE[rule],
                            'details': json.dumps(report['details']),
                            'cve_name': cve
                        }
                    elif report['details']['cves'][cve]:
                        rule_hits[CVES_CACHE[cve]] = {
                            'id': RULES_CACHE[rule],
                            'mitigation_reason': report['details']['cves'][cve]
                        }

        # make sure `success` exists for the finally block even if an unexpected error is raised
        success = False
        try:
            success = db_import_system(system_data, rule_hits, loop)
        except DatabaseError as exc:
            success = False
            # The exception should not get lost
            raise exc
        finally:
            LOGGER.debug("Finished evaluating rule hits for inventory_id: %s",
                         system_data["inventory_id"])
            if success:
                send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                            msg_dict['input'], 'success')
            else:
                send_msg_to_payload_tracker(
                    PAYLOAD_TRACKER_PRODUCER, msg_dict['input'], 'error',
                    'Error importing system to vulnerability')

    with DatabasePool(WORKER_THREADS):
        db_init_caches()

        ADVISOR_QUEUE.listen(process_message)

        loop.run_forever()

        LOGGER.info('Shutting down.')
        executor.shutdown()