Example #1
 def test_import_rule(self, pg_db_conn):  # pylint: disable=unused-argument
     """Test inserting new rule into database"""
     with DatabasePool(1):
         db_import_rule('test_rule|ERROR_KEY', ['CVE-2018-1', 'CVE-2018-2'])
         assert 'test_rule|ERROR_KEY' in RULES_CACHE
     with DatabasePool(2):
         db_import_rule('test_rule|CVES_NOT_IN_DB', ['CVE-2020-1', 'CVE-2020-2'])
         assert 'test_rule|CVES_NOT_IN_DB' in RULES_CACHE
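Every example on this page enters a with DatabasePool(n): block before touching the database, and several also borrow connections through DatabasePoolConnection. The snippet below is only a minimal sketch of how such a pair of context managers could be built on top of psycopg2's ThreadedConnectionPool; the class names mirror the examples, but the connection parameters and internals are assumptions rather than the project's actual implementation.

# Hypothetical sketch of the DatabasePool / DatabasePoolConnection pair used by
# the examples, backed by psycopg2's ThreadedConnectionPool. Connection
# parameters are placeholders for illustration only.
from psycopg2.pool import ThreadedConnectionPool

_POOL = None  # shared pool created by DatabasePool, borrowed from by DatabasePoolConnection


class DatabasePool:
    """Create a shared connection pool of the given size for the enclosed block."""

    def __init__(self, size):
        self.size = size

    def __enter__(self):
        global _POOL  # pylint: disable=global-statement
        _POOL = ThreadedConnectionPool(1, self.size, dbname="vuln_db", user="user",
                                       password="pwd", host="localhost")
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        global _POOL  # pylint: disable=global-statement
        _POOL.closeall()
        _POOL = None


class DatabasePoolConnection:
    """Borrow a single connection from the shared pool for the enclosed block."""

    def __enter__(self):
        self.conn = _POOL.getconn()
        return self.conn

    def __exit__(self, exc_type, exc_value, exc_tb):
        _POOL.putconn(self.conn)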
Example #2
def main():
    """Main VMaaS listener entrypoint."""
    init_logging()

    loop = asyncio.get_event_loop()
    status_app = create_status_app(LOGGER)
    _, status_site = create_status_runner(
        status_app,
        int(CFG.prometheus_port or CFG.vmaas_sync_prometheus_port),
        LOGGER,
        loop,
    )
    loop.run_until_complete(status_site.start())

    loop.run_until_complete(a_ensure_minimal_schema_version())

    LOGGER.info("Starting VMaaS sync service.")
    with DatabasePool(1):
        app_cont = VmaasSyncContext()

        def terminate(*_):
            """Trigger shutdown."""
            LOGGER.info("Signal received, stopping application.")
            loop.add_callback_from_signal(app_cont.app.shutdown)

        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
        for sig in signals:
            signal.signal(sig, terminate)

        web.run_app(app_cont.app, port=CFG.private_port)

    LOGGER.info("Shutting down.")
Example #3
    def test_process_update(self, pg_db_conn, caplog):  # pylint: disable=unused-argument
        """Test updating a system."""

        with DatabasePool(1):
            # make sure system is in DB
            db_import_system(A_SYSTEM, A_SYSTEM['vmaas-json'], [])

            # update the system
            with caplog.at_level(logging.INFO):
                process_update(A_SYSTEM_UPDATE)
            assert caplog.records[0].msg.startswith(
                "Updated system with inventory_id:")
            caplog.clear()

            # try to update system with non-existing id
            with caplog.at_level(logging.INFO):
                process_update({
                    'host': {
                        'id': '1-XXX',
                        'display_name': 'new.example.com'
                    }
                })
            assert caplog.records[0].msg.startswith(
                'Unable to update system, inventory_id not found: ')
            caplog.clear()
Example #4
    def test_process_msg_updated(self, caplog):
        """Test updated type msg"""
        ListenerCtx.set_listener_ctx()
        executor = ExecutorMock(process_update)
        ListenerCtx.executor = executor

        CFG.events_topic = TEST_TOPIC
        msg = KafkaMsgMock(
            TEST_TOPIC, """{
            "type": "updated",
            "host": {
                "id": "00000000-0000-0000-0000-000000000001",
                "account": 123,
                "system_profile": {},
                "display_name": 123,
                "insights_id": "11111111-0000-0000-0000-000000000000"
            },
            "timestamp": "2021-07-29T16:01:54.265289387Z"
        }""".encode("utf-8"))

        with caplog.at_level(logging.INFO):
            with DatabasePool(1):
                process_message(msg)

        assert "Received update event msg" in caplog.messages[0]
Example #5
    def test_process_msg_created(self, caplog, monkeypatch):
        """Test created type msg"""
        ListenerCtx.set_listener_ctx()
        executor = ExecutorMock(process_upload)
        ListenerCtx.executor = executor

        CFG.events_topic = TEST_TOPIC
        msg = KafkaMsgMock(
            TEST_TOPIC, """{
                "type": "created",
                "host": {
                    "id": "00000000-0000-0000-0000-000000000001",
                    "account": 123,
                    "system_profile": {},
                    "display_name": 123,
                    "insights_id": "11111111-0000-0000-0000-000000000000"
                },
                "timestamp": "2021-07-29T16:01:54.265289387Z",
                "platform_metadata": {
                    "b64_identity": 123,
                    "url": 123
                }
            }""".encode("utf-8"))

        monkeypatch.setattr(utils, "send_msg_to_payload_tracker", empty_fun)
        monkeypatch.setattr(listener.upload_listener, "get_identity",
                            empty_fun)
        monkeypatch.setattr(listener.upload_listener, "is_entitled_insights",
                            empty_fun)

        with caplog.at_level(logging.INFO):
            with DatabasePool(1):
                process_message(msg)

        assert "Received created/updated msg" in caplog.messages[0]
Example #6
    def test_process_delete(self, pg_db_conn, caplog):  # pylint: disable=unused-argument
        """Test deleting a system."""

        msg_dict = {
            'id': '0c32021e-8af8-4186-afb4-0255a71ece96',
            'account': A_SYSTEM_DELETE['account']
        }

        with DatabasePool(1):
            # make sure system is in DB
            system_copy = deepcopy(A_SYSTEM)
            system_copy['host']['id'] = msg_dict['id']
            db_import_system(system_copy, system_copy['vmaas-json'], [])

            # delete the system
            with caplog.at_level(logging.INFO):
                process_delete(msg_dict)
            assert caplog.records[0].msg.startswith(
                "Deleted system with inventory_id:")
            caplog.clear()

            with caplog.at_level(logging.WARNING):
                db_import_system(system_copy, system_copy['vmaas-json'], [])
            assert caplog.records[0].msg.startswith(
                'Received recently deleted inventory id:')
            caplog.clear()
Example #7
    def test_delete_system(self, pg_db_conn, caplog):  # pylint: disable=unused-argument
        """Test deleting a system."""
        with DatabasePool(1):
            # make sure system is in DB
            db_import_system(A_SYSTEM['inv-id'], A_SYSTEM['rh-acct'],
                             A_SYSTEM['s3-url'], A_SYSTEM['vmaas-json'],
                             A_SYSTEM['managed'])

            # now delete the system
            rtrn = db_delete_system(A_SYSTEM['inv-id'])
            assert rtrn['deleted']
            assert not rtrn['failed']

            # try to delete it again
            rtrn = db_delete_system(A_SYSTEM['inv-id'])
            assert not rtrn['deleted']
            assert not rtrn['failed']

            # try to delete system with invalid id
            with caplog.at_level(logging.ERROR):
                rtrn = db_delete_system(0)
            assert not rtrn['deleted']
            assert rtrn['failed']
            assert caplog.records[0].msg.startswith("Error deleting system:")
            caplog.clear()
Example #8
    def test_mitigation_to_hit(self, pg_db_conn, cleanup):  # pylint: disable=unused-argument
        """Test replacing a rule which has a mitigation and is active with another one which is inactive but has a mitigation"""
        with DatabasePool(2):
            with DatabasePoolConnection() as conn:
                with conn.cursor() as cur:
                    inv_id = 'INV-17'
                    system = deepcopy(SYSTEM_DICT)
                    system['inventory_id'] = inv_id
                    orig_cve_count_cache = self._system_cache(cur, inv_id)
                    orig_caches = self._account_caches(cur)

                    db_import_system(
                        system, {
                            1: {
                                'id': 1,
                                'mitigation_reason': 'SELinux mitigates',
                                'cve_name': 'CVE-2014-1'
                            },
                            9: {
                                'id': 4,
                                'details': '{"detail_key": "detail_value"}',
                                'cve_name': 'CVE-2018-1'
                            }
                        })

                    new_cve_count_cache = self._system_cache(cur, inv_id)
                    new_caches = self._account_caches(cur)

                    self._test_counts(orig_cve_count_cache,
                                      new_cve_count_cache, orig_caches,
                                      new_caches, 1, 9)
                    assert all([
                        self._cache_check(cur, account_id)
                        for account_id in ('0', '1', '2')
                    ])
Example #9
    def upgrade(self):
        """perform database upgrade"""
        with DatabasePool(1):
            with DatabasePoolConnection() as conn:
                try:
                    self._get_db_lock(conn)

                    db_version = self._get_current_db_version(conn)

                    if db_version == self.version_max:
                        LOGGER.info('Database is up to date at version: %d',
                                    db_version)
                        return
                    if db_version > self.version_max:
                        msg = 'Database version %d is greater than upgrade version %d' % (
                            db_version, self.version_max)
                        LOGGER.warning(msg)
                        return

                    LOGGER.info(
                        'Database requires upgrade from version %d to %d',
                        db_version, self.version_max)
                    upgrades_to_apply = self._get_upgrades_to_apply(
                        db_version, self.version_max)
                    for upgrade in upgrades_to_apply:
                        self._apply_upgrade(upgrade['ver'], upgrade['script'],
                                            conn)
                finally:
                    self._release_db_lock(conn)
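The upgrade routine above serializes concurrent upgraders through _get_db_lock and _release_db_lock. One plausible implementation of those two helpers, assuming a PostgreSQL session-level advisory lock taken on the borrowed connection (the lock key and all other details are assumptions):

    # Hypothetical lock helpers on the same class as upgrade(), using a
    # PostgreSQL session-level advisory lock so only one upgrader runs at a time.
    DB_UPGRADE_LOCK_ID = 2020  # arbitrary application-defined lock key (assumption)

    def _get_db_lock(self, conn):
        """Block until this connection holds the advisory lock."""
        with conn.cursor() as cur:
            cur.execute("SELECT pg_advisory_lock(%s)", (self.DB_UPGRADE_LOCK_ID,))

    def _release_db_lock(self, conn):
        """Release the advisory lock taken by _get_db_lock."""
        with conn.cursor() as cur:
            cur.execute("SELECT pg_advisory_unlock(%s)", (self.DB_UPGRADE_LOCK_ID,))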
Example #10
    async def test_read_websocket(self, pg_db_conn, monkeypatch, caplog):  # pylint: disable=unused-argument
        """Test getting msg from mqueue - requires combined sync_cve/re-evaluate setup"""
        # prep for sync_cve_md
        monkeypatch.setattr(
            vmaas_sync, 'paging', lambda endpoint, cve_request:
            (True, CVES_RESPONSE))
        monkeypatch.setattr(vmaas_sync, 'external_service_request',
                            lambda endpoint, method, headers, verify: [{}])
        # prep for re_evaluate_systems
        future = asyncio.Future()
        future.set_result(True)
        monkeypatch.setattr(VmaasSyncContext, '_websocket_reconnect',
                            lambda self: future)

        msg = VmaasWebsocketMsgMock("webapps-refreshed", WSMsgType.TEXT)

        srvapp = VmaasSyncContext()
        srvapp.evaluator_queue = TestMqueueWriter()  # mqueue.MQWriter(mqueue.EVALUATOR_TOPIC)
        srvapp.vmaas_websocket = VmaasWebsocketMock([msg])

        with caplog.at_level(logging.INFO):
            with DatabasePool(1):
                await TestVmaasSync.timed_future(srvapp._websocket_loop, 0.1)  # pylint: disable=protected-access

        assert caplog.records[0].msg == 'VMaaS cache refreshed'
        self.check_sync_logs(caplog.records, 1)
        self.check_sync_exploits_logs(caplog.records, 4)
        self.check_re_evaluate_all_logs(caplog.records, 6)
        caplog.clear()
Example #11
def main():  # pylint: disable=too-many-statements
    """Main kafka listener entrypoint."""
    init_logging()

    loop = asyncio.get_event_loop()
    status_app = create_status_app(LOGGER)
    _, status_site = create_status_runner(status_app, int(PROMETHEUS_PORT),
                                          LOGGER, loop)
    loop.run_until_complete(status_site.start())

    loop.run_until_complete(a_ensure_minimal_schema_version())
    LOGGER.info("Starting upload listener.")

    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))

    ListenerCtx.set_listener_ctx()

    with DatabasePool(WORKER_THREADS):
        # prepare repo name to id cache
        db_init_repo_cache()
        LISTENER_QUEUE.listen(process_message)

        # wait until loop is stopped from terminate callback
        loop.run_forever()

        LOGGER.info("Shutting down.")
        ListenerCtx.executor.shutdown()
Example #12
    def test_process_delete(self, pg_db_conn, caplog):  # pylint: disable=unused-argument
        """Test deleting a system."""

        msg_dict = {'id': A_SYSTEM_DELETE['id'] + '1'}

        with DatabasePool(1):
            # make sure system is in DB
            system_copy = deepcopy(A_SYSTEM)
            system_copy['host']['id'] += '1'
            db_import_system(system_copy, system_copy['vmaas-json'], [])

            # delete the system
            with caplog.at_level(logging.INFO):
                process_delete(msg_dict)
            assert caplog.records[0].msg.startswith(
                "Deleted system with inventory_id:")
            caplog.clear()

            # try to delete the system again
            with caplog.at_level(logging.INFO):
                process_delete(msg_dict)
            assert caplog.records[0].msg.startswith(
                "Unable to delete system, inventory_id not found:")
            caplog.clear()

            with caplog.at_level(logging.WARNING):
                db_import_system(system_copy, system_copy['vmaas-json'], [])
            assert caplog.records[0].msg.startswith(
                'Received recently deleted inventory id:')
            caplog.clear()
Example #13
    def test_rule_adding_cve(self, pg_db_conn, cleanup, cve_id, rule_id,
                             cve_name):  # pylint: disable=unused-argument
        """
        Tests adding a rule which adds one CVE to a system
        In some cases we also remove other rules from the system, but the rules in this test were chosen so that they are either not active or not VMaaS-mitigated
        """
        with DatabasePool(2):
            with DatabasePoolConnection() as conn:
                with conn.cursor() as cur:
                    system = deepcopy(SYSTEM_DICT)
                    system['inventory_id'] = 'INV-4'
                    rule_hits = {
                        cve_id: {
                            'id': rule_id,
                            'details': '{"detail_key": "detail_value"}',
                            'cve_name': cve_name
                        }
                    }

                    orig_cve_count_cache = self._system_cache(cur, 'INV-4')
                    orig_caches = self._account_caches(cur)

                    db_import_system(system, rule_hits)

                    new_cve_count_cache = self._system_cache(cur, 'INV-4')
                    new_caches = self._account_caches(cur)

                    self._test_counts(orig_cve_count_cache,
                                      new_cve_count_cache, orig_caches,
                                      new_caches, 1, cve_id)
                    assert all([
                        self._cache_check(cur, account_id)
                        for account_id in ('0', '1', '2')
                    ])
Example #14
    def test_process_upload(self, pg_db_conn, monkeypatch, caplog):  # pylint: disable=unused-argument
        """Test to see that upload only sends eval-msgs on new systems and ones with new vmaas_json"""
        same_json = "{'diff': False}"
        diff_json = "{'diff': True}"
        upld_data = {
            'id': 'A-SYSTEM-ID',
            'account': 'AN-ACCOUNT',
            'url': 'A-URL'
        }
        monkeypatch.setattr(MQWriter, 'send',
                            lambda self, msg, loop: LOGGER.info('SENT'))
        monkeypatch.setattr(listener.upload_listener, 'parse_archive',
                            lambda upld_dta: (same_json, []))

        with DatabasePool(1):
            # first-upload - should send
            caplog.clear()
            with caplog.at_level(logging.INFO):
                process_upload(upld_data, None)
            assert caplog.records[0].msg == 'SENT'

            # re-upload - should not send
            caplog.clear()
            with caplog.at_level(logging.INFO):
                process_upload(upld_data, None)
            assert not caplog.records

            # same-id, diff-vmaas upload - should send
            monkeypatch.setattr(listener.upload_listener, 'parse_archive',
                                lambda upld_dta: (diff_json, []))
            caplog.clear()
            with caplog.at_level(logging.INFO):
                process_upload(upld_data, None)
            assert caplog.records[0].msg == 'SENT'
Example #15
    def test_import_system(self, pg_db_conn, caplog):  # pylint: disable=unused-argument
        """Test importing a system that is not yet in the DB, followed by the same system once it is already there"""
        # new system
        with DatabasePool(1):
            rtrn = db_import_system(A_SYSTEM, A_SYSTEM['vmaas-json'], [])
            assert ImportStatus.INSERTED | ImportStatus.CHANGED == rtrn

            self._mark_evaluated(A_SYSTEM['host']['id'])

            # And now it's an rtrn['updated'], but same json
            rtrn = db_import_system(A_SYSTEM, A_SYSTEM['vmaas-json'], [])
            assert ImportStatus.UPDATED == rtrn

            # And now it's another rtrn['updated'], same json
            rtrn = db_import_system(A_SYSTEM, A_SYSTEM['vmaas-json'], [])
            assert ImportStatus.UPDATED == rtrn

            # And now it's an rtrn['updated'], with diff json
            rtrn = db_import_system(A_SYSTEM, A_SYSTEM['vmaas-json'] + '-1',
                                    [])
            assert ImportStatus.UPDATED | ImportStatus.CHANGED == rtrn

            # And try to import system with invalid inventory id
            with caplog.at_level(logging.ERROR):
                system_copy = deepcopy(A_SYSTEM)
                system_copy['host']['id'] = None
                db_import_system(system_copy, A_SYSTEM['vmaas-json'], [])
            assert caplog.records[0].msg.startswith("Error importing system:")
            caplog.clear()
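The assertions above combine ImportStatus members with the | operator, so db_import_system evidently returns a bit-flag style value. A minimal sketch of such an enum, assuming Python's enum.Flag (the member names come from the test; the values and comments are assumptions):

# Hypothetical ImportStatus flags matching how the test combines them with "|".
from enum import Flag, auto


class ImportStatus(Flag):
    INSERTED = auto()  # a new system row was created
    UPDATED = auto()   # an existing system row was touched
    CHANGED = auto()   # the stored vmaas_json differs from the previously stored one

With enum.Flag, a brand-new system can report ImportStatus.INSERTED | ImportStatus.CHANGED while a re-upload with identical JSON reports plain ImportStatus.UPDATED, which is exactly what the assertions check.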
Example #16
    def test_rule_not_changing_cves(self, pg_db_conn, cleanup, inv_id, cve_id,
                                    rule_id, cve_name):  # pylint: disable=unused-argument
        """Inserts CVE which is tied to inactive rule, nothing shall happen"""
        with DatabasePool(2):
            with DatabasePoolConnection() as conn:
                with conn.cursor() as cur:
                    system = deepcopy(SYSTEM_DICT)
                    system['inventory_id'] = inv_id
                    rule_hits = {
                        cve_id: {
                            'id': rule_id,
                            'details': '{"detail_key": "detail_value"}',
                            'cve_name': cve_name
                        }
                    }

                    orig_cve_count_cache = self._system_cache(cur, inv_id)
                    orig_caches = self._account_caches(cur)

                    db_import_system(system, rule_hits)

                    new_cve_count_cache = self._system_cache(cur, inv_id)
                    new_caches = self._account_caches(cur)

                    assert orig_cve_count_cache == new_cve_count_cache
                    assert orig_caches == new_caches
                    assert all([
                        self._cache_check(cur, account_id)
                        for account_id in ('0', '1', '2')
                    ])
Example #17
def main():
    """Main kafka listener entrypoint."""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    LOGGER.info("Starting upload listener.")

    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
    executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

    def process_message(msg):
        """Message processing logic"""
        PROCESS_MESSAGES.inc()
        LOGGER.info('Received message from topic %s: %s', msg.topic, msg.value)

        try:
            msg_dict = json.loads(msg.value.decode("utf8"))
        except json.decoder.JSONDecodeError:
            MESSAGE_PARSE_ERROR.inc()
            LOGGER.exception("Unable to parse message: ")
            return

        if msg.topic == mqueue.UPLOAD_TOPIC:
            process_func = process_upload
        elif msg.topic == mqueue.EVENTS_TOPIC:
            if msg_dict['type'] == 'delete':
                process_func = process_delete
            else:
                UNKNOWN_EVENT_TYPE.inc()
                LOGGER.error("Received unknown event type: %s",
                             msg_dict['type'])
                return
        else:
            UNKNOWN_TOPIC.inc()
            LOGGER.error("Received message on unsupported topic: %s",
                         msg.topic)
            return

        if 'id' not in msg_dict or msg_dict["id"] is None:
            MISSING_ID.inc()
            LOGGER.warning(
                "Unable to process message, inventory ID is missing.")
            return

        future = executor.submit(process_func, msg_dict, loop=loop)
        future.add_done_callback(on_thread_done)

    with DatabasePool(WORKER_THREADS):
        LISTENER_QUEUE.listen(process_message)

        # wait until loop is stopped from terminate callback
        loop.run_forever()

        LOGGER.info("Shutting down.")
        executor.shutdown()
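Both listener entrypoints push work through BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS) and attach on_thread_done to each future. A common way to build such a back-pressure executor is a ThreadPoolExecutor guarded by a semaphore; the sketch below follows that pattern and is an assumption, not the project's code:

# Hypothetical BoundedExecutor: a ThreadPoolExecutor whose submit() blocks once
# queue_size tasks are already pending, plus the done-callback used above.
from concurrent.futures import ThreadPoolExecutor
from threading import BoundedSemaphore
import logging

LOGGER = logging.getLogger(__name__)


class BoundedExecutor:
    def __init__(self, queue_size, max_workers):
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        self.semaphore = BoundedSemaphore(queue_size + max_workers)

    def submit(self, fn, *args, **kwargs):
        self.semaphore.acquire()            # block when the queue is full
        try:
            future = self.executor.submit(fn, *args, **kwargs)
        except Exception:
            self.semaphore.release()
            raise
        future.add_done_callback(lambda _: self.semaphore.release())
        return future

    def shutdown(self, wait=True):
        self.executor.shutdown(wait=wait)


def on_thread_done(future):
    """Log (rather than silently drop) any exception raised by a worker task."""
    try:
        future.result()
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception("Future %s hit an exception:", future)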
Example #18
def main():
    """Main entrypoint"""
    with DatabasePool(1):
        bucket_mgr = S3BucketManager(debug=DEBUG)
        mgather = MetricsGatherer(bucket_mgr,
                                  exclude_accounts_text=EXCLUDE_ACCOUNTS,
                                  debug=DEBUG)

        mgather.run()
Example #19
 def test_import_system_platform(self, pg_db_conn):  # pylint: disable=unused-argument
     """Test insertion of system data"""
     with DatabasePool(1):
         with DatabasePoolConnection() as conn:
             with conn.cursor() as cur:
                 system = deepcopy(SYSTEM_DICT)
                 system['inventory_id'] = 'INV-111'
                 db_import_system_platform(cur, system)  # import new system
                 db_import_system_platform(cur, system)  # update recently imported system
Example #20
    def test_sync_cve_md_negative(self, pg_db_conn, monkeypatch, caplog, cleanup):  # pylint: disable=unused-argument
        """Test calling sync_cve_md with vmaas responding with nothing"""
        monkeypatch.setattr(vmaas_sync, 'paging', lambda endpoint, cve_request: (False, {}))

        with caplog.at_level(logging.INFO):
            with DatabasePool(1):
                result = sync_cve_md()
            assert not result
        assert caplog.records[0].msg == 'Syncing CVE metadata'
        caplog.clear()
Example #21
    def test_sync_cve_md_positive(self, pg_db_conn, monkeypatch, caplog, cleanup):  # pylint: disable=unused-argument
        """Test calling sync_cve_md with vmaas responding with data"""
        monkeypatch.setattr(vmaas_sync, 'paging', lambda endpoint, cve_request: (True, CVES_RESPONSE))

        with caplog.at_level(logging.INFO):
            with DatabasePool(1):
                result = sync_cve_md()
            assert result
        self.check_sync_logs(caplog.records, 0)
        caplog.clear()
Example #22
 async def test_re_evaluate_all(self, pg_db_conn, monkeypatch, caplog):  # pylint: disable=unused-argument
     """Test re_evaluate_systems - all-systems mode"""
     monkeypatch.setattr(ServerApplication, '_websocket_reconnect', lambda self: True)
     monkeypatch.setattr(tornado.websocket, 'websocket_connect', lambda: True)
     srvapp = VmaasServerApplication()
     srvapp.evaluator_queue = TestMqueueWriter()  # mqueue.MQWriter(mqueue.EVALUATOR_TOPIC)
     with caplog.at_level(logging.INFO):
         with DatabasePool(1):
             await srvapp.re_evaluate_systems(repo_based=False)
     self.check_re_evaluate_all_logs(caplog.records, 0)
     caplog.clear()
Example #23
 async def test_re_evaluate_all(self, pg_db_conn, monkeypatch, caplog):  # pylint: disable=unused-argument
     """Test re_evaluate_systems - all-systems mode"""
     VmaasSyncContext.evaluator_queue = TestMqueueWriter()
     future = asyncio.Future()
     future.set_result(True)
     monkeypatch.setattr(VmaasSyncContext, '_websocket_reconnect', lambda self: future)
     with caplog.at_level(logging.INFO):
         with DatabasePool(1):
             await ReEvaluateHandler.re_evaluate_systems(repo_based=False)
     self.check_re_evaluate_all_logs(caplog.records, 0)
     caplog.clear()
Example #24
 async def test_re_evaluate_repo_based(self, pg_db_conn, monkeypatch, caplog):  # pylint: disable=unused-argument
     """Test re_evaluate_systems - repo-based mode with page control"""
     monkeypatch.setattr(vmaas_sync, 'paging', lambda endpoint, repos_request: (True, REPOS_RESPONSE))
     monkeypatch.setattr(ServerApplication, '_websocket_reconnect', lambda self: True)
     monkeypatch.setattr(tornado.websocket, 'websocket_connect', lambda: True)
     srvapp = VmaasServerApplication()
     srvapp.evaluator_queue = TestMqueueWriter()  # mqueue.MQWriter(mqueue.EVALUATOR_TOPIC)
     with caplog.at_level(logging.INFO):
         with DatabasePool(1):
             await srvapp.re_evaluate_systems(repo_based=True)
     self.check_re_eval_repo_based_logs(caplog.records, 0)
     caplog.clear()
Example #25
 async def test_re_evaluate_repo_based(self, pg_db_conn, monkeypatch, caplog):  # pylint: disable=unused-argument
     """Test re_evaluate_systems - repo-based mode with page control"""
     monkeypatch.setattr(vmaas_sync, 'paging', lambda endpoint, repos_request: (True, REPOS_RESPONSE))
     future = asyncio.Future()
     future.set_result(True)
     monkeypatch.setattr(VmaasSyncContext, '_websocket_reconnect', lambda self: future)
     VmaasSyncContext.evaluator_queue = TestMqueueWriter()  # mqueue.MQWriter(mqueue.EVALUATOR_TOPIC)
     with caplog.at_level(logging.INFO):
         with DatabasePool(1):
             await ReEvaluateHandler.re_evaluate_systems(repo_based=True)
     self.check_re_eval_repo_based_logs(caplog.records, 0)
     caplog.clear()
Example #26
def ensure_minimal_schema_version():
    """Ensure that database schema is up-to-date, wait if it's not."""
    with DatabasePool(1):
        with DatabasePoolConnection() as conn:
            with conn.cursor() as cur:
                while True:
                    cur.execute("SELECT version FROM db_version WHERE name = 'schema_version'")
                    current_schema = int(cur.fetchone()[0])
                    if current_schema >= MINIMAL_SCHEMA:
                        LOGGER.info("Current schema version: %s, minimal required: %s, OK", current_schema, MINIMAL_SCHEMA)
                        return
                    LOGGER.warning("Current schema version: %s, minimal required: %s, waiting...", current_schema, MINIMAL_SCHEMA)
                    sleep(10)
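The asyncio entrypoints earlier on this page await a_ensure_minimal_schema_version() rather than calling the blocking check above directly. One plausible (assumed) async wrapper simply runs the blocking wait loop in the default executor so the event loop stays responsive:

# Hypothetical async wrapper over the blocking schema check; the name follows
# how the entrypoint examples call it, the body is an assumption.
import asyncio


async def a_ensure_minimal_schema_version():
    loop = asyncio.get_event_loop()
    await loop.run_in_executor(None, ensure_minimal_schema_version)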
Example #27
    def run(self):
        """
        This method evaluates incoming system package profiles using VMaaS
        :return:
        """

        loop = asyncio.get_event_loop()
        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
        for sig in signals:
            loop.add_signal_handler(
                sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
        executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

        # pylint: disable=too-many-branches
        def process_message(message):
            """Message procession logic"""
            try:
                msg_dict = json.loads(message.value.decode('utf-8'))
                FailedCache.process_failed_cache(FailedCache.upload_cache, executor, self.process_upload_or_re_evaluate, loop)
            except json.decoder.JSONDecodeError:
                MESSAGE_PARSE_ERROR.inc()
                LOGGER.exception("Unable to parse message: ")
                return
            if message.topic in kafka_evaluator_topic:
                if 'type' not in msg_dict:
                    LOGGER.error("Received message is missing type field: %s", msg_dict)
                    return
                if msg_dict['type'] in ['upload_new_file', 're-evaluate_system']:
                    process_func = self.process_upload_or_re_evaluate
                    if msg_dict['type'] == 'upload_new_file':
                        send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER, msg_dict, 'processing',
                                                    status_msg='Scheduled for evaluation', loop=loop)
                else:
                    UNKNOWN_MSG.inc()
                    LOGGER.error("Received unknown message type: %s", msg_dict['type'])
                    return

                future = executor.submit(process_func, msg_dict, loop=loop)
                future.add_done_callback(on_thread_done)
            else:
                UNKNOWN_TOPIC.inc()
                LOGGER.error("Received message on unsupported topic: %s", message.topic)

        with DatabasePool(WORKER_THREADS):
            CONSUMER_QUEUE.listen(process_message)

            # wait until loop is stopped from terminate callback
            loop.run_forever()

            LOGGER.info("Shutting down.")
            executor.shutdown()
Example #28
def run():
    """Application entrypoint"""
    LOGGER.info("Started cacheman job.")

    conn = get_conn()
    cur = conn.cursor()

    current_cache = {}
    cur.execute("""SELECT rh_account_id, cve_id, systems_affected, systems_status_divergent
                   FROM cve_account_cache""")
    for rh_account_id, cve_id, systems_affected, systems_status_divergent in cur.fetchall():
        current_cache.setdefault(rh_account_id, {})[cve_id] = (systems_affected, systems_status_divergent)

    cur.execute("""SELECT sp.rh_account_id, a.name, a.cve_cache_from,
                          GREATEST(MAX(sp.last_evaluation), MAX(sp.advisor_evaluated), MAX(sp.when_deleted),
                                   MAX(a.last_status_change)) AS last_system_change,
                          a.cve_cache_keepalive,
                          COUNT(*) AS total_systems
                   FROM system_platform sp INNER JOIN
                        rh_account a on sp.rh_account_id = a.id
                   GROUP BY sp.rh_account_id, a.name, a.cve_cache_from, a.cve_cache_keepalive
                   HAVING COUNT(*) >= %s""", (CFG.cache_minimal_account_systems,))
    accounts = [(account_id, account_name, cve_cache_from, last_system_change, cve_cache_keepalive)
                for account_id, account_name, cve_cache_from, last_system_change, cve_cache_keepalive, _ in cur.fetchall()
                if validate_cve_cache_keepalive(cve_cache_keepalive, 2)]
    LOGGER.info("Accounts with enabled cache: %s", len(accounts))
    accounts_to_refresh = [account for account in accounts if account[3] and (not account[2] or account[3] > account[2])]
    LOGGER.info("Accounts requiring cache refresh: %s", len(accounts_to_refresh))

    # Process accounts in parallel
    with DatabasePool(CACHE_WORKERS):
        executor = BoundedExecutor(CACHE_WORKERS, max_workers=CACHE_WORKERS)
        futures = []
        for account_id, account_name, _, _, _ in accounts_to_refresh:
            futures.append(executor.submit(_materialize_account_cache, account_id, account_name, current_cache))
        for future in futures:
            future.result()
        executor.shutdown()
    # Pop out cached accounts after all workers are done
    for account_id, _, _, _, _ in accounts:
        current_cache.pop(account_id, None)

    LOGGER.info("Accounts to disable cache: %s", len(current_cache))
    for account_id in current_cache:
        cur.execute("""DELETE FROM cve_account_cache WHERE rh_account_id = %s""", (account_id,))
        cur.execute("""UPDATE rh_account SET cve_cache_from = NULL WHERE id = %s""", (account_id,))
        conn.commit()

    cur.close()
    conn.close()
    LOGGER.info("Finished cacheman job.")
Example #29
    def test_sync_cve_md_negative(self, pg_db_conn, monkeypatch, caplog,
                                  cleanup):  # pylint: disable=unused-argument
        """Test calling sync_cve_md with vmaas responding with nothing"""
        monkeypatch.setattr('vmaas_sync.vmaas_sync.vmaas_post_request',
                            lambda endpoint, cve_request, session: None)

        with caplog.at_level(logging.INFO):
            with DatabasePool(1):
                result = sync_cve_md(TestMqueueWriter())
            assert not result
        assert caplog.records[0].msg == 'Syncing CVE metadata'
        assert caplog.records[1].msg.startswith('Downloading CVE metadata')
        assert caplog.records[2].msg == 'Finished syncing CVE metadata'
        caplog.clear()
Example #30
    def test_init_repo_cache(self, pg_db_conn):
        """Test initializing repo cache."""

        cur = pg_db_conn.cursor()
        self._clean_tmp_db_items(cur, ("s1", ))
        inserted = db_import_repos(cur, ["repo1", "repo2", "repo1"])
        assert len(inserted) == 2
        assert set(inserted) == {"repo1", "repo2"}
        assert len(listener.upload_listener.REPO_ID_CACHE) == 2
        listener.upload_listener.REPO_ID_CACHE = {}
        with DatabasePool(1):
            db_init_repo_cache()
        assert len(listener.upload_listener.REPO_ID_CACHE) == 2
        self._clean_tmp_db_items(cur, ("s1", ))