def process(self):
    """Process the message."""
    if self.event_type in (KAFKA_APPLICATION_CREATE,):
        storage.create_source_event(self.source_id, self.auth_header, self.offset)

    if storage.is_known_source(self.source_id):
        if self.event_type in (KAFKA_APPLICATION_CREATE,):
            self.save_sources_details()
            self.save_source_info(bill=True)
            # _Authentication_ messages are responsible for saving credentials.
            # However, OCP does not send an Auth message. Therefore, we need
            # to run the following branch for OCP which completes the source
            # creation cycle for an OCP source.
            if storage.get_source_type(self.source_id) == Provider.PROVIDER_OCP:
                self.save_source_info(auth=True)
        if self.event_type in (KAFKA_APPLICATION_UPDATE,):
            if storage.get_source_type(self.source_id) == Provider.PROVIDER_AZURE:
                # Because azure auth is split in the Sources backend, we need to check both
                # auth and billing when we receive either an auth update or an app update event
                updated = self.save_source_info(auth=True, bill=True)
            else:
                updated = self.save_source_info(bill=True)
            if updated:
                LOG.info(f"[ApplicationMsgProcessor] source_id {self.source_id} updated")
                storage.enqueue_source_create_or_update(self.source_id)
            else:
                LOG.info(f"[ApplicationMsgProcessor] source_id {self.source_id} not updated. No changes detected.")

    if self.event_type in (KAFKA_APPLICATION_DESTROY,):
        storage.enqueue_source_delete(self.source_id, self.offset, allow_out_of_order=True)
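# A minimal sketch (not part of the module) of a unit test exercising the OCP
# branch of process() above. The ApplicationMsgProcessor class name comes from
# the log messages in process(); its constructor signature and these patch
# targets are assumptions for illustration only.
from unittest.mock import patch


def test_application_create_ocp_saves_auth(self):
    """Sketch: an OCP application.create event should also save auth info."""
    processor = ApplicationMsgProcessor(  # hypothetical constructor signature
        event_type=KAFKA_APPLICATION_CREATE, source_id=1, auth_header="header", offset=5
    )
    with patch.object(storage, "create_source_event"), patch.object(
        storage, "is_known_source", return_value=True
    ), patch.object(storage, "get_source_type", return_value=Provider.PROVIDER_OCP), patch.object(
        processor, "save_sources_details"
    ), patch.object(processor, "save_source_info") as mock_save:
        processor.process()
    # bill=True for the create path, then auth=True for the OCP-only branch
    mock_save.assert_any_call(bill=True)
    mock_save.assert_any_call(auth=True)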
def test_enqueue_source_delete_out_of_order_source_destroy(self):
    """Test for enqueuing source delete before receiving create for Source.destroy."""
    test_source_id = 3
    test_offset = 4

    storage.enqueue_source_delete(test_source_id, test_offset, allow_out_of_order=False)
    self.assertFalse(Sources.objects.filter(source_id=test_source_id).exists())
def test_enqueue_source_delete_out_of_order(self):
    """Test for enqueuing source delete before receiving create."""
    test_source_id = 3
    test_offset = 4

    storage.enqueue_source_delete(test_source_id, test_offset, allow_out_of_order=True)
    response = Sources.objects.get(source_id=test_source_id)
    self.assertTrue(response.out_of_order_delete)
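# The two tests above pin down the out-of-order contract: a destroy that
# arrives before its create is recorded as a stub row with
# out_of_order_delete=True, but only when allow_out_of_order=True. Below is a
# sketch of how create_source_event could honor that flag when the create
# finally arrives; the real implementation in sources.storage may differ, and
# the field names are taken from the tests above.
def create_source_event(source_id, auth_header, offset):
    """Sketch: skip creation if a destroy for this source already arrived."""
    existing = Sources.objects.filter(source_id=source_id).first()
    if existing and existing.out_of_order_delete:
        # The destroy event won the race; drop the stub instead of creating.
        existing.delete()
        return
    if not existing:
        Sources.objects.create(source_id=source_id, auth_header=auth_header, offset=offset)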
def process_message(app_type_id, msg):  # noqa: C901
    """
    Process message from Platform-Sources kafka service.

    Handler for various application/source create and delete events.
    'create' events:
        Issues a Sources REST API call to get additional context for the
        Platform-Sources kafka event. This information is stored in the
        Sources database table.
    'destroy' events:
        Enqueues a source delete event which will be processed in the
        synchronize_sources method.

    Args:
        app_type_id - application type identifier
        msg - kafka message

    Returns:
        None

    """
    LOG.info(f"Processing Event: {msg}")
    msg_data = None
    try:
        msg_data = cost_mgmt_msg_filter(msg)
    except SourceNotFoundError:
        LOG.warning(f"Source not found in platform sources. Skipping msg: {msg}")
        return

    if not msg_data:
        LOG.debug(f"Message not intended for cost management: {msg}")
        return

    if msg_data.get("event_type") in (KAFKA_APPLICATION_CREATE,):
        storage.create_source_event(msg_data.get("source_id"), msg_data.get("auth_header"), msg_data.get("offset"))

        if storage.is_known_source(msg_data.get("source_id")):
            sources_network_info(msg_data.get("source_id"), msg_data.get("auth_header"))

    elif msg_data.get("event_type") in (KAFKA_AUTHENTICATION_CREATE, KAFKA_AUTHENTICATION_UPDATE):
        if msg_data.get("event_type") in (KAFKA_AUTHENTICATION_CREATE,):
            storage.create_source_event(  # this will create the source _only_ if it does not exist
                msg_data.get("source_id"), msg_data.get("auth_header"), msg_data.get("offset")
            )

        save_auth_info(msg_data.get("auth_header"), msg_data.get("source_id"))

    elif msg_data.get("event_type") in (KAFKA_SOURCE_UPDATE,):
        if storage.is_known_source(msg_data.get("source_id")) is False:
            LOG.info("Update event for unknown source id, skipping...")
            return
        sources_network_info(msg_data.get("source_id"), msg_data.get("auth_header"))

    elif msg_data.get("event_type") in (KAFKA_APPLICATION_DESTROY,):
        storage.enqueue_source_delete(msg_data.get("source_id"), msg_data.get("offset"), allow_out_of_order=True)

    elif msg_data.get("event_type") in (KAFKA_SOURCE_DESTROY,):
        storage.enqueue_source_delete(msg_data.get("source_id"), msg_data.get("offset"))

    if msg_data.get("event_type") in (KAFKA_SOURCE_UPDATE, KAFKA_AUTHENTICATION_UPDATE):
        storage.enqueue_source_update(msg_data.get("source_id"))
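# For reference, process_message() only relies on a handful of msg_data keys,
# so a filtered message can be modeled as a plain dict. This example shape is
# an assumption inferred from the .get() calls above, not the literal return
# value of cost_mgmt_msg_filter.
example_msg_data = {
    "event_type": KAFKA_APPLICATION_CREATE,  # one of the KAFKA_* constants
    "source_id": 3,  # Platform-Sources source identifier
    "auth_header": "base64-encoded-identity-header",
    "offset": 42,  # kafka offset, used for out-of-order delete handling
}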
async def process_messages(msg_pending_queue):  # pragma: no cover
    """
    Process messages from Platform-Sources kafka service.

    Handler for various application/source create and delete events.
    'create' events:
        Issues a Sources REST API call to get additional context for the
        Platform-Sources kafka event. This information is stored in the
        Sources database table.
    'destroy' events:
        Enqueues a source delete event which will be processed in the
        synchronize_sources method.

    Args:
        msg_pending_queue (Asyncio queue): Queue to hold kafka messages to be filtered

    Returns:
        None

    """
    LOG.info('Waiting to process incoming kafka messages...')
    while True:
        msg_data = await msg_pending_queue.get()

        LOG.info(f'Processing Event: {str(msg_data)}')
        try:
            if msg_data.get('event_type') in (KAFKA_APPLICATION_CREATE, KAFKA_SOURCE_UPDATE):
                storage.create_provider_event(
                    msg_data.get('source_id'), msg_data.get('auth_header'), msg_data.get('offset')
                )
                with concurrent.futures.ThreadPoolExecutor() as pool:
                    await EVENT_LOOP.run_in_executor(
                        pool, sources_network_info, msg_data.get('source_id'), msg_data.get('auth_header')
                    )
            elif msg_data.get('event_type') in (KAFKA_AUTHENTICATION_CREATE, KAFKA_AUTHENTICATION_UPDATE):
                with concurrent.futures.ThreadPoolExecutor() as pool:
                    await EVENT_LOOP.run_in_executor(
                        pool, sources_network_auth_info, msg_data.get('resource_id'), msg_data.get('auth_header')
                    )
                msg_data['source_id'] = storage.get_source_from_endpoint(msg_data.get('resource_id'))
            elif msg_data.get('event_type') in (KAFKA_APPLICATION_DESTROY, KAFKA_SOURCE_DESTROY):
                storage.enqueue_source_delete(msg_data.get('source_id'))

            if msg_data.get('event_type') in (KAFKA_SOURCE_UPDATE, KAFKA_AUTHENTICATION_UPDATE):
                storage.enqueue_source_update(msg_data.get('source_id'))
        except Exception as error:
            # The reason for catching all exceptions is to ensure that the event
            # loop remains active in the event that message processing fails unexpectedly.
            source_id = str(msg_data.get('source_id', 'unknown'))
            LOG.error(f'Source {source_id} Unexpected message processing error: {str(error)}')
def test_enqueue_source_delete_db_down(self):
    """Tests enqueues source_delete with database error."""
    test_source_id = 2
    test_offset = 3
    ocp_obj = Sources(source_id=test_source_id, offset=test_offset, out_of_order_delete=False, pending_delete=False)
    ocp_obj.save()

    with patch.object(Sources, "save") as mock_object:
        mock_object.side_effect = InterfaceError("Error")
        with self.assertRaises(InterfaceError):
            storage.enqueue_source_delete(test_source_id, test_offset)
def test_enqueue_source_delete(self):
    """Test for enqueuing source delete."""
    test_source_id = 3
    aws_obj = Sources(
        source_id=test_source_id,
        auth_header=self.test_header,
        offset=3,
        endpoint_id=4,
        source_type=Provider.PROVIDER_AWS,
        name='Test AWS Source',
        billing_source={'bucket': 'test-bucket'},
    )
    aws_obj.save()

    storage.enqueue_source_delete(test_source_id)

    response = Sources.objects.get(source_id=test_source_id)
    self.assertTrue(response.pending_delete)
def test_enqueue_source_delete(self):
    """Test for enqueuing source delete."""
    test_source_id = 3
    test_offset = 3
    aws_obj = Sources(
        source_id=test_source_id,
        auth_header=self.test_header,
        offset=test_offset,
        source_type=Provider.PROVIDER_AWS,
        name="Test AWS Source",
        billing_source={"bucket": "test-bucket"},
    )
    aws_obj.save()

    storage.enqueue_source_delete(test_source_id, test_offset)

    response = Sources.objects.get(source_id=test_source_id)
    self.assertTrue(response.pending_delete)
def test_enqueue_source_delete(self):
    """Test for enqueuing source delete."""
    test_source_id = 3
    test_offset = 3
    account_name = "Test Provider"
    provider_uuid = faker.uuid4()
    ocp_provider = Provider.objects.create(
        uuid=provider_uuid,
        name=account_name,
        type=Provider.PROVIDER_OCP,
        authentication=ProviderAuthentication.objects.create(credentials={"cluster_id": "my-cluster"}),
        billing_source=ProviderBillingSource.objects.create(data_source={}),
        customer=Customer.objects.create(account_id="123", schema_name="myschema"),
        setup_complete=False,
    )
    ocp_provider.save()

    ocp_obj = Sources(
        source_id=test_source_id,
        auth_header=self.test_header,
        offset=test_offset,
        source_type=Provider.PROVIDER_OCP,
        name=account_name,
        koku_uuid=ocp_provider.uuid,
    )
    ocp_obj.save()

    storage.enqueue_source_delete(test_source_id, test_offset)

    source_response = Sources.objects.get(source_id=test_source_id)
    self.assertTrue(source_response.pending_delete)

    provider_response = Provider.objects.get(uuid=provider_uuid)
    self.assertFalse(provider_response.active)
    self.assertIsNone(provider_response.billing_source)
    self.assertIsNone(provider_response.authentication)
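# Taken together, the delete tests above imply this observable contract for
# enqueue_source_delete: mark the Sources row pending_delete, and if a koku
# provider was already created for it, deactivate the provider and detach its
# billing/authentication records. A sketch under those assumptions follows;
# the real implementation lives in sources.storage and may wire the provider
# deactivation up differently (for example via a post_save signal).
def enqueue_source_delete(source_id, offset, allow_out_of_order=False):
    """Sketch only: satisfy the assertions made by the tests above."""
    try:
        source = Sources.objects.get(source_id=source_id)
        source.pending_delete = True
        source.save()
        if source.koku_uuid:
            provider = Provider.objects.get(uuid=source.koku_uuid)
            provider.active = False
            provider.billing_source = None
            provider.authentication = None
            provider.save()
    except Sources.DoesNotExist:
        if allow_out_of_order:
            # Record the destroy so a late-arriving create can be discarded.
            Sources.objects.create(source_id=source_id, offset=offset, out_of_order_delete=True)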
async def process_messages(msg_pending_queue):  # noqa: C901; pragma: no cover
    """
    Process messages from Platform-Sources kafka service.

    Handler for various application/source create and delete events.
    'create' events:
        Issues a Sources REST API call to get additional context for the
        Platform-Sources kafka event. This information is stored in the
        Sources database table.
    'destroy' events:
        Enqueues a source delete event which will be processed in the
        synchronize_sources method.

    Args:
        msg_pending_queue (Asyncio queue): Queue to hold kafka messages to be filtered

    Returns:
        None

    """
    LOG.info("Waiting to process incoming kafka messages...")
    while True:
        msg_data = await msg_pending_queue.get()

        LOG.info(f"Processing Event: {str(msg_data)}")
        try:
            if msg_data.get("event_type") in (KAFKA_APPLICATION_CREATE, KAFKA_AUTHENTICATION_CREATE):
                if msg_data.get("event_type") == KAFKA_AUTHENTICATION_CREATE:
                    sources_network = SourcesHTTPClient(msg_data.get("auth_header"))
                    msg_data["source_id"] = sources_network.get_source_id_from_endpoint_id(msg_data.get("resource_id"))

                storage.create_source_event(
                    msg_data.get("source_id"), msg_data.get("auth_header"), msg_data.get("offset")
                )

                with concurrent.futures.ThreadPoolExecutor() as pool:
                    await EVENT_LOOP.run_in_executor(
                        pool, sources_network_info, msg_data.get("source_id"), msg_data.get("auth_header")
                    )

            elif msg_data.get("event_type") in (KAFKA_SOURCE_UPDATE,):
                if storage.is_known_source(msg_data.get("source_id")) is False:
                    LOG.info("Update event for unknown source id, skipping...")
                    continue
                with concurrent.futures.ThreadPoolExecutor() as pool:
                    await EVENT_LOOP.run_in_executor(
                        pool, sources_network_info, msg_data.get("source_id"), msg_data.get("auth_header")
                    )

            elif msg_data.get("event_type") in (KAFKA_AUTHENTICATION_UPDATE,):
                msg_data["source_id"] = storage.get_source_from_endpoint(msg_data.get("resource_id"))

                with concurrent.futures.ThreadPoolExecutor() as pool:
                    await EVENT_LOOP.run_in_executor(
                        pool, save_auth_info, msg_data.get("auth_header"), msg_data.get("source_id")
                    )

            elif msg_data.get("event_type") in (KAFKA_APPLICATION_DESTROY, KAFKA_SOURCE_DESTROY):
                storage.enqueue_source_delete(msg_data.get("source_id"))

            if msg_data.get("event_type") in (KAFKA_SOURCE_UPDATE, KAFKA_AUTHENTICATION_UPDATE):
                storage.enqueue_source_update(msg_data.get("source_id"))

        except (InterfaceError, OperationalError) as error:
            LOG.error(
                "[process_messages] Closing DB connection and re-queueing failed operation."
                f" Encountered {type(error).__name__}: {error}"
            )
            connection.close()
            await asyncio.sleep(Config.RETRY_SECONDS)
            await msg_pending_queue.put(msg_data)
            LOG.info(
                f'Requeued failed operation: {msg_data.get("event_type")} '
                f'for Source ID: {str(msg_data.get("source_id"))}.'
            )
        except Exception as error:
            # The reason for catching all exceptions is to ensure that the event
            # loop remains active in the event that message processing fails unexpectedly.
            source_id = str(msg_data.get("source_id", "unknown"))
            LOG.error(f"Source {source_id} Unexpected message processing error: {str(error)}", exc_info=True)
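# Nothing above shows the producer side of msg_pending_queue. Below is a
# minimal sketch of a listener coroutine that could feed it, written against
# aiokafka. The topic name, the JSON payload parsing, and stamping the kafka
# offset onto the event are illustrative assumptions; in the real service the
# event type and filtering come from the message headers and a separate
# filter step.
import json

from aiokafka import AIOKafkaConsumer


async def listen_for_messages(msg_pending_queue):  # sketch only
    """Push raw platform-sources events onto the pending queue."""
    consumer = AIOKafkaConsumer("platform.sources.event-stream", bootstrap_servers="localhost:9092")
    await consumer.start()
    try:
        async for msg in consumer:
            event = json.loads(msg.value.decode("utf-8"))
            event["offset"] = msg.offset  # consumed by the create/delete ordering above
            await msg_pending_queue.put(event)
    finally:
        await consumer.stop()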