async def handle_message(self, event_msg):
    """Publish changes extracted from the event message.

    Args:
        event_msg (event_consumer.GEventMessage): Contains the changes
            to publish.
    Raises:
        InvalidDNSZoneInMessageError if the DNS zone of a resource
            record does not match our configured zone.
    """
    msg_logger = _utils.GEventMessageLogger(
        self._logger, {'msg_id': event_msg.msg_id})
    msg_logger.info('Publisher received new message.')

    records = event_msg.data['resourceRecords']
    if not records:
        # Nothing to dispatch; record that fact in the message history.
        msg = ('No records published or deleted as no resource records were'
               ' present.')
        msg_logger.info(msg)
        event_msg.append_to_history(msg, self.phase)
        return

    dns_zone = self.config['dns_zone']
    zone_suffix = '.' + dns_zone
    for record in records:
        name = record['name']
        # Every record must belong to the configured zone; refuse to
        # publish anything outside it.
        if not name.endswith(zone_suffix):
            msg = ('Error when asserting zone for record: '
                   f'{name}.')
            msg_logger.error(msg)
            raise exceptions.InvalidDNSZoneInMessageError(msg)
        await self._dispatch_changes(
            record, dns_zone, event_msg.data['action'], msg_logger)
async def _handle_pubsub_msg(self, pubsub_msg):
    """Validate, parse, and forward a raw Pub/Sub message.

    Messages that are too old, fail JSON decoding, or match no known
    schema are acked and discarded (emitting a metric where applicable);
    valid messages are wrapped via ``self._create_gevent_msg`` and
    scheduled onto ``self.success_channel``.

    Args:
        pubsub_msg: raw message received from the Pub/Sub subscription.
    """
    msg_id = pubsub_msg.message_id
    msg_logger = _utils.GEventMessageLogger(
        self._logger, {'msg_id': msg_id})
    msg_logger.info('Received new message.')

    msg_datetime = pubsub_msg.publish_time
    # need a TZ-aware object in UTC
    cur_datetime = datetime.datetime.now(datetime.timezone.utc)
    msg_age = (cur_datetime - msg_datetime).total_seconds()
    if msg_age > self._max_msg_age:
        # `Logger.warn` is a deprecated alias; use `warning`.
        msg_logger.warning(f'Message is too old ({msg_age} seconds), '
                           'acking and discarding.')
        context = {
            'plugin': 'event-consumer',
            'msg_id': msg_id,
            'msg_age': msg_age
        }
        await self.metrics.incr('msg-too-old', context=context)
        pubsub_msg.ack()
        return

    try:
        data = json.loads(pubsub_msg.data)
    except json.JSONDecodeError:
        # Unparseable payloads are acked, not retried: redelivery would
        # fail the same way, so dropping avoids a poison message.
        msg = ('Issue loading message data as JSON. '
               f'Given data: {pubsub_msg.data}')
        msg_logger.warning(msg)
        pubsub_msg.ack()
        return
    msg_logger.debug(f'Received data: {data}')

    schema = self._get_and_validate_pubsub_msg_schema(data)
    metrics_schema = schema if schema else 'unknown'
    context = {'plugin': 'event-consumer', 'schema': metrics_schema}
    await self.metrics.incr('message-validated', context=context)
    if schema is None:
        msg_logger.warning('No schema found for message received, acking.')
        pubsub_msg.ack()
        return

    msg_logger.debug(f'Message is valid for "{schema}" schema.')
    event_msg_data = self._parser.parse(data, schema)
    event_msg = self._create_gevent_msg(pubsub_msg, event_msg_data, schema)

    msg_logger.debug('Adding message to the success channel.')
    # This callback runs on a Pub/Sub gRPC thread, not the asyncio loop
    # thread, so the coroutine must be handed to the loop thread-safely
    # rather than awaited here.
    coro = self.success_channel.put(event_msg)
    asyncio.run_coroutine_threadsafe(coro, self._loop)
async def handle_message(self, event_msg):
    """Ack Pub/Sub message and update event message history.

    Args:
        event_msg (GEventMessage): message to clean up
    """
    msg_logger = _utils.GEventMessageLogger(
        self._logger, {'msg_id': event_msg.msg_id})
    # Placeholder-free literals need no f-prefix (ruff F541).
    msg_logger.debug('Acking message.')
    # "blocking" method but just puts msg on a request queue that
    # google.cloud.pubsub manages with threads+grpc
    event_msg._pubsub_msg.ack()
    msg = 'Acknowledged message in Pub/Sub.'
    event_msg.append_to_history(msg, self.phase)
    msg_logger.info('Message is done processing.')
async def handle_message(self, event_message):
    """Enrich message with extra context and send it to the publisher.

    When a message is successfully processed, it is passed to the
    :obj:`self.success_channel`. However, if there is a problem during
    processing, the message is passed to the :obj:`self.error_channel`.

    Args:
        event_message (.GEventMessage): message requiring additional
            information.
    """
    # Already-populated resource records mean an upstream producer
    # supplied everything needed for publishing; skip enrichment.
    if event_message.data['resourceRecords']:
        event_message.append_to_history(
            'Message already enriched, skipping phase.', self.phase)
        return

    msg_logger = _utils.GEventMessageLogger(
        self._logger, {'msg_id': event_message.msg_id})

    action = event_message.data['action']
    if action == 'additions':
        instance_data = await self._poll_for_instance_data(
            event_message.data['resourceName'], msg_logger)
        records = await self._create_rrecords(
            event_message.data, instance_data, msg_logger)
    elif action == 'deletions':
        records = await self._get_matching_records_for_deletion(
            event_message.data['resourceName'])
    # NOTE(review): any other action value leaves `records` unbound and
    # raises NameError below — presumably actions are validated upstream;
    # confirm before relying on this path.

    msg_logger.debug(f'Enriched with resource record(s): {records}')
    event_message.data['resourceRecords'].extend(records)
    record_count = len(event_message.data['resourceRecords'])
    event_message.append_to_history(
        f'Enriched msg with {record_count}'
        ' resource record(s).', self.phase)