async def _process_message(self, msg: Message):
    """
    De-serialize message and execute service.

    :param msg: Kafka message.
    :type msg: confluent_kafka.Message
    """
    LOGGER.info(
        'Processing Message(topic={}, partition={}, offset={}) ...'.format(
            msg.topic(), msg.partition(), msg.offset()))
    service_repr = get_call_repr(self._service)
    LOGGER.info('Executing job {}'.format(service_repr))
    try:
        message_value = _decode_msg_value(msg.value())
        res = await self._exec_service(message_value)
    except KeyboardInterrupt:
        LOGGER.error('Job was interrupted: {}'.format(msg.offset()))
    except Exception as err:
        LOGGER.exception('Job {} raised an exception: {}'.format(
            msg.offset(), err))
        await self._producer.produce(topic=self._failed_topic,
                                     value=msg.value(),
                                     error=str(err))
    else:
        LOGGER.info('Job {} returned: {}'.format(msg.offset(), res))
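# _decode_msg_value is called above but not defined in this snippet. A
# minimal sketch, assuming the payload is UTF-8 encoded JSON (the format
# is an assumption, not confirmed by the source):
import json


def _decode_msg_value(raw_value: bytes):
    """Decode a raw Kafka payload into a Python object (assumed JSON)."""
    return json.loads(raw_value.decode('utf-8'))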
def _process_message(self, msg: KafkaMessage):
    err = msg.error()
    if err:
        if err.code() == KafkaError._PARTITION_EOF:
            return None
        else:
            monitoring.got_counter("kafka_consumer_exception")
            params = {
                "code": err.code(),
                "pid": os.getpid(),
                "topic": msg.topic(),
                "partition": msg.partition(),
                "offset": msg.offset(),
                log_const.KEY_NAME: log_const.EXCEPTION_VALUE
            }
            log(
                "KafkaConsumer Error %(code)s at pid %(pid)s: topic=%(topic)s "
                "partition=[%(partition)s] reached end at offset %(offset)s\n",
                params=params, level="WARNING")
            raise KafkaException(err)
    if msg.value():
        if msg.headers() is None:
            msg.set_headers([])
        return msg
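# A minimal sketch of the poll loop that would drive the filter above,
# assuming a hypothetical `worker` object exposing _process_message;
# broker address, group id, topic, and the `handle` downstream function
# are placeholders, not taken from the source.
from confluent_kafka import Consumer

consumer = Consumer({
    'bootstrap.servers': 'localhost:9092',
    'group.id': 'example-group',
    'auto.offset.reset': 'earliest',
})
consumer.subscribe(['example-topic'])
try:
    while True:
        msg = consumer.poll(timeout=1.0)  # None when no message arrived
        if msg is None:
            continue
        checked = worker._process_message(msg)  # raises on real errors
        if checked is not None:
            handle(checked)  # hypothetical downstream handler
finally:
    consumer.close()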
async def _process_message(self, msg: Message):
    """
    De-serialize message and execute service.

    :param msg: Kafka message.
    :type msg: confluent_kafka.Message
    """
    LOGGER.info(
        'Processing Message(topic={}, partition={}, offset={}) ...'.format(
            msg.topic(), msg.partition(), msg.offset()))
    errors = []
    service_repr = get_call_repr(self._service)
    # TODO get schedule from args
    # First element in each tuple is the retry count, the second is the
    # timeout in seconds.
    schedule = [(3, 5), (2, 60), (2, 60 * 5), (1, 60 * 60), (3, 60 * 60 * 3)]
    LOGGER.info('Executing job {}'.format(service_repr))
    try:
        message_value = _decode_msg_value(msg.value())
        for count, timeout in schedule:
            res, exec_error = await self._retry_exec(count, timeout,
                                                     message_value)
            if res:
                LOGGER.info(
                    'Successful re-processing of the message {} by the service {}'
                    .format(message_value, self._service))
                break
            else:
                errors.extend(exec_error)
        if len(errors) > 0:
            await self._producer.produce(topic=self._failed_topic,
                                         value=msg.value(),
                                         error=str(errors))
    except KeyboardInterrupt:
        LOGGER.error('Job was interrupted: {}'.format(msg.offset()))
    except Exception as err:
        LOGGER.exception('Job {} raised an exception: {}'.format(
            msg.offset(), err))
        errors.append(err)
        # Forward the message so failures can be analysed and reprocessed.
        await self._producer.produce(topic=self._failed_topic,
                                     value=msg.value(),
                                     error=str(errors))
    else:
        LOGGER.info('Job {} finished'.format(msg.offset()))
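# _retry_exec is called above but not shown. A minimal sketch of one
# plausible reading of the schedule: try the service up to `count`
# times, sleeping `timeout` seconds between attempts, and return the
# result plus any collected errors. These semantics are assumed, not
# confirmed by the source.
import asyncio


async def _retry_exec(self, count, timeout, message_value):
    errors = []
    for _ in range(count):
        try:
            res = await self._exec_service(message_value)
            return res, errors
        except Exception as err:
            errors.append(err)
            await asyncio.sleep(timeout)
    return None, errors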
def update_callback(self, err: Optional[cimpl.KafkaError],
                    msg: cimpl.Message):
    assert err is None, f"Received KafkaError {err}."
    self.binary_value = msg.value()
    self.binary_key = msg.key()
    self.partition = msg.partition()
    self.offset = msg.offset()
    # timestamp() returns a (timestamp_type, timestamp) tuple; keep the value.
    self.timestamp = msg.timestamp()[1]
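# The (err, msg) signature above matches confluent_kafka's delivery
# report callback, so one plausible use is registering it via the
# on_delivery argument of Producer.produce(); the broker address,
# topic, payload, and `recorder` instance below are placeholders.
from confluent_kafka import Producer

producer = Producer({'bootstrap.servers': 'localhost:9092'})
producer.produce('example-topic', value=b'payload',
                 on_delivery=recorder.update_callback)
producer.flush()  # block until outstanding delivery callbacks have run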
def decode_message(message: Message) -> DecodedMessage:
    if message.key() is None:
        decoded_key = None
    else:
        decoded_key = message.key().decode("utf-8")
    decoded_value = message.value().decode("utf-8")
    headers = []
    if message.headers():
        for header_key, header_value in message.headers():
            headers.append(
                MessageHeader(
                    key=header_key,
                    value=header_value.decode("utf-8") if header_value else None))
    return DecodedMessage(
        key=decoded_key,
        value=decoded_value,
        partition=message.partition(),
        offset=message.offset(),
        timestamp=str(message.timestamp()),
        headers=headers,
    )
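# Example use of decode_message on a freshly polled message; the broker
# address, group id, and topic below are placeholders, not from the source.
from confluent_kafka import Consumer

consumer = Consumer({
    'bootstrap.servers': 'localhost:9092',
    'group.id': 'decoder-example',
    'auto.offset.reset': 'earliest',
})
consumer.subscribe(['example-topic'])
msg = consumer.poll(timeout=5.0)
if msg is not None and msg.error() is None:
    decoded = decode_message(msg)
    print(decoded.key, decoded.value, decoded.partition, decoded.offset)
consumer.close()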