Example #1
@classmethod
def from_message(cls, msg: confluent_kafka.Message) -> 'Metadata':
    return cls(
        topic=msg.topic(),
        partition=msg.partition(),
        offset=msg.offset(),
        # Message.timestamp() returns (timestamp_type, timestamp_ms);
        # keep only the millisecond value.
        timestamp=msg.timestamp()[1],
        key=msg.key(),
        _raw=msg,
    )
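For context, the Metadata class this constructor belongs to is not shown in the snippet. Below is a minimal sketch of what it might look like, with field names taken from the keyword arguments above and types inferred from the confluent_kafka accessors; the class layout is an assumption, not the original source.

from dataclasses import dataclass, field
from typing import Optional

import confluent_kafka


@dataclass
class Metadata:
    # Hypothetical layout; from_message above would live in this class.
    topic: str
    partition: int
    offset: int
    timestamp: int                    # milliseconds since the epoch
    key: Optional[bytes]
    _raw: confluent_kafka.Message = field(repr=False)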
Example #2
def _confluent_to_binary_message(
        self, consumed_message: Message) -> BinaryMessage:
    # Translate a raw confluent_kafka message into the transport-agnostic
    # BinaryMessage representation used by the rest of the code.
    return BinaryMessage(
        key=consumed_message.key(),
        value=consumed_message.value(),
        partition=consumed_message.partition(),
        offset=consumed_message.offset(),
        timestamp=self._confluent_to_io_timestamp(consumed_message),
        headers=self._confluent_to_io_headers(consumed_message.headers()),
    )
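The BinaryMessage container and the two _confluent_to_io_* helpers are referenced but not shown. A hedged sketch of plausible definitions follows, written as module-level functions for brevity even though the originals are methods; these are illustrative guesses, not the library's actual API.

import datetime
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

import confluent_kafka


@dataclass
class BinaryMessage:
    # Hypothetical container; field names mirror the keyword arguments
    # used in the example above.
    key: Optional[bytes]
    value: Optional[bytes]
    partition: int
    offset: int
    timestamp: datetime.datetime
    headers: Dict[str, bytes]


def _confluent_to_io_timestamp(
        msg: confluent_kafka.Message) -> datetime.datetime:
    # Message.timestamp() returns (timestamp_type, timestamp_ms).
    _, timestamp_ms = msg.timestamp()
    return datetime.datetime.fromtimestamp(
        timestamp_ms / 1000, tz=datetime.timezone.utc)


def _confluent_to_io_headers(
        headers: Optional[List[Tuple[str, bytes]]]) -> Dict[str, bytes]:
    # confluent_kafka returns headers as a list of (key, value) tuples,
    # or None when the message carries none.
    return dict(headers or [])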
Example #3
import sys

from confluent_kafka import Message


def check_msg_equality(msgnum, expected, given: Message, expectFail=False):
    # Compare a consumed message against an expected dict of key/value
    # (and optionally headers); exit non-zero on an unexpected result.
    errs = []
    if expected['value'] != given.value():
        errs.append(f"Values do not match! Expected {expected['value']}, got {given.value()}")
    if expected['key'] != given.key():
        errs.append(f"Keys do not match! Expected {expected['key']}, got {given.key()}")
    if 'headers' in expected and expected['headers'] != given.headers():
        errs.append(f"Headers do not match! Expected {expected['headers']}, got {given.headers()}")
    if expectFail:
        if not errs:
            print(f"{msgnum}: expected ({expected}) matched given. Did not expect that")
            sys.exit(1)
    elif errs:
        for err in errs:
            print(f"{msgnum}: {err}")
        sys.exit(255)
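A hedged usage sketch for the checker above; the broker address, group id, topic name, and expected messages are placeholders, not part of the original example.

from confluent_kafka import Consumer

# Hypothetical expectations; keys and values are bytes because that is
# what confluent_kafka hands back.
expected_msgs = [
    {'key': b'k0', 'value': b'v0'},
    {'key': b'k1', 'value': b'v1', 'headers': [('trace', b'abc')]},
]

consumer = Consumer({
    'bootstrap.servers': 'localhost:9092',  # placeholder broker
    'group.id': 'equality-check',           # placeholder group
    'auto.offset.reset': 'earliest',
})
consumer.subscribe(['test-topic'])          # placeholder topic

for i, expected in enumerate(expected_msgs):
    msg = consumer.poll(timeout=10.0)
    if msg is None or msg.error():
        print(f'{i}: no message received, or a Kafka error')
        break
    check_msg_equality(i, expected, msg)

consumer.close()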
Example #4
    def handle(self, message: confluent_kafka.Message) -> None:
        context = self.baseplate.make_context_object()
        try:
            # We place the call to ``baseplate.make_server_span`` inside the
            # try/except block because we still want Baseplate to see and
            # handle the error (publish it to error reporting)
            with self.baseplate.make_server_span(
                    context, f"{self.name}.handler") as span:
                error = message.error()
                if error:
                    # this isn't a real message, but is an error from Kafka
                    raise ValueError(f"KafkaError: {error.str()}")

                topic = message.topic()
                offset = message.offset()
                partition = message.partition()

                span.set_tag("kind", "consumer")
                span.set_tag("kafka.topic", topic)
                span.set_tag("kafka.key", message.key())
                span.set_tag("kafka.partition", partition)
                span.set_tag("kafka.offset", offset)
                span.set_tag("kafka.timestamp", message.timestamp())

                blob: bytes = message.value()

                try:
                    data = self.message_unpack_fn(blob)
                except Exception:
                    logger.error("skipping invalid message")
                    context.span.incr_tag(
                        f"{self.name}.{topic}.invalid_message")
                    return

                try:
                    ingest_timestamp_ms = data["endpoint_timestamp"]
                    now_ms = int(time.time() * 1000)
                    message_latency = (now_ms - ingest_timestamp_ms) / 1000
                except (KeyError, TypeError):
                    # we can't guarantee that all publishers populate this field
                    # v2 events publishers (event collectors) do, but future
                    # kafka publishers may not
                    message_latency = None

                self.handler_fn(context, data, message)

                if self.on_success_fn:
                    self.on_success_fn(context, data, message)

                if message_latency is not None:
                    context.metrics.timer(f"{self.name}.{topic}.latency").send(
                        message_latency)

                context.metrics.gauge(
                    f"{self.name}.{topic}.offset.{partition}").replace(offset)
        except Exception:
            # let this exception crash the server so we'll stop processing messages
            # and won't commit offsets. when the server restarts it will get
            # this message again and try to process it.
            logger.exception(
                "Unhandled error while trying to process a message, terminating the server"
            )
            raise
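A minimal sketch of how a handler like this might be driven. The consumer configuration, topic, and the handler instance below are assumptions for illustration, not part of Baseplate itself.

from confluent_kafka import Consumer

consumer = Consumer({
    'bootstrap.servers': 'localhost:9092',  # placeholder broker
    'group.id': 'my-consumer-group',        # placeholder group
    'enable.auto.commit': False,            # commit only after handle() returns
})
consumer.subscribe(['events'])              # placeholder topic

while True:
    message = consumer.poll(timeout=1.0)
    if message is None:
        continue
    # handle() re-raises unhandled errors, which breaks the loop before
    # the offset is committed, so the message is redelivered after a
    # restart (matching the comment at the end of the example above).
    handler.handle(message)                 # handler is a hypothetical instance
    consumer.commit(message=message)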
Example #5
def kafka_producer_delivery_cb(
        err: confluent_kafka.KafkaError, msg: confluent_kafka.Message) -> None:
    # Delivery report callback: err is None when the message was delivered.
    if err is not None:
        logger.error("Delivery failed for record %s: %s", msg.key(), err)
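A brief usage sketch: confluent_kafka's Producer accepts a per-message delivery report callback through the on_delivery keyword of produce(). The broker address, topic, key, and value below are placeholders.

from confluent_kafka import Producer

producer = Producer({'bootstrap.servers': 'localhost:9092'})  # placeholder broker
producer.produce(
    'events',                    # placeholder topic
    key=b'user-42',
    value=b'{"action": "login"}',
    on_delivery=kafka_producer_delivery_cb,
)
# Delivery callbacks fire from poll() or flush(), not from produce() itself.
producer.flush()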