示例#1
0
    def encode_produce_request(cls, payloads=(), acks=1, timeout=1000):
        """
        Encode a ProduceRequest struct

        Arguments:
            payloads: list of ProduceRequestPayload
            acks: How "acky" you want the request to be
                1: written to disk by the leader
                0: immediate response
                -1: waits for all replicas to be in sync
            timeout: Maximum time (in ms) the server will wait for replica acks.
                This is _not_ a socket timeout

        Returns: ProduceRequest
        """
        if acks not in (1, 0, -1):
            raise ValueError('ProduceRequest acks (%s) must be 1, 0, -1' % acks)

        # Build the nested topic/partition/message structure with explicit
        # loops rather than one large nested comprehension.
        topics = []
        for topic, topic_payloads in group_by_topic_and_partition(payloads).items():
            partitions = []
            for partition, payload in topic_payloads.items():
                messages = [
                    (0, 0, kafka.protocol.message.Message(
                        msg.value, key=msg.key,
                        magic=msg.magic, attributes=msg.attributes))
                    for msg in payload.messages
                ]
                partitions.append((partition, messages))
            topics.append((topic, partitions))

        return kafka.protocol.produce.ProduceRequest[0](
            required_acks=acks,
            timeout=timeout,
            topics=topics)
示例#2
0
    def encode_offset_fetch_request(cls, client_id, correlation_id, group,
                                    payloads):
        """
        Encode some OffsetFetchRequest structs

        Arguments:
            client_id: string
            correlation_id: int
            group: string, the consumer group you are fetching offsets for
            payloads: list of OffsetFetchRequest
        """
        grouped_payloads = group_by_topic_and_partition(payloads)

        # Accumulate fragments and join once at the end; repeated bytes
        # concatenation with += is quadratic in the worst case.
        message = []
        message.append(cls._encode_message_header(client_id, correlation_id,
                                                  KafkaProtocol.OFFSET_FETCH_KEY))
        message.append(write_short_string(group))
        message.append(struct.pack('>i', len(grouped_payloads)))

        for topic, topic_payloads in grouped_payloads.items():
            message.append(write_short_string(topic))
            message.append(struct.pack('>i', len(topic_payloads)))

            # Only the partition id goes on the wire for an offset fetch.
            for partition, payload in topic_payloads.items():
                message.append(struct.pack('>i', partition))

        msg = b''.join(message)
        return struct.pack('>i%ds' % len(msg), len(msg), msg)
示例#3
0
    def encode_produce_request(cls, client_id, correlation_id, payloads=None, acks=1, timeout=1000):
        """
        Encode some ProduceRequest structs

        Arguments:
            client_id: string
            correlation_id: int
            payloads: list of ProduceRequest
            acks: How "acky" you want the request to be
                0: immediate response
                1: written to disk by the leader
                2+: waits for this many number of replicas to sync
                -1: waits for all replicas to be in sync
            timeout: Maximum time the server will wait for acks from replicas.
                This is _not_ a socket timeout

        """
        if payloads is None:
            payloads = []
        grouped = group_by_topic_and_partition(payloads)

        # Header plus the fixed request fields, then per-topic sections.
        parts = [
            cls._encode_message_header(client_id, correlation_id, KafkaProtocol.PRODUCE_KEY),
            struct.pack(">hii", acks, timeout, len(grouped)),
        ]

        for topic, by_partition in grouped.items():
            parts.append(struct.pack(">h%dsi" % len(topic), len(topic), topic, len(by_partition)))
            for partition, payload in by_partition.items():
                encoded_set = KafkaProtocol._encode_message_set(payload.messages)
                parts.append(struct.pack(">ii%ds" % len(encoded_set), partition, len(encoded_set), encoded_set))

        body = b"".join(parts)
        return struct.pack(">i%ds" % len(body), len(body), body)
示例#4
0
    def encode_offset_fetch_request(cls, client_id, correlation_id,
                                    group, payloads):
        """
        Encode some OffsetFetchRequest structs

        Arguments:
            client_id: string
            correlation_id: int
            group: string, the consumer group you are fetching offsets for
            payloads: list of OffsetFetchRequest
        """
        grouped_payloads = group_by_topic_and_partition(payloads)

        # Collect byte fragments in a list and join once; in-place bytes
        # concatenation (+=) is quadratic in the number of fragments.
        message = [cls._encode_message_header(client_id, correlation_id,
                                              KafkaProtocol.OFFSET_FETCH_KEY)]
        message.append(write_short_string(group))
        message.append(struct.pack('>i', len(grouped_payloads)))

        for topic, topic_payloads in grouped_payloads.items():
            message.append(write_short_string(topic))
            message.append(struct.pack('>i', len(topic_payloads)))

            # An offset-fetch entry is just the partition id.
            for partition, payload in topic_payloads.items():
                message.append(struct.pack('>i', partition))

        msg = b''.join(message)
        return struct.pack('>i%ds' % len(msg), len(msg), msg)
示例#5
0
    def encode_offset_fetch_request(cls, client_id, correlation_id,
                                    group, payloads, from_kafka=False):
        """
        Encode some OffsetFetchRequest structs. Version 0 (the default) is
        used when from_kafka is false, requesting Zookeeper-stored offsets;
        version 1 is used otherwise, requesting Kafka-committed offsets.

        Arguments:
            client_id: string
            correlation_id: int
            group: string, the consumer group you are fetching offsets for
            payloads: list of OffsetFetchRequest
            from_kafka: bool, default False, set True for Kafka-committed offsets
        """
        grouped_payloads = group_by_topic_and_partition(payloads)

        version = 1 if from_kafka else 0
        parts = [cls._encode_message_header(client_id, correlation_id,
                                            KafkaProtocol.OFFSET_FETCH_KEY,
                                            version=version),
                 write_short_string(group),
                 struct.pack('>i', len(grouped_payloads))]

        for topic, topic_payloads in grouped_payloads.items():
            parts.append(write_short_string(topic))
            parts.append(struct.pack('>i', len(topic_payloads)))
            for partition, payload in topic_payloads.items():
                parts.append(struct.pack('>i', partition))

        body = b''.join(parts)
        return struct.pack('>i%ds' % len(body), len(body), body)
示例#6
0
    def encode_fetch_request(cls, payloads=(), max_wait_time=100, min_bytes=4096):
        """
        Encodes a FetchRequest struct

        Arguments:
            payloads: list of FetchRequestPayload
            max_wait_time (int, optional): ms to block waiting for min_bytes
                data. Defaults to 100.
            min_bytes (int, optional): minimum bytes required to return before
                max_wait_time. Defaults to 4096.

        Return: FetchRequest
        """
        # Assemble the topic list with explicit loops instead of a nested
        # comprehension.
        topics = []
        for topic, partition_map in group_by_topic_and_partition(payloads).items():
            fetches = [(partition, payload.offset, payload.max_bytes)
                       for partition, payload in partition_map.items()]
            topics.append((topic, fetches))

        return kafka.protocol.fetch.FetchRequest[0](
            replica_id=-1,
            max_wait_time=max_wait_time,
            min_bytes=min_bytes,
            topics=topics)
示例#7
0
    def encode_produce_request(cls, client_id, correlation_id, payloads=None, acks=1, timeout=1000):
        """
        Encode some ProduceRequest structs

        Arguments:
            client_id: string
            correlation_id: int
            payloads: list of ProduceRequest
            acks: How "acky" you want the request to be
                0: immediate response
                1: written to disk by the leader
                2+: waits for this many number of replicas to sync
                -1: waits for all replicas to be in sync
            timeout: Maximum time the server will wait for acks from replicas.
                This is _not_ a socket timeout
        """
        # None default instead of a mutable default argument ([]).
        payloads = [] if payloads is None else payloads
        grouped_payloads = group_by_topic_and_partition(payloads)

        # Join byte fragments once at the end; repeated bytes += is quadratic.
        message = []
        message.append(cls._encode_message_header(client_id, correlation_id,
                                                  KafkaProtocol.PRODUCE_KEY))
        message.append(struct.pack('>hii', acks, timeout, len(grouped_payloads)))

        for topic, topic_payloads in grouped_payloads.items():
            message.append(struct.pack('>h%dsi' % len(topic), len(topic), topic,
                                       len(topic_payloads)))
            for partition, payload in topic_payloads.items():
                msg_set = KafkaProtocol._encode_message_set(payload.messages)
                message.append(struct.pack('>ii%ds' % len(msg_set), partition,
                                           len(msg_set), msg_set))

        msg = b''.join(message)
        return struct.pack('>i%ds' % len(msg), len(msg), msg)
示例#8
0
    def encode_produce_request(cls, payloads=(), acks=1, timeout=1000):
        """
        Encode a ProduceRequest struct

        Arguments:
            payloads: list of ProduceRequestPayload
            acks: How "acky" you want the request to be
                1: written to disk by the leader
                0: immediate response
                -1: waits for all replicas to be in sync
            timeout: Maximum time (in ms) the server will wait for replica acks.
                This is _not_ a socket timeout

        Returns: ProduceRequest
        """
        if acks not in (1, 0, -1):
            raise ValueError('ProduceRequest acks (%s) must be 1, 0, -1' %
                             acks)

        # Build the topic/partition/message structure with explicit loops;
        # each message is pre-encoded, paired with offset 0.
        topics = []
        for topic, topic_payloads in group_by_topic_and_partition(payloads).items():
            partitions = []
            for partition, payload in topic_payloads.items():
                encoded_msgs = []
                for msg in payload.messages:
                    encoded = kafka.protocol.message.Message(
                        msg.value,
                        key=msg.key,
                        magic=msg.magic,
                        attributes=msg.attributes).encode()
                    encoded_msgs.append((0, encoded))
                partitions.append((partition, encoded_msgs))
            topics.append((topic, partitions))

        return kafka.protocol.produce.ProduceRequest[0](
            required_acks=acks,
            timeout=timeout,
            topics=topics)
示例#9
0
    def encode_fetch_request(cls, client_id, correlation_id, payloads=None,
                             max_wait_time=100, min_bytes=4096):
        """
        Encodes some FetchRequest structs

        Arguments:
            client_id: string
            correlation_id: int
            payloads: list of FetchRequest
            max_wait_time: int, how long to block waiting on min_bytes of data
            min_bytes: int, the minimum number of bytes to accumulate before
                       returning the response
        """
        if payloads is None:
            payloads = []
        grouped = group_by_topic_and_partition(payloads)

        parts = [
            cls._encode_message_header(client_id, correlation_id,
                                       KafkaProtocol.FETCH_KEY),
            # -1 is the replica id
            struct.pack('>iiii', -1, max_wait_time, min_bytes, len(grouped)),
        ]

        for topic, topic_payloads in grouped.items():
            parts.append(write_short_string(topic))
            parts.append(struct.pack('>i', len(topic_payloads)))
            for partition, payload in topic_payloads.items():
                parts.append(struct.pack('>iqi', partition, payload.offset,
                                         payload.max_bytes))

        body = b''.join(parts)
        return struct.pack('>i%ds' % len(body), len(body), body)
示例#10
0
    def encode_offset_commit_request(cls, client_id, correlation_id,
                                     group, payloads):
        """
        Encode some OffsetCommitRequest structs

        Arguments:
            client_id: string
            correlation_id: int
            group: string, the consumer group you are committing offsets for
            payloads: list of OffsetCommitRequest
        """
        grouped = group_by_topic_and_partition(payloads)

        parts = [cls._encode_message_header(client_id, correlation_id,
                                            KafkaProtocol.OFFSET_COMMIT_KEY),
                 write_short_string(group),
                 struct.pack('>i', len(grouped))]

        for topic, topic_payloads in grouped.items():
            parts.append(write_short_string(topic))
            parts.append(struct.pack('>i', len(topic_payloads)))
            for partition, payload in topic_payloads.items():
                parts.append(struct.pack('>iq', partition, payload.offset))
                parts.append(write_short_string(payload.metadata))

        body = b''.join(parts)
        return struct.pack('>i%ds' % len(body), len(body), body)
    def encode_fetch_request(cls,
                             payloads=(),
                             max_wait_time=100,
                             min_bytes=4096):
        """
        Encodes a FetchRequest struct

        Arguments:
            payloads: list of FetchRequestPayload
            max_wait_time (int, optional): ms to block waiting for min_bytes
                data. Defaults to 100.
            min_bytes (int, optional): minimum bytes required to return before
                max_wait_time. Defaults to 4096.

        Return: FetchRequest
        """
        # Build the topic list imperatively rather than via a nested
        # comprehension.
        topic_list = []
        for topic, partition_map in group_by_topic_and_partition(payloads).items():
            entries = [(partition, payload.offset, payload.max_bytes)
                       for partition, payload in partition_map.items()]
            topic_list.append((topic, entries))

        return kafka.protocol.fetch.FetchRequest[0](
            replica_id=-1,
            max_wait_time=max_wait_time,
            min_bytes=min_bytes,
            topics=topic_list)
示例#12
0
    def encode_offset_commit_request_kafka(cls, client_id, correlation_id,
                                           group, payloads):
        """
        Encode some OffsetCommitRequest structs (wire version 2)
        Arguments:
            client_id: string
            correlation_id: int
            group: string, the consumer group you are committing offsets for
            payloads: list of OffsetCommitRequest
        """
        grouped = group_by_topic_and_partition(payloads)

        # Accumulate into a mutable buffer, then freeze to bytes.
        buf = bytearray()
        buf += cls._encode_message_header(client_id,
                                          correlation_id,
                                          KafkaProtocol.OFFSET_COMMIT_KEY,
                                          version=2)
        buf += write_short_string(group)
        buf += struct.pack('>i', -1)  # ConsumerGroupGenerationId
        buf += write_short_string('')  # ConsumerId
        buf += struct.pack('>q', -1)  # Retention time
        buf += struct.pack('>i', len(grouped))

        for topic, topic_payloads in grouped.items():
            buf += write_short_string(topic)
            buf += struct.pack('>i', len(topic_payloads))
            for partition, payload in topic_payloads.items():
                buf += struct.pack('>iq', partition, payload.offset)
                buf += write_short_string(payload.metadata)

        msg = bytes(buf)
        return struct.pack('>i%ds' % len(msg), len(msg), msg)
示例#13
0
    def encode_fetch_request(cls, client_id, correlation_id, payloads=None,
                             max_wait_time=100, min_bytes=4096):
        """
        Encodes some FetchRequest structs

        Arguments:
            client_id: string
            correlation_id: int
            payloads: list of FetchRequest
            max_wait_time: int, how long to block waiting on min_bytes of data
            min_bytes: int, the minimum number of bytes to accumulate before
                       returning the response
        """
        payloads = [] if payloads is None else payloads
        grouped = group_by_topic_and_partition(payloads)

        # Accumulate into a mutable buffer, then freeze to bytes.
        buf = bytearray()
        buf += cls._encode_message_header(client_id, correlation_id,
                                          KafkaProtocol.FETCH_KEY)
        # -1 is the replica id
        buf += struct.pack('>iiii', -1, max_wait_time, min_bytes,
                           len(grouped))

        for topic, topic_payloads in grouped.items():
            buf += write_short_string(topic)
            buf += struct.pack('>i', len(topic_payloads))
            for partition, payload in topic_payloads.items():
                buf += struct.pack('>iqi', partition, payload.offset,
                                   payload.max_bytes)

        msg = bytes(buf)
        return struct.pack('>i%ds' % len(msg), len(msg), msg)
示例#14
0
    def encode_offset_fetch_request(cls, client_id, correlation_id, group, payloads):
        """
        Encode some OffsetFetchRequest structs

        Arguments:
            client_id: string
            correlation_id: int
            group: string, the consumer group you are fetching offsets for
            payloads: list of OffsetFetchRequest
        """
        grouped = group_by_topic_and_partition(payloads)

        chunks = [
            cls._encode_message_header(client_id, correlation_id, KafkaProtocol.OFFSET_FETCH_KEY),
            write_short_string(group),
            struct.pack(">i", len(grouped)),
        ]

        for topic, topic_payloads in grouped.items():
            chunks.append(write_short_string(topic))
            chunks.append(struct.pack(">i", len(topic_payloads)))
            for partition, payload in topic_payloads.items():
                chunks.append(struct.pack(">i", partition))

        msg = b"".join(chunks)
        return struct.pack(">i%ds" % len(msg), len(msg), msg)
示例#15
0
 def encode_list_offset_request(cls, payloads=()):
     """Build a v1 OffsetRequest from a collection of request payloads."""
     grouped = group_by_topic_and_partition(payloads)
     topics = []
     for topic, topic_payloads in six.iteritems(grouped):
         times = [(partition, payload.time)
                  for partition, payload in six.iteritems(topic_payloads)]
         topics.append((topic, times))
     return kafka.protocol.offset.OffsetRequest[1](
         replica_id=-1,
         topics=topics)
示例#16
0
 def encode_list_offset_request(cls, payloads=()):
     """Build a v1 OffsetRequest from a collection of request payloads."""
     by_topic = group_by_topic_and_partition(payloads)
     return kafka.protocol.offset.OffsetRequest[1](
         replica_id=-1,
         topics=[(topic,
                  [(partition, payload.time)
                   for partition, payload in six.iteritems(partitions)])
                 for topic, partitions in six.iteritems(by_topic)])
示例#17
0
 def encode_offset_request(cls, client_id, correlation_id, payloads=None):
     """
     Encode some OffsetRequest structs

     Arguments:
         client_id: string
         correlation_id: int
         payloads: list of OffsetRequest
     """
     # None default instead of a mutable default argument ([]).
     payloads = [] if payloads is None else payloads
     grouped_payloads = group_by_topic_and_partition(payloads)

     # Join byte fragments once at the end; repeated bytes += is quadratic.
     message = []
     message.append(cls._encode_message_header(client_id, correlation_id,
                                               KafkaProtocol.OFFSET_KEY))
     # -1 is the replica id
     message.append(struct.pack('>ii', -1, len(grouped_payloads)))

     for topic, topic_payloads in grouped_payloads.items():
         message.append(write_short_string(topic))
         message.append(struct.pack('>i', len(topic_payloads)))
         for partition, payload in topic_payloads.items():
             message.append(struct.pack('>iqi', partition, payload.time,
                                        payload.max_offsets))

     msg = b''.join(message)
     return struct.pack('>i%ds' % len(msg), len(msg), msg)
示例#18
0
    def encode_offset_commit_request(cls, group, payloads):
        """
        Encode an OffsetCommitRequest struct

        Arguments:
            group: string, the consumer group you are committing offsets for
            payloads: list of OffsetCommitRequestPayload
        """
        grouped = group_by_topic_and_partition(payloads)
        topics = []
        for topic, topic_payloads in six.iteritems(grouped):
            entries = [(partition, payload.offset, payload.metadata)
                       for partition, payload in six.iteritems(topic_payloads)]
            topics.append((topic, entries))
        return kafka.protocol.commit.OffsetCommitRequest[0](
            consumer_group=group,
            topics=topics)
示例#19
0
    def encode_produce_request(cls,
                               client_id,
                               correlation_id,
                               payloads=None,
                               acks=1,
                               timeout=1000):
        """
        Encode some ProduceRequest structs

        Arguments:
            client_id: string
            correlation_id: int
            payloads: list of ProduceRequest
            acks: How "acky" you want the request to be
                0: immediate response
                1: written to disk by the leader
                2+: waits for this many number of replicas to sync
                -1: waits for all replicas to be in sync
            timeout: Maximum time the server will wait for acks from replicas.
                     This is _not_ a socket timeout
        """
        if payloads is None:
            payloads = []
        grouped = group_by_topic_and_partition(payloads)

        pieces = [
            cls._encode_message_header(client_id, correlation_id,
                                       KafkaProtocol.PRODUCE_KEY),
            struct.pack('>hii', acks, timeout, len(grouped)),
        ]

        for topic, topic_payloads in grouped.items():
            pieces.append(struct.pack('>h%dsi' % len(topic), len(topic), topic,
                                      len(topic_payloads)))
            for partition, payload in topic_payloads.items():
                encoded_set = KafkaProtocol._encode_message_set(payload.messages)
                pieces.append(struct.pack('>ii%ds' % len(encoded_set), partition,
                                          len(encoded_set), encoded_set))

        body = b''.join(pieces)
        return struct.pack('>i%ds' % len(body), len(body), body)
示例#20
0
    def encode_offset_fetch_request(cls, group, payloads, from_kafka=False):
        """
        Encode an OffsetFetchRequest struct. The request is encoded using
        version 0 if from_kafka is false, indicating a request for Zookeeper
        offsets. It is encoded using version 1 otherwise, indicating a request
        for Kafka offsets.

        Arguments:
            group: string, the consumer group you are fetching offsets for
            payloads: list of OffsetFetchRequestPayload
            from_kafka: bool, default False, set True for Kafka-committed offsets
        """
        version = 1 if from_kafka else 0
        grouped = group_by_topic_and_partition(payloads)
        topics = [(topic, list(partitions.keys()))
                  for topic, partitions in six.iteritems(grouped)]
        return kafka.protocol.commit.OffsetFetchRequest[version](
            consumer_group=group,
            topics=topics)
示例#21
0
    def encode_offset_commit_request(cls, group, payloads):
        """
        Encode an OffsetCommitRequest struct

        Arguments:
            group: string, the consumer group you are committing offsets for
            payloads: list of OffsetCommitRequestPayload
        """
        grouped = group_by_topic_and_partition(payloads)
        topics = [
            (topic,
             [(partition, payload.offset, payload.metadata)
              for partition, payload in six.iteritems(partition_map)])
            for topic, partition_map in six.iteritems(grouped)
        ]
        return kafka.protocol.commit.OffsetCommitRequest[0](
            consumer_group=group,
            topics=topics)
示例#22
0
    def encode_offset_request(cls, client_id, correlation_id, payloads=None):
        """
        Encode some OffsetRequest structs

        Arguments:
            client_id: string
            correlation_id: int
            payloads: list of OffsetRequest
        """
        payloads = [] if payloads is None else payloads
        grouped_payloads = group_by_topic_and_partition(payloads)

        # Collect fragments and join once; repeated bytes += is quadratic.
        message = []
        message.append(cls._encode_message_header(client_id, correlation_id,
                                                  KafkaProtocol.OFFSET_KEY))
        # -1 is the replica id
        message.append(struct.pack('>ii', -1, len(grouped_payloads)))

        for topic, topic_payloads in grouped_payloads.items():
            message.append(write_short_string(topic))
            message.append(struct.pack('>i', len(topic_payloads)))
            for partition, payload in topic_payloads.items():
                message.append(struct.pack('>iqi', partition, payload.time,
                                           payload.max_offsets))

        msg = b''.join(message)
        return struct.pack('>i%ds' % len(msg), len(msg), msg)
示例#23
0
    def encode_offset_fetch_request(cls, group, payloads, from_kafka=False):
        """
        Encode an OffsetFetchRequest struct. Version 0 is used if from_kafka
        is false, indicating a request for Zookeeper offsets; version 1 is
        used otherwise, indicating a request for Kafka offsets.

        Arguments:
            group: string, the consumer group you are fetching offsets for
            payloads: list of OffsetFetchRequestPayload
            from_kafka: bool, default False, set True for Kafka-committed offsets
        """
        request_version = 1 if from_kafka else 0
        topic_partitions = []
        for topic, partition_map in six.iteritems(
                group_by_topic_and_partition(payloads)):
            topic_partitions.append((topic, list(partition_map.keys())))
        return kafka.protocol.commit.OffsetFetchRequest[request_version](
            consumer_group=group,
            topics=topic_partitions)
示例#24
0
 def encode_offset_commit_request_kafka(cls, group, payloads):
     """
     Encode an OffsetCommitRequest struct (wire version 2)
     Arguments:
         group: string, the consumer group you are committing offsets for
         payloads: list of OffsetCommitRequestPayload
     """
     # Bind the request class once to avoid repeating the long lookup.
     request_class = kafka.protocol.commit.OffsetCommitRequest[2]
     topics = []
     for topic, topic_payloads in six.iteritems(
             group_by_topic_and_partition(payloads)):
         offsets = [(partition, payload.offset, payload.metadata)
                    for partition, payload in six.iteritems(topic_payloads)]
         topics.append((topic, offsets))
     return request_class(
         consumer_group=group,
         consumer_group_generation_id=request_class.DEFAULT_GENERATION_ID,
         consumer_id='',
         retention_time=request_class.DEFAULT_RETENTION_TIME,
         topics=topics)
示例#25
0
    def encode_offset_request(cls, client_id, correlation_id, payloads=None):
        """
        Encode some OffsetRequest structs

        Arguments:
            client_id: string
            correlation_id: int
            payloads: list of OffsetRequest
        """
        if payloads is None:
            payloads = []
        grouped = group_by_topic_and_partition(payloads)

        chunks = [
            cls._encode_message_header(client_id, correlation_id, KafkaProtocol.OFFSET_KEY),
            # -1 is the replica id
            struct.pack(">ii", -1, len(grouped)),
        ]

        for topic, topic_payloads in grouped.items():
            chunks.append(write_short_string(topic))
            chunks.append(struct.pack(">i", len(topic_payloads)))
            for partition, payload in topic_payloads.items():
                chunks.append(struct.pack(">iqi", partition, payload.time, payload.max_offsets))

        msg = b"".join(chunks)
        return struct.pack(">i%ds" % len(msg), len(msg), msg)
示例#26
0
 def encode_offset_commit_request_kafka(cls, group, payloads):
     """
     Encode an OffsetCommitRequest struct (wire version 2)
     Arguments:
         group: string, the consumer group you are committing offsets for
         payloads: list of OffsetCommitRequestPayload
     """
     # Alias the request class to keep the constructor call readable.
     req = kafka.protocol.commit.OffsetCommitRequest[2]
     grouped = group_by_topic_and_partition(payloads)
     topics = []
     for topic, partition_map in six.iteritems(grouped):
         commits = [(partition, payload.offset, payload.metadata)
                    for partition, payload in six.iteritems(partition_map)]
         topics.append((topic, commits))
     return req(
         consumer_group=group,
         consumer_group_generation_id=req.DEFAULT_GENERATION_ID,
         consumer_id='',
         retention_time=req.DEFAULT_RETENTION_TIME,
         topics=topics)
示例#27
0
    def encode_offset_fetch_request(cls,
                                    client_id,
                                    correlation_id,
                                    group,
                                    payloads,
                                    from_kafka=False):
        """
        Encode some OffsetFetchRequest structs. The request is encoded using
        version 0 if from_kafka is false, indicating a request for Zookeeper
        offsets. It is encoded using version 1 otherwise, indicating a request
        for Kafka offsets.

        Arguments:
            client_id: string
            correlation_id: int
            group: string, the consumer group you are fetching offsets for
            payloads: list of OffsetFetchRequest
            from_kafka: bool, default False, set True for Kafka-committed offsets
        """
        grouped = group_by_topic_and_partition(payloads)

        request_version = 1 if from_kafka else 0
        fragments = [cls._encode_message_header(client_id,
                                                correlation_id,
                                                KafkaProtocol.OFFSET_FETCH_KEY,
                                                version=request_version),
                     write_short_string(group),
                     struct.pack('>i', len(grouped))]

        for topic, topic_payloads in grouped.items():
            fragments.append(write_short_string(topic))
            fragments.append(struct.pack('>i', len(topic_payloads)))
            for partition, payload in topic_payloads.items():
                fragments.append(struct.pack('>i', partition))

        msg = b''.join(fragments)
        return struct.pack('>i%ds' % len(msg), len(msg), msg)
示例#28
0
    def encode_fetch_request(cls, client_id, correlation_id, payloads=None, max_wait_time=100, min_bytes=4096):
        """
        Encodes some FetchRequest structs

        Arguments:
            client_id: string
            correlation_id: int
            payloads: list of FetchRequest
            max_wait_time: int, how long to block waiting on min_bytes of data
            min_bytes: int, the minimum number of bytes to accumulate before
                       returning the response
        """
        # None default instead of a mutable default argument ([]).
        payloads = [] if payloads is None else payloads
        grouped_payloads = group_by_topic_and_partition(payloads)

        # Join byte fragments once at the end; repeated bytes += is quadratic.
        message = []
        message.append(cls._encode_message_header(client_id, correlation_id,
                                                  KafkaProtocol.FETCH_KEY))
        # -1 is the replica id
        message.append(struct.pack('>iiii', -1, max_wait_time, min_bytes,
                                   len(grouped_payloads)))

        for topic, topic_payloads in grouped_payloads.items():
            message.append(write_short_string(topic))
            message.append(struct.pack('>i', len(topic_payloads)))
            for partition, payload in topic_payloads.items():
                message.append(struct.pack('>iqi', partition, payload.offset,
                                           payload.max_bytes))

        msg = b''.join(message)
        return struct.pack('>i%ds' % len(msg), len(msg), msg)