Example no. 1
    def _build(self):
        if self._closed:
            self._buffer.seek(0)
            return self._buffer

        self._closed = True
        memview = self._buffer.getbuffer()
        if self._compression_type:
            _, compressor, attrs = self._COMPRESSORS[self._compression_type]
            msg = Message(compressor(memview[4:].tobytes()),
                          attributes=attrs,
                          magic=self._magic)
            encoded = msg.encode()
            # if the compressed message is longer than the original,
            # send it as is (uncompressed)
            header_size = 16  # 4(all size) + 8(offset) + 4(compressed size)
            if len(encoded) + header_size < len(memview):
                # write the compressed message set (with header) to the buffer
                # via the memory view (to avoid copying memory)
                memview[:4] = Int32.encode(len(encoded) + 12)
                memview[4:12] = Int64.encode(0)  # offset 0
                memview[12:16] = Int32.encode(len(encoded))
                memview[16:16 + len(encoded)] = encoded

                memview.release()
                self._buffer.seek(16 + len(encoded))
                self._buffer.truncate()
                self._buffer.seek(0)
                return self._buffer

        # update batch size (first 4 bytes of buffer)
        memview[:4] = Int32.encode(self._buffer.tell() - 4)
        self._buffer.seek(0)
        return self._buffer
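
For context, the 16-byte header written above can be sketched with plain struct packing. This is a hedged illustration of the same layout, where `compressed` is a hypothetical stand-in for the encoded wrapper Message:

import struct

compressed = b'<encoded wrapper message>'          # hypothetical payload
header = struct.pack('>i', len(compressed) + 12)   # all size: offset + size + payload
header += struct.pack('>q', 0)                     # offset 0 for the wrapper message
header += struct.pack('>i', len(compressed))       # compressed size
assert len(header) == 16                           # 4 (all size) + 8 (offset) + 4 (compressed size)
frame = header + compressed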
Example no. 2
    def append(self, key, value, timestamp_ms):
        """Append message (key and value) to batch

        Returns:
            None if batch is full
              or
            asyncio.Future that will resolved when message is delivered
        """
        if self._is_full(key, value):
            return None

        # `.encode()` is a weak method for some reason, so we need to keep a
        # reference to the instance before calling it.
        if self._version_id == 0:
            msg_inst = Message(value, key=key, magic=self._version_id)
        else:
            msg_inst = Message(value,
                               key=key,
                               magic=self._version_id,
                               timestamp=timestamp_ms)

        encoded = msg_inst.encode()
        msg = Int64.encode(self._relative_offset) + Int32.encode(len(encoded))
        msg += encoded
        self._buffer.write(msg)

        future = asyncio.Future(loop=self._loop)
        self._msg_futures.append(future)
        self._relative_offset += 1
        return future
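
A minimal usage sketch (not from the source) of the contract in the docstring above; `batch`, `key`, `value` and `timestamp_ms` are assumed to be supplied by the caller:

async def produce_one(batch, key, value, timestamp_ms):
    # append() returns None when the batch has no more room
    fut = batch.append(key, value, timestamp_ms)
    if fut is None:
        return None  # caller should roll over to a fresh batch and retry
    # otherwise it is an asyncio.Future resolved when the message is delivered
    return await fut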
Example no. 3
def test_decode_message_set_partial():
    encoded = b''.join([
        struct.pack('>q', 0),  # Msg Offset
        struct.pack('>i', 18),  # Msg Size
        struct.pack('>i', 1474775406),  # CRC
        struct.pack('>bb', 0, 0),  # Magic, flags
        struct.pack('>i', 2),  # Length of key
        b'k1',  # Key
        struct.pack('>i', 2),  # Length of value
        b'v1',  # Value
        struct.pack('>q', 1),  # Msg Offset
        struct.pack('>i', 24),  # Msg Size (larger than remaining MsgSet size)
        struct.pack('>i', -16383415),  # CRC
        struct.pack('>bb', 0, 0),  # Magic, flags
        struct.pack('>i', 2),  # Length of key
        b'k2',  # Key
        struct.pack('>i', 8),  # Length of value
        b'ar',  # Value (truncated)
    ])

    msgs = MessageSet.decode(encoded, bytes_to_read=len(encoded))
    assert len(msgs) == 2
    msg1, msg2 = msgs

    returned_offset1, message1_size, decoded_message1 = msg1
    returned_offset2, message2_size, decoded_message2 = msg2

    assert returned_offset1 == 0
    message1 = Message(b'v1', key=b'k1')
    message1.encode()
    assert decoded_message1 == message1

    assert returned_offset2 is None
    assert message2_size is None
    assert decoded_message2 == PartialMessage()
Example no. 4
    def drain_ready(self):
        """Compress batch to be ready for send"""
        memview = self._buffer.getbuffer()
        self._drain_waiter.set_result(None)
        if self._compression_type:
            _, compressor, attrs = self._COMPRESSORS[self._compression_type]
            msg = Message(compressor(memview[4:].tobytes()), attributes=attrs,
                          magic=self._version_id)
            encoded = msg.encode()
            # if the compressed message is longer than the original,
            # send it as is (uncompressed)
            header_size = 16   # 4(all size) + 8(offset) + 4(compressed size)
            if len(encoded) + header_size < len(memview):
                # write the compressed message set (with header) to the buffer
                # via the memory view (to avoid copying memory)
                memview[:4] = Int32.encode(len(encoded) + 12)
                memview[4:12] = Int64.encode(0)  # offset 0
                memview[12:16] = Int32.encode(len(encoded))
                memview[16:16 + len(encoded)] = encoded
                self._buffer.seek(0)
                return

        # update batch size (first 4 bytes of buffer)
        memview[:4] = Int32.encode(self._buffer.tell() - 4)
        self._buffer.seek(0)
Example no. 5
def test_encode_message_set():
    messages = [
        Message(b'v1', key=b'k1'),
        Message(b'v2', key=b'k2')
    ]
    encoded = MessageSet.encode([(0, msg.encode())
                                 for msg in messages],
                                size=False)
    expect = b''.join([
        struct.pack('>q', 0),          # MsgSet Offset
        struct.pack('>i', 18),         # Msg Size
        struct.pack('>i', 1474775406), # CRC
        struct.pack('>bb', 0, 0),      # Magic, flags
        struct.pack('>i', 2),          # Length of key
        b'k1',                          # Key
        struct.pack('>i', 2),          # Length of value
        b'v1',                          # Value

        struct.pack('>q', 0),          # MsgSet Offset
        struct.pack('>i', 18),         # Msg Size
        struct.pack('>i', -16383415),  # CRC
        struct.pack('>bb', 0, 0),      # Magic, flags
        struct.pack('>i', 2),          # Length of key
        b'k2',                          # Key
        struct.pack('>i', 2),          # Length of value
        b'v2',                          # Value
    ])
    assert encoded == expect
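
As a quick sanity check (not part of the original test), the 'Msg Size' value of 18 above is just the length of the encoded v0 message that follows the size field:

crc = 4
magic_and_attrs = 1 + 1
key = 4 + len(b'k1')      # length prefix + key bytes
value = 4 + len(b'v1')    # length prefix + value bytes
assert crc + magic_and_attrs + key + value == 18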
Example no. 6
def test__unpack_message_set_compressed_v1(fetcher):
    fetcher.config['check_crcs'] = False
    tp = TopicPartition('foo', 0)
    messages = [
        (0, None, Message(b'a')),
        (1, None, Message(b'b')),
        (2, None, Message(b'c')),
    ]
    message_bytes = []
    for offset, _, m in messages:
        encoded = m.encode()
        message_bytes.append(
            Int64.encode(offset) + Int32.encode(len(encoded)) + encoded)
    compressed_bytes = gzip_encode(b''.join(message_bytes))
    compressed_base_offset = 10
    compressed_msgs = [(compressed_base_offset, None,
                        Message(compressed_bytes,
                                magic=1,
                                attributes=Message.CODEC_GZIP))]
    records = list(fetcher._unpack_message_set(tp, compressed_msgs))
    assert len(records) == 3
    assert all(map(lambda x: isinstance(x, ConsumerRecord), records))
    assert records[0].value == b'a'
    assert records[1].value == b'b'
    assert records[2].value == b'c'
    assert records[0].offset == 8
    assert records[1].offset == 9
    assert records[2].offset == 10
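
The offset assertions at the end follow from how compressed v1 message sets are addressed: the wrapper message carries the offset of the last inner message, so absolute inner offsets count back from it. A small illustration of that arithmetic (an explanatory sketch, not test code):

compressed_base_offset = 10   # offset stored on the wrapper message
inner_count = 3               # messages inside the compressed set
absolute_offsets = [
    compressed_base_offset - (inner_count - 1) + i
    for i in range(inner_count)
]
assert absolute_offsets == [8, 9, 10]  # matches the assertions above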
Example no. 7
def test_encode_message_v0():
    message = Message(b'test', key=b'key')
    encoded = message.encode()
    expect = b''.join([
        struct.pack('>i', -1427009701), # CRC
        struct.pack('>bb', 0, 0),       # Magic, flags
        struct.pack('>i', 3),           # Length of key
        b'key',                         # key
        struct.pack('>i', 4),           # Length of value
        b'test',                        # value
    ])
    assert encoded == expect
Example no. 9
def test_encode_message_v1():
    message = Message(b'test', key=b'key', magic=1, timestamp=1234)
    encoded = message.encode()
    expect = b''.join([
        struct.pack('>i', 1331087195),  # CRC
        struct.pack('>bb', 1, 0),       # Magic, flags
        struct.pack('>q', 1234),        # Timestamp
        struct.pack('>i', 3),           # Length of key
        b'key',                         # key
        struct.pack('>i', 4),           # Length of value
        b'test',                        # value
    ])
    assert encoded == expect
Example no. 11
def test_decode_message():
    encoded = b''.join([
        struct.pack('>i', -1427009701),  # CRC
        struct.pack('>bb', 0, 0),  # Magic, flags
        struct.pack('>i', 3),  # Length of key
        b'key',  # key
        struct.pack('>i', 4),  # Length of value
        b'test',  # value
    ])
    decoded_message = Message.decode(encoded)
    msg = Message(b'test', key=b'key')
    msg.encode()  # crc is recalculated during encoding
    assert decoded_message == msg
Example no. 13
def test__unpack_message_set(fetcher):
    fetcher.config['check_crcs'] = False
    tp = TopicPartition('foo', 0)
    messages = [(0, None, Message(b'a')), (1, None, Message(b'b')),
                (2, None, Message(b'c'))]
    records = list(fetcher._unpack_message_set(tp, messages))
    assert len(records) == 3
    assert all(map(lambda x: isinstance(x, ConsumerRecord), records))
    assert records[0].value == b'a'
    assert records[1].value == b'b'
    assert records[2].value == b'c'
    assert records[0].offset == 0
    assert records[1].offset == 1
    assert records[2].offset == 2
Example no. 14
def test_compressed_buffer_close(compression):
    records = MessageSetBuffer(io.BytesIO(), 100000, compression_type=compression)
    orig_msg = Message(b'foobar')
    records.append(1234, orig_msg)
    records.close()

    msgset = MessageSet.decode(records.buffer())
    assert len(msgset) == 1
    (offset, size, msg) = msgset[0]
    assert offset == 0
    assert msg.is_compressed()

    msgset = msg.decompress()
    (offset, size, msg) = msgset[0]
    assert not msg.is_compressed()
    assert offset == 1234
    assert msg == orig_msg

    # Closing again should work fine
    records.close()

    msgset = MessageSet.decode(records.buffer())
    assert len(msgset) == 1
    (offset, size, msg) = msgset[0]
    assert offset == 0
    assert msg.is_compressed()

    msgset = msg.decompress()
    (offset, size, msg) = msgset[0]
    assert not msg.is_compressed()
    assert offset == 1234
    assert msg == orig_msg
Example no. 15
def test_create_message():
    payload = b'test'
    key = b'key'
    msg = Message(payload, key=key)
    assert msg.magic == 0
    assert msg.attributes == 0
    assert msg.key == key
    assert msg.value == payload
Example no. 16
def test__parse_record(fetcher):
    tp = TopicPartition('foo', 0)
    record = fetcher._parse_record(tp, 123, 456, Message(b'abc'))
    assert record.topic == 'foo'
    assert record.partition == 0
    assert record.offset == 123
    assert record.timestamp == 456
    assert record.value == b'abc'
    assert record.key is None
Example no. 17
def test__parse_fetched_data__stale_offset(fetcher, topic, mocker):
    fetcher.config['check_crcs'] = False
    tp = TopicPartition(topic, 0)
    msgs = []
    for i in range(10):
        msg = Message(b'foo')
        msgs.append((i, -1, msg))
    completed_fetch = CompletedFetch(tp, 10, 0, [0, 100, msgs],
                                     mocker.MagicMock())
    partition_record = fetcher._parse_fetched_data(completed_fetch)
    assert partition_record is None
Example no. 18
    def test_send_without_response(self):
        """Imitate producer without acknowledge, in this case client produces
        messages and kafka does not send response, and we make sure that
        futures do not stuck in queue forever"""

        host, port = self.kafka_host, self.kafka_port
        conn = yield from create_conn(host, port, loop=self.loop)

        # prepare message
        msg = Message(b'foo')
        request = ProduceRequest(required_acks=0,
                                 timeout=10 * 1000,
                                 topics=[(b'foo', [(0, [(0, msg.encode())])])])

        # produce messages without acknowledge
        for i in range(100):
            conn.send(request, expect_response=False)
        # make sure futures are not stuck in the queue
        self.assertEqual(len(conn._requests), 0)
        conn.close()
Example no. 19
    def append(self, *, timestamp, key, value):
        if not self._has_room_for(key, value):
            return 0

        # `.encode()` is a weak method for some reason, so we need to keep a
        # reference to the instance before calling it.
        if self._magic == 0:
            msg_inst = Message(value, key=key, magic=self._magic)
        else:
            msg_inst = Message(value,
                               key=key,
                               magic=self._magic,
                               timestamp=timestamp)

        encoded = msg_inst.encode()
        msg = Int64.encode(self._relative_offset) + Int32.encode(len(encoded))
        msg += encoded
        actual_size = self._buffer.write(msg)
        self._relative_offset += 1
        return actual_size
Example no. 20
def test__parse_fetched_data(fetcher, topic, mocker):
    fetcher.config['check_crcs'] = False
    tp = TopicPartition(topic, 0)
    msgs = []
    for i in range(10):
        msg = Message(b'foo')
        msgs.append((i, -1, msg))
    completed_fetch = CompletedFetch(tp, 0, 0, [0, 100, msgs],
                                     mocker.MagicMock())
    partition_record = fetcher._parse_fetched_data(completed_fetch)
    assert isinstance(partition_record, fetcher.PartitionRecords)
    assert len(partition_record) == 10
Example no. 21
def test_decode_message_validate_crc():
    encoded = b''.join([
        struct.pack('>i', -1427009701), # CRC
        struct.pack('>bb', 0, 0),       # Magic, flags
        struct.pack('>i', 3),           # Length of key
        b'key',                         # key
        struct.pack('>i', 4),           # Length of value
        b'test',                        # value
    ])
    decoded_message = Message.decode(encoded)
    assert decoded_message.validate_crc() is True

    encoded = b''.join([
        struct.pack('>i', 1234),           # Incorrect CRC
        struct.pack('>bb', 0, 0),       # Magic, flags
        struct.pack('>i', 3),           # Length of key
        b'key',                         # key
        struct.pack('>i', 4),           # Length of value
        b'test',                        # value
    ])
    decoded_message = Message.decode(encoded)
    assert decoded_message.validate_crc() is False
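
A hedged sketch of what validate_crc() is checking, assuming the v0/v1 wire format where the CRC-32 covers every byte after the 4-byte CRC field itself:

import struct
import zlib

encoded = b''.join([
    struct.pack('>i', -1427009701),  # CRC
    struct.pack('>bb', 0, 0),        # Magic, flags
    struct.pack('>i', 3), b'key',    # key
    struct.pack('>i', 4), b'test',   # value
])
stored_crc = struct.unpack('>i', encoded[:4])[0]
computed = zlib.crc32(encoded[4:]) & 0xffffffff
computed_signed = struct.unpack('>i', struct.pack('>I', computed))[0]
print(stored_crc == computed_signed)  # expected True if the assumption holds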
Example no. 24
def test_fetched_records(fetcher, topic, mocker):
    fetcher.config['check_crcs'] = False
    tp = TopicPartition(topic, 0)
    msgs = []
    for i in range(10):
        msg = Message(b'foo')
        msgs.append((i, -1, msg))
    completed_fetch = CompletedFetch(tp, 0, 0, [0, 100, msgs],
                                     mocker.MagicMock())
    fetcher._completed_fetches.append(completed_fetch)
    records, partial = fetcher.fetched_records()
    assert tp in records
    assert len(records[tp]) == len(msgs)
    assert all(map(lambda x: isinstance(x, ConsumerRecord), records[tp]))
    assert partial is False
Example no. 25
def test__message_generator(fetcher, topic, mocker):
    fetcher.config['check_crcs'] = False
    tp = TopicPartition(topic, 0)
    msgs = []
    for i in range(10):
        msg = Message(b'foo')
        msgs.append((i, -1, msg))
    completed_fetch = CompletedFetch(tp, 0, 0, [0, 100, msgs],
                                     mocker.MagicMock())
    fetcher._completed_fetches.append(completed_fetch)
    for i in range(10):
        msg = next(fetcher)
        assert isinstance(msg, ConsumerRecord)
        assert msg.offset == i
        assert msg.value == b'foo'
Example no. 26
    def append(self, key, value):
        """Append message (key and value) to batch

        Returns:
            None if batch is full
              or
            asyncio.Future that will resolved when message is delivered
        """
        if not self._records.has_room_for(key, value):
            return None
        self._records.append(self._relative_offset, Message(value, key=key))
        future = asyncio.Future(loop=self._loop)
        self._msg_futures.append(future)
        self._relative_offset += 1
        return future
Example no. 27
    def drain_ready(self):
        """Compress batch to be ready for send"""
        memview = self._buffer.getbuffer()
        self._drain_waiter.set_result(None)
        if self._compression_type:
            _, compressor, attrs = self._COMPRESSORS[self._compression_type]
            msg = Message(compressor(memview[4:].tobytes()), attributes=attrs)
            encoded = msg.encode()
            # if the compressed message is longer than the original,
            # send it as is (uncompressed)
            header_size = 16   # 4(all size) + 8(offset) + 4(compressed size)
            if len(encoded) + header_size < len(memview):
                # write the compressed message set (with header) to the buffer
                # via the memory view (to avoid copying memory)
                memview[:4] = Int32.encode(len(encoded) + 12)
                memview[4:12] = Int64.encode(0)  # offset 0
                memview[12:16] = Int32.encode(len(encoded))
                memview[16:16 + len(encoded)] = encoded
                self._buffer.seek(0)
                return

        # update batch size (first 4 bytes of buffer)
        memview[:4] = Int32.encode(self._buffer.tell() - 4)
        self._buffer.seek(0)
Example no. 28
    def append(self, key, value):
        """Append message (key and value) to batch

        Returns:
            None if batch is full
              or
            asyncio.Future that will resolved when message is delivered
        """
        if self._is_full(key, value):
            return None

        encoded = Message(value, key=key).encode()
        msg = Int64.encode(self._relative_offset) + Int32.encode(len(encoded))
        msg += encoded
        self._buffer.write(msg)

        future = asyncio.Future(loop=self._loop)
        self._msg_futures.append(future)
        self._relative_offset += 1
        return future
Example no. 29
def test_buffer_close():
    records = MessageSetBuffer(io.BytesIO(), 100000)
    orig_msg = Message(b'foobar')
    records.append(1234, orig_msg)
    records.close()

    msgset = MessageSet.decode(records.buffer())
    assert len(msgset) == 1
    (offset, size, msg) = msgset[0]
    assert offset == 1234
    assert msg == orig_msg

    # Closing again should work fine
    records.close()

    msgset = MessageSet.decode(records.buffer())
    assert len(msgset) == 1
    (offset, size, msg) = msgset[0]
    assert offset == 1234
    assert msg == orig_msg
Example no. 30
def test_decode_message_set():
    encoded = b''.join([
        struct.pack('>q', 0),          # MsgSet Offset
        struct.pack('>i', 18),         # Msg Size
        struct.pack('>i', 1474775406), # CRC
        struct.pack('>bb', 0, 0),      # Magic, flags
        struct.pack('>i', 2),          # Length of key
        b'k1',                         # Key
        struct.pack('>i', 2),          # Length of value
        b'v1',                         # Value

        struct.pack('>q', 1),          # MsgSet Offset
        struct.pack('>i', 18),         # Msg Size
        struct.pack('>i', -16383415),  # CRC
        struct.pack('>bb', 0, 0),      # Magic, flags
        struct.pack('>i', 2),          # Length of key
        b'k2',                         # Key
        struct.pack('>i', 2),          # Length of value
        b'v2',                         # Value
    ])

    msgs = MessageSet.decode(encoded, bytes_to_read=len(encoded))
    assert len(msgs) == 2
    msg1, msg2 = msgs

    returned_offset1, message1_size, decoded_message1 = msg1
    returned_offset2, message2_size, decoded_message2 = msg2

    assert returned_offset1 == 0
    message1 = Message(b'v1', key=b'k1')
    message1.encode()
    assert decoded_message1 == message1

    assert returned_offset2 == 1
    message2 = Message(b'v2', key=b'k2')
    message2.encode()
    assert decoded_message2 == message2
Example no. 32
    def test_compacted_topic_consumption(self):
        # Compacted topics can have offsets skipped
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=[])
        client.ready = mock.MagicMock()
        client.ready.side_effect = asyncio.coroutine(lambda a: True)
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = asyncio.coroutine(
            lambda: False)
        client.send = mock.MagicMock()

        subscriptions = SubscriptionState('latest')
        fetcher = Fetcher(client, subscriptions, loop=self.loop)

        tp = TopicPartition('test', 0)
        req = FetchRequest(
            -1,  # replica_id
            100,
            100,
            [(tp.topic, [(tp.partition, 155, 100000)])])
        msg1 = Message(b"12345", key=b"1")
        msg1._encode_self()
        msg2 = Message(b"23456", key=b"2")
        msg2._encode_self()
        msg3 = Message(b"34567", key=b"3")
        msg3._encode_self()
        resp = FetchResponse([(
            'test',
            [(
                0,
                0,
                3000,  # partition, error_code, highwater_offset
                [
                    (160, 5, msg1),  # offset, len_bytes, bytes
                    (162, 5, msg2),
                    (167, 5, msg3),
                ])])])

        client.send.side_effect = asyncio.coroutine(lambda n, r: resp)
        state = TopicPartitionState()
        state.seek(155)
        state.drop_pending_message_set = False
        subscriptions.assignment[tp] = state
        subscriptions.needs_partition_assignment = False
        fetcher._in_flight.add(0)

        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        # Test successful getone
        first = buf.getone()
        self.assertEqual(state.position, 161)
        self.assertEqual((first.value, first.key, first.offset),
                         (msg1.value, msg1.key, 160))

        # Test successful getmany
        second, third = buf.getall()
        self.assertEqual(state.position, 168)
        self.assertEqual((second.value, second.key, second.offset),
                         (msg2.value, msg2.key, 162))
        self.assertEqual((third.value, third.key, third.offset),
                         (msg3.value, msg3.key, 167))
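
The position bookkeeping this test asserts can be summarized briefly: after consuming a record, the fetch position becomes that record's offset + 1, even when compaction leaves gaps between the returned offsets. An illustrative check (not part of the test):

consumed = [160, 162, 167]       # offsets returned despite the gaps
assert consumed[0] + 1 == 161    # position after getone()
assert consumed[-1] + 1 == 168   # position after getall()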
Example no. 33
    def test_proc_fetch_request(self):
        client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=[])
        subscriptions = SubscriptionState('latest')
        fetcher = Fetcher(client, subscriptions, loop=self.loop)

        tp = TopicPartition('test', 0)
        tp_info = (tp.topic, [(tp.partition, 155, 100000)])
        req = FetchRequest(
            -1,  # replica_id
            100, 100, [tp_info])

        client.ready = mock.MagicMock()
        client.ready.side_effect = asyncio.coroutine(lambda a: True)
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = asyncio.coroutine(
            lambda: False)
        client.send = mock.MagicMock()
        msg = Message(b"test msg")
        msg._encode_self()
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 0, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)

        state = TopicPartitionState()
        state.seek(0)
        subscriptions.assignment[tp] = state
        subscriptions.needs_partition_assignment = False
        fetcher._in_flight.add(0)
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        self.assertEqual(buf.getone(), None)  # invalid offset, msg is ignored

        state.seek(4)
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        self.assertEqual(buf.getone().value, b"test msg")

        # error -> no partition found
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 3, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)

        # error -> topic auth failed
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 29, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(TopicAuthorizationFailedError):
            yield from fetcher.next_record([])

        # error -> unknown
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, -1, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)

        # error -> offset out of range
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 1, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)
        self.assertEqual(state.is_fetchable(), False)

        state.seek(4)
        subscriptions._default_offset_reset_strategy = OffsetResetStrategy.NONE
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 1, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(OffsetOutOfRangeError):
            yield from fetcher.next_record([])

        yield from fetcher.close()
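
For reference, the partition error codes wired into the fake FetchResponse above appear to map to Kafka protocol errors as the inline comments suggest; a small lookup table (an assumption for illustration, not imported by the test):

FETCH_ERROR_CODES = {
    0: 'NoError',
    1: 'OffsetOutOfRange',
    3: 'UnknownTopicOrPartition',
    29: 'TopicAuthorizationFailed',
    -1: 'UnknownError',
}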