Example #1
def test_compressed_buffer_close(compression):
    records = MessageSetBuffer(io.BytesIO(), 100000, compression_type=compression)
    orig_msg = Message(b'foobar')
    records.append(1234, orig_msg)
    records.close()

    # The closed buffer decodes to a single compressed wrapper message
    # at offset 0
    msgset = MessageSet.decode(records.buffer())
    assert len(msgset) == 1
    (offset, size, msg) = msgset[0]
    assert offset == 0
    assert msg.is_compressed()

    # Decompressing the wrapper recovers the original message and offset
    msgset = msg.decompress()
    (offset, size, msg) = msgset[0]
    assert not msg.is_compressed()
    assert offset == 1234
    assert msg == orig_msg

    # Closing again should work fine
    records.close()

    msgset = MessageSet.decode(records.buffer())
    assert len(msgset) == 1
    (offset, size, msg) = msgset[0]
    assert offset == 0
    assert msg.is_compressed()

    msgset = msg.decompress()
    (offset, size, msg) = msgset[0]
    assert not msg.is_compressed()
    assert offset == 1234
    assert msg == orig_msg
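
The compression argument above is presumably supplied by pytest parametrization. A minimal sketch of one way to provide it, assuming kafka-python's string codec names ('gzip', 'snappy', 'lz4') and its kafka.codec feature checks:

import pytest
from kafka.codec import has_snappy, has_lz4

@pytest.fixture(params=['gzip', 'snappy', 'lz4'])
def compression(request):
    # Skip codecs whose optional dependencies are not installed
    if request.param == 'snappy' and not has_snappy():
        pytest.skip('snappy codec unavailable')
    if request.param == 'lz4' and not has_lz4():
        pytest.skip('lz4 codec unavailable')
    return request.param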
Example #2
def test_produce_request(sender, mocker, api_version, produce_version):
    sender.config['api_version'] = api_version
    tp = TopicPartition('foo', 0)
    records = MessageSetBuffer(io.BytesIO(), 100000)
    batch = RecordBatch(tp, records)
    produce_request = sender._produce_request(0, 0, 0, [batch])
    # The sender should pick the request version matching the broker api_version
    assert isinstance(produce_request, ProduceRequest[produce_version])
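
The paired api_version and produce_version arguments suggest a parametrized test, with ProduceRequest acting as kafka-python's index of versioned request classes. A plausible decoration for the test above (the exact version pairs are an assumption for illustration, not the library's table):

import pytest

@pytest.mark.parametrize(('api_version', 'produce_version'), [
    ((0, 10), 2),
    ((0, 9), 1),
    ((0, 8), 0),
])
def test_produce_request(sender, mocker, api_version, produce_version):
    ...  # body as in Example #2 above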
Example #3
def test_buffer_close():
    records = MessageSetBuffer(io.BytesIO(), 100000)
    orig_msg = Message(b'foobar')
    records.append(1234, orig_msg)
    records.close()

    # Without compression the message round-trips with its original offset
    msgset = MessageSet.decode(records.buffer())
    assert len(msgset) == 1
    (offset, size, msg) = msgset[0]
    assert offset == 1234
    assert msg == orig_msg

    # Closing again should work fine
    records.close()

    msgset = MessageSet.decode(records.buffer())
    assert len(msgset) == 1
    (offset, size, msg) = msgset[0]
    assert offset == 1234
    assert msg == orig_msg
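
Outside of a test, the same append/close/decode round trip can be exercised directly. A minimal standalone sketch, assuming kafka-python's module layout (kafka.producer.buffer and kafka.protocol.message):

import io

from kafka.producer.buffer import MessageSetBuffer
from kafka.protocol.message import Message, MessageSet

records = MessageSetBuffer(io.BytesIO(), 100000)
records.append(0, Message(b'hello'))
records.close()  # finalizes the wire format; closing again is a no-op

# decode() yields (offset, size, message) tuples, as the asserts above rely on
for offset, size, msg in MessageSet.decode(records.buffer()):
    print(offset, size, msg.value)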
Example #4
    @asyncio.coroutine
    def add_message(self, tp, key, value, timeout):
        """Add message to batch by topic-partition.

        If the batch is already full, this method waits (`timeout` seconds
        maximum) until the batch is drained by the send task.
        """
        if self._closed:
            # this can happen when the producer is closing, but an async
            # task still tries to send messages
            raise ProducerClosed()

        batch = self._batches.get(tp)
        if not batch:
            message_set_buffer = MessageSetBuffer(io.BytesIO(),
                                                  self._batch_size,
                                                  self._compression_type)
            batch = MessageBatch(tp, message_set_buffer, self._batch_ttl,
                                 self._loop)
            self._batches[tp] = batch

            if not self._wait_data_future.done():
                # Wakeup sender task if it waits for data
                self._wait_data_future.set_result(None)

        future = batch.append(key, value)
        if future is None:
            # Batch is full; can't append right now, so wait until the
            # batch for this topic-partition is drained
            start = self._loop.time()
            yield from asyncio.wait([batch.wait_drain()],
                                    timeout=timeout,
                                    loop=self._loop)
            timeout -= self._loop.time() - start
            if timeout <= 0:
                raise KafkaTimeoutError()
            return (yield from self.add_message(tp, key, value, timeout))
        return future
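
The retry logic in add_message follows a common deadline pattern: measure the time spent waiting for a drain, subtract it from the remaining timeout, and give up once it reaches zero. A standalone sketch of the same pattern in modern async/await syntax (the helper and its arguments are illustrative, not aiokafka's API), using a loop instead of the recursion above:

import asyncio

async def append_with_deadline(try_append, wait_drain, timeout):
    # Illustrative helper: retry try_append() until it returns a future
    # or the overall deadline is exhausted, mirroring add_message above.
    loop = asyncio.get_running_loop()
    while True:
        future = try_append()
        if future is not None:
            return future
        start = loop.time()
        drain = asyncio.ensure_future(wait_drain())
        await asyncio.wait({drain}, timeout=timeout)
        drain.cancel()
        timeout -= loop.time() - start
        if timeout <= 0:
            raise asyncio.TimeoutError('batch not drained before deadline')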