async def test_consumer_several_transactions(self):
        # Commit every 3rd transaction and abort the rest, then verify that a
        # ``read_committed`` consumer yields exactly the committed messages,
        # in order, and nothing afterwards.
        producer = AIOKafkaProducer(bootstrap_servers=self.hosts,
                                    transactional_id="sobaka_producer")
        await producer.start()
        self.add_cleanup(producer.stop)

        msgs = []
        for i in range(10):
            await producer.begin_transaction()
            msg = b'Hello ' + str(i).encode()
            await producer.send(self.topic, msg, partition=0)
            if i % 3 == 0:
                await producer.commit_transaction()
                msgs.append(msg)  # only committed messages should be visible
            else:
                await producer.abort_transaction()

        consumer = AIOKafkaConsumer(self.topic,
                                    bootstrap_servers=self.hosts,
                                    auto_offset_reset="earliest",
                                    isolation_level="read_committed")
        await consumer.start()
        self.add_cleanup(consumer.stop)

        # Committed messages must arrive in produce order.
        async for msg in consumer:
            self.assertEqual(msg.value, msgs.pop(0))
            if not msgs:
                break

        # Nothing from the aborted transactions may ever be delivered.
        with self.assertRaises(asyncio.TimeoutError):
            await asyncio.wait_for(consumer.getone(), timeout=0.5)
    async def test_consumer_several_transactions(self):
        # Commit every 3rd transaction and abort the rest, then verify that a
        # ``read_committed`` consumer yields exactly the committed messages.
        # This copy passes the legacy explicit ``loop=`` argument.
        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts,
            transactional_id="sobaka_producer")
        await producer.start()
        self.add_cleanup(producer.stop)

        msgs = []
        for i in range(10):
            await producer.begin_transaction()
            msg = b'Hello ' + str(i).encode()
            await producer.send(self.topic, msg, partition=0)
            if i % 3 == 0:
                await producer.commit_transaction()
                msgs.append(msg)  # only committed messages should be visible
            else:
                await producer.abort_transaction()

        consumer = AIOKafkaConsumer(
            self.topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            auto_offset_reset="earliest",
            isolation_level="read_committed")
        await consumer.start()
        self.add_cleanup(consumer.stop)

        # Committed messages must arrive in produce order.
        async for msg in consumer:
            self.assertEqual(msg.value, msgs.pop(0))
            if not msgs:
                break

        # Nothing from the aborted transactions may ever be delivered.
        with self.assertRaises(asyncio.TimeoutError):
            await asyncio.wait_for(
                consumer.getone(), timeout=0.5, loop=self.loop)
# Example 3
    async def test_producer_transactional_send_offsets_to_transaction(self):
        # This is a pair test of Consume - To - Produce processing. We consume
        # a batch, process, produce with Producer and send the consumed
        # offsets through the same Producer. Committing the transaction then
        # atomically commits both the produced messages and the consumer
        # group offsets (updates the commit point in Consumer too).

        # Setup some messages in INPUT topic
        await self.send_messages(0, list(range(0, 100)))
        await self.send_messages(1, list(range(100, 200)))
        in_topic = self.topic
        out_topic = self.topic + "-out"
        group_id = self.topic + "-group"

        consumer = AIOKafkaConsumer(in_topic,
                                    loop=self.loop,
                                    bootstrap_servers=self.hosts,
                                    enable_auto_commit=False,
                                    group_id=group_id,
                                    auto_offset_reset="earliest")
        await consumer.start()
        self.add_cleanup(consumer.stop)

        producer = AIOKafkaProducer(loop=self.loop,
                                    bootstrap_servers=self.hosts,
                                    transactional_id="sobaka_producer",
                                    client_id="p1")
        await producer.start()
        self.add_cleanup(producer.stop)

        # Reset committed offsets to 0 so the whole topic is re-processed.
        assignment = consumer.assignment()
        self.assertTrue(assignment)
        for tp in assignment:
            await consumer.commit({tp: 0})
            offset_before = await consumer.committed(tp)
            self.assertEqual(offset_before, 0)

        async def transform():
            # Consume batches until the input is drained, re-producing each
            # record (prefixed) inside a transaction together with offsets.
            while True:
                batch = await consumer.getmany(timeout_ms=5000, max_records=20)
                if not batch:
                    break
                async with producer.transaction():
                    offsets = {}
                    for tp, msgs in batch.items():
                        for msg in msgs:
                            out_msg = b"OUT-" + msg.value
                            # We produce to the same partition.  NOTE: in
                            # aiokafka ``send`` is a coroutine and must be
                            # awaited, otherwise nothing is produced.
                            await producer.send(out_topic,
                                                value=out_msg,
                                                partition=tp.partition)
                        # Commit position = last consumed offset + 1.
                        # (``msgs`` is non-empty for every partition present
                        # in a ``getmany`` result.)
                        offsets[tp] = msgs[-1].offset + 1
                    await producer.send_offsets_to_transaction(
                        offsets, group_id)

        await transform()
        # All 100 records per partition must now be committed for the group.
        for tp in assignment:
            offset = await consumer.committed(tp)
            self.assertEqual(offset, 100)
    async def test_producer_transactional_send_offsets_to_transaction(self):
        # This is a pair test of Consume - To - Produce processing. We consume
        # a batch, process, produce with Producer and send the consumed
        # offsets through the same Producer. Committing the transaction then
        # atomically commits both the produced messages and the consumer
        # group offsets (updates the commit point in Consumer too).

        # Setup some messages in INPUT topic
        await self.send_messages(0, list(range(0, 100)))
        await self.send_messages(1, list(range(100, 200)))
        in_topic = self.topic
        out_topic = self.topic + "-out"
        group_id = self.topic + "-group"

        consumer = AIOKafkaConsumer(
            in_topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            enable_auto_commit=False,
            group_id=group_id,
            auto_offset_reset="earliest")
        await consumer.start()
        self.add_cleanup(consumer.stop)

        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts,
            transactional_id="sobaka_producer", client_id="p1")
        await producer.start()
        self.add_cleanup(producer.stop)

        # Reset committed offsets to 0 so the whole topic is re-processed.
        assignment = consumer.assignment()
        self.assertTrue(assignment)
        for tp in assignment:
            await consumer.commit({tp: 0})
            offset_before = await consumer.committed(tp)
            self.assertEqual(offset_before, 0)

        async def transform():
            # Consume batches until the input is drained, re-producing each
            # record (prefixed) inside a transaction together with offsets.
            while True:
                batch = await consumer.getmany(timeout_ms=5000, max_records=20)
                if not batch:
                    break
                async with producer.transaction():
                    offsets = {}
                    for tp, msgs in batch.items():
                        for msg in msgs:
                            out_msg = b"OUT-" + msg.value
                            # We produce to the same partition.  NOTE: in
                            # aiokafka ``send`` is a coroutine and must be
                            # awaited, otherwise nothing is produced.
                            await producer.send(
                                out_topic, value=out_msg,
                                partition=tp.partition)
                        # Commit position = last consumed offset + 1.
                        offsets[tp] = msgs[-1].offset + 1
                    await producer.send_offsets_to_transaction(
                        offsets, group_id)

        await transform()
        # All 100 records per partition must now be committed for the group.
        for tp in assignment:
            offset = await consumer.committed(tp)
            self.assertEqual(offset, 100)
# Example 5
 def test_consumer_group_without_subscription(self):
     # A consumer with a group_id but no topic subscription must start,
     # idle (heartbeats only) and stop cleanly.
     consumer = AIOKafkaConsumer(loop=self.loop,
                                 group_id='group-{}'.format(self.id()),
                                 bootstrap_servers=self.hosts,
                                 enable_auto_commit=False,
                                 auto_offset_reset='earliest',
                                 heartbeat_interval_ms=100)
     yield from consumer.start()
     # Give the coordinator time to exchange a couple of heartbeats.
     yield from asyncio.sleep(0.2, loop=self.loop)
     yield from consumer.stop()
# Example 6
    def test_unknown_topic_or_partition(self):
        # Assigning a partition number that does not exist on the broker
        # must raise UnknownTopicOrPartitionError.
        consumer = AIOKafkaConsumer(
            loop=self.loop, group_id=None,
            bootstrap_servers=self.hosts, auto_offset_reset='earliest',
            enable_auto_commit=False)
        yield from consumer.start()

        with self.assertRaises(UnknownTopicOrPartitionError):
            yield from consumer.assign([TopicPartition(self.topic, 2222)])
        yield from consumer.stop()
# Example 7
    def test_unknown_topic_or_partition(self):
        # Variant: subscribing to a non-existing topic before start() makes
        # start() itself raise UnknownTopicOrPartitionError, and a later
        # out-of-range assign() must raise as well.
        consumer = AIOKafkaConsumer(
            loop=self.loop, group_id=None,
            bootstrap_servers=self.hosts, auto_offset_reset='earliest',
            enable_auto_commit=False)
        consumer.subscribe(topics=('some_topic_unknown',))
        with self.assertRaises(UnknownTopicOrPartitionError):
            yield from consumer.start()

        with self.assertRaises(UnknownTopicOrPartitionError):
            yield from consumer.assign([TopicPartition(self.topic, 2222)])
# Example 8
    def test_consumer_stop_cancels_pending_position_fetches(self):
        # stop() must cancel any in-flight position-fetch tasks.
        # NOTE(review): relies on the private ``_pending_position_fetches``
        # attribute — tied to a specific aiokafka internal implementation.
        consumer = AIOKafkaConsumer(self.topic,
                                    loop=self.loop,
                                    bootstrap_servers=self.hosts,
                                    group_id='group-%s' % self.id())
        yield from consumer.start()
        self.add_cleanup(consumer.stop)

        self.assertTrue(consumer._pending_position_fetches)
        pending_task = list(consumer._pending_position_fetches)[0]
        yield from consumer.stop()
        self.assertTrue(pending_task.cancelled())
# Example 9
 def test_consumer_arguments(self):
     # The constructor must validate security options eagerly, before any
     # network activity happens.
     bad_protocol_pattern = "`security_protocol` should be SSL or PLAINTEXT"
     with self.assertRaisesRegexp(ValueError, bad_protocol_pattern):
         AIOKafkaConsumer(
             self.topic, loop=self.loop,
             bootstrap_servers=self.hosts,
             security_protocol="SOME")
     missing_ctx_pattern = ("`ssl_context` is mandatory if "
                            "security_protocol=='SSL'")
     with self.assertRaisesRegexp(ValueError, missing_ctx_pattern):
         AIOKafkaConsumer(
             self.topic, loop=self.loop,
             bootstrap_servers=self.hosts,
             security_protocol="SSL", ssl_context=None)
# Example 10
 def consumer_factory(self, **kwargs):
     # Helper: build and start a consumer on ``self.topic``.
     # ``group`` / ``enable_auto_commit`` / ``auto_offset_reset`` may be
     # overridden via kwargs; everything else is passed straight through.
     enable_auto_commit = kwargs.pop('enable_auto_commit', True)
     auto_offset_reset = kwargs.pop('auto_offset_reset', 'earliest')
     group = kwargs.pop('group', 'group-%s' % self.id())
     consumer = AIOKafkaConsumer(
         self.topic, loop=self.loop, group_id=group,
         bootstrap_servers=self.hosts,
         enable_auto_commit=enable_auto_commit,
         auto_offset_reset=auto_offset_reset,
         **kwargs)
     yield from consumer.start()
     if group is not None:
         # Group members resume from their last committed offsets.
         yield from consumer.seek_to_committed()
     return consumer
# Example 11
 def consumer_factory(self, **kwargs):
     # Helper: build and start a consumer on ``self.topic``.
     # ``group`` / ``enable_auto_commit`` / ``auto_offset_reset`` may be
     # overridden via kwargs; everything else is passed straight through.
     enable_auto_commit = kwargs.pop('enable_auto_commit', True)
     auto_offset_reset = kwargs.pop('auto_offset_reset', 'earliest')
     group = kwargs.pop('group', 'group-%s' % self.id())
     consumer = AIOKafkaConsumer(
         self.topic, loop=self.loop, group_id=group,
         bootstrap_servers=self.hosts,
         enable_auto_commit=enable_auto_commit,
         auto_offset_reset=auto_offset_reset,
         **kwargs)
     yield from consumer.start()
     if group is not None:
         # Group members resume from their last committed offsets.
         yield from consumer.seek_to_committed()
     return consumer
    async def test_producer_transactional_simple(self):
        # The test here will just check if we can do simple produce with
        # transactional_id option and minimal setup.

        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts,
            transactional_id="sobaka_producer")
        await producer.start()
        self.add_cleanup(producer.stop)

        async with producer.transaction():
            meta = await producer.send_and_wait(
                self.topic, b'hello, Kafka!')

        # A default (read_uncommitted) consumer must see the committed
        # message with exactly the metadata the producer reported.
        consumer = AIOKafkaConsumer(
            self.topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            auto_offset_reset="earliest")
        await consumer.start()
        self.add_cleanup(consumer.stop)
        msg = await consumer.getone()
        self.assertEqual(msg.offset, meta.offset)
        self.assertEqual(msg.timestamp, meta.timestamp)
        self.assertEqual(msg.value, b"hello, Kafka!")
        self.assertEqual(msg.key, None)
# Example 13
    def test_offset_reset_manual(self):
        # With ``auto_offset_reset="none"`` and no committed offset for the
        # group, both getmany() and getone() must raise
        # OffsetOutOfRangeError instead of silently resetting the position.
        yield from self.send_messages(0, list(range(0, 10)))

        consumer = AIOKafkaConsumer(
            self.topic,
            loop=self.loop, bootstrap_servers=self.hosts,
            metadata_max_age_ms=200, group_id="offset_reset_group",
            auto_offset_reset="none")
        yield from consumer.start()
        self.add_cleanup(consumer.stop)

        with self.assertRaises(OffsetOutOfRangeError):
            yield from consumer.getmany(timeout_ms=1000)

        with self.assertRaises(OffsetOutOfRangeError):
            yield from consumer.getone()
# Example 14
    async def test_exception_ignored_with_aiter(self):
        # An error raised for one record (MessageTooLarge here) must be
        # logged and skipped by the ``async for`` iterator, and iteration
        # must continue with the following records.
        # Test relies on MessageTooLarge error, which is no more in
        # Kafka 0.10.1+. So we pin the API version here to 0.9

        l_msgs = [random_string(10), random_string(50000)]
        large_messages = await self.send_messages(0, l_msgs)
        r_msgs = [random_string(50)]
        small_messages = await self.send_messages(0, r_msgs)

        # max_partition_fetch_bytes is deliberately smaller than the large
        # message so fetching it fails.
        consumer = AIOKafkaConsumer(self.topic,
                                    bootstrap_servers=self.hosts,
                                    auto_offset_reset='earliest',
                                    max_partition_fetch_bytes=4000,
                                    api_version="0.9")
        await consumer.start()
        self.add_cleanup(consumer.stop)

        messages = []
        with self.assertLogs('aiokafka.consumer.consumer',
                             level='ERROR') as cm:
            async for m in consumer:
                messages.append(m)
                if len(messages) == 2:
                    # Flake8==3.0.3 gives
                    #   F999 'break' outside loop
                    # for `async` syntax
                    break  # noqa

            self.assertEqual(len(cm.output), 1)
            self.assertTrue(
                'ERROR:aiokafka.consumer.consumer:error in consumer iterator'
                in cm.output[0])
        self.assertEqual(messages[0].value, large_messages[0])
        self.assertEqual(messages[1].value, small_messages[0])
# Example 15
    async def test_consumer_stops_iter(self):
        # Stopping the consumer must end an active ``async for`` loop
        # cleanly, while starting a new iteration afterwards must raise
        # ConsumerStoppedError.
        consumer = AIOKafkaConsumer(self.topic,
                                    bootstrap_servers=self.hosts,
                                    auto_offset_reset="earliest")
        await consumer.start()
        self.add_cleanup(consumer.stop)

        async def iterator():
            async for msg in consumer:  # pragma: no cover
                assert False, "No items should be here, got {}".format(msg)

        task = create_task(iterator())
        await asyncio.sleep(0.1)
        # As we didn't input any data into Kafka
        self.assertFalse(task.done())

        await consumer.stop()
        # Should just stop iterator, no errors
        await task
        # But creating another iterator should result in an error, we can't
        # have dead loops like:
        #
        #   while True:
        #     async for msg in consumer:
        #       print(msg)
        with self.assertRaises(ConsumerStoppedError):
            await iterator()
 async def test_consumer_transactions_not_supported(self):
     # Starting a ``read_committed`` consumer against a broker without
     # transaction support must raise UnsupportedVersionError.
     # NOTE(review): presumably run against an old (pre-0.11) broker
     # fixture — confirm in the enclosing test class.
     consumer = AIOKafkaConsumer(
         loop=self.loop, bootstrap_servers=self.hosts,
         isolation_level="read_committed")
     with self.assertRaises(UnsupportedVersionError):
         await consumer.start()
     await consumer.stop()
# Example 17
    async def test_exception_ignored_with_aiter(self):
        # A per-record fetch error must be logged and skipped by the
        # ``async for`` iterator; iteration continues with later records.
        l_msgs = [random_string(10), random_string(50000)]
        large_messages = await self.send_messages(0, l_msgs)
        r_msgs = [random_string(50)]
        small_messages = await self.send_messages(0, r_msgs)

        # max_partition_fetch_bytes is deliberately smaller than the large
        # message so fetching it fails.
        consumer = AIOKafkaConsumer(self.topic,
                                    loop=self.loop,
                                    bootstrap_servers=self.hosts,
                                    auto_offset_reset='earliest',
                                    max_partition_fetch_bytes=4000)
        await consumer.start()

        messages = []
        with self.assertLogs('aiokafka.consumer', level='ERROR') as cm:
            async for m in consumer:
                messages.append(m)
                if len(messages) == 2:
                    # Flake8==3.0.3 gives
                    #   F999 'break' outside loop
                    # for `async` syntax
                    break  # noqa

            self.assertEqual(len(cm.output), 1)
            self.assertTrue(
                'ERROR:aiokafka.consumer:error in consumer iterator' in
                cm.output[0])
        self.assertEqual(messages[0].value, large_messages[0])
        self.assertEqual(messages[1].value, small_messages[0])
        await consumer.stop()
# Example 18
 def get_consumer(self, topic: str):
     """Build an ``AIOKafkaConsumer`` for *topic* from stored metadata."""
     event_loop = asyncio.get_event_loop()
     extra_kwargs = self.as_kwargs()
     return AIOKafkaConsumer(
         topic,
         loop=event_loop,
         value_deserializer=_deserialize,
         **extra_kwargs,
     )
    async def test_consumer_transactional_abort(self):
        # An open transaction must block a read_committed consumer on that
        # partition; after abort the transactional record is filtered out
        # and only the interleaved non-transactional record is delivered.
        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts,
            transactional_id="sobaka_producer")
        await producer.start()
        self.add_cleanup(producer.stop)

        producer2 = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts)
        await producer2.start()
        self.add_cleanup(producer2.stop)

        consumer = AIOKafkaConsumer(
            self.topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            auto_offset_reset="earliest",
            isolation_level="read_committed")
        await consumer.start()
        self.add_cleanup(consumer.stop)

        # We will produce from a transactional producer and then from a
        # non-transactional. This should block consumption on that partition
        # until transaction is committed.
        await producer.begin_transaction()
        await producer.send_and_wait(
            self.topic, b'Hello from transaction', partition=0)

        meta2 = await producer2.send_and_wait(
            self.topic, b'Hello from non-transaction', partition=0)

        # The transaction blocked consumption
        task = self.loop.create_task(consumer.getone())
        await asyncio.sleep(1, loop=self.loop)
        self.assertFalse(task.done())

        # LSO stays at the open transaction start, highwater moves past it.
        tp = TopicPartition(self.topic, 0)
        self.assertEqual(consumer.last_stable_offset(tp), 0)
        self.assertEqual(consumer.highwater(tp), 2)

        await producer.abort_transaction()

        # The aborted transactional record is filtered out, so the first
        # message delivered is the non-transactional one at offset 1.
        msg = await task
        self.assertEqual(msg.offset, meta2.offset)
        self.assertEqual(msg.timestamp, meta2.timestamp)
        self.assertEqual(msg.value, b"Hello from non-transaction")
        self.assertEqual(msg.key, None)

        # Nothing else is consumable: the next offset is the abort marker.
        with self.assertRaises(asyncio.TimeoutError):
            await asyncio.wait_for(
                consumer.getone(), timeout=0.5, loop=self.loop)

        tp = TopicPartition(self.topic, 0)
        self.assertEqual(consumer.last_stable_offset(tp), 3)
        self.assertEqual(consumer.highwater(tp), 3)
# Example 20
    def listen_flights(self, loop):
        """
        Thread entry point: build a Kafka consumer for the flights topic and
        schedule the listening coroutine onto *loop*, caching the gathered
        flight information.
        """
        deserializer = partial(self.deserialize, schema=self.flights_schema)
        consumer = AIOKafkaConsumer(
            self.FLIGHTS_TOPIC,
            loop=loop,
            group_id='flights_task',
            bootstrap_servers=self.kafka_bootstrap_servers,
            value_deserializer=deserializer,
        )

        asyncio.run_coroutine_threadsafe(self._listen_flights(consumer), loop)
# Example 21
    def test_consumer_wait_topic(self):
        # getone() on a not-yet-existing topic must block (not fail) until
        # the topic is auto-created and a message is produced to it.
        topic = "some-test-topic-for-autocreate"
        consumer = AIOKafkaConsumer(
            topic, loop=self.loop, bootstrap_servers=self.hosts)
        yield from consumer.start()
        consume_task = self.loop.create_task(consumer.getone())
        # just to be sure getone does not fail (before produce)
        yield from asyncio.sleep(0.5, loop=self.loop)
        self.assertFalse(consume_task.done())

        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts)
        yield from producer.start()
        yield from producer.send(topic, b'test msg')
        yield from producer.stop()

        data = yield from consume_task
        self.assertEqual(data.value, b'test msg')
        yield from consumer.stop()
# Example 22
    def test_ssl_consume(self):
        # Produce by PLAINTEXT, Consume by SSL
        # Send 3 messages
        yield from self.send_messages(0, [1, 2, 3])

        context = self.create_ssl_context()
        group = "group-{}".format(self.id())
        consumer = AIOKafkaConsumer(
            self.topic, loop=self.loop, group_id=group,
            bootstrap_servers=[
                "{}:{}".format(self.kafka_host, self.kafka_ssl_port)],
            enable_auto_commit=True,
            auto_offset_reset="earliest",
            security_protocol="SSL", ssl_context=context)
        yield from consumer.start()
        results = yield from consumer.getmany(timeout_ms=1000)
        [msgs] = results.values()  # only 1 partition anyway
        msgs = [msg.value for msg in msgs]
        # Values are serialized as bytes on the wire.
        self.assertEqual(msgs, [b"1", b"2", b"3"])
        yield from consumer.stop()
    async def _test_control_record(self, isolation_level):
        # Helper shared by isolation-level tests: a transaction commit
        # marker (control record) occupies an offset but must never be
        # returned to the user; the consumer position must skip past it.
        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts,
            transactional_id="sobaka_producer")
        await producer.start()
        self.add_cleanup(producer.stop)

        async with producer.transaction():
            meta = await producer.send_and_wait(
                self.topic, b'Hello from transaction', partition=0)

        # Tiny fetch_max_bytes forces a fetch that contains only the marker.
        consumer = AIOKafkaConsumer(
            self.topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            auto_offset_reset="earliest",
            isolation_level=isolation_level,
            fetch_max_bytes=10)
        await consumer.start()
        self.add_cleanup(consumer.stop)

        # Transaction marker will be next after the message
        consumer.seek(meta.topic_partition, meta.offset + 1)

        with self.assertRaises(asyncio.TimeoutError):
            await asyncio.wait_for(
                consumer.getone(), timeout=0.5, loop=self.loop)

        # We must not be stuck on previous position
        position = await consumer.position(meta.topic_partition)
        self.assertEqual(position, meta.offset + 2)

        # After producing some more data it should resume consumption
        async with producer.transaction():
            meta2 = await producer.send_and_wait(
                self.topic, b'Hello from transaction 2', partition=0)

        msg = await consumer.getone()
        self.assertEqual(msg.offset, meta2.offset)
        self.assertEqual(msg.timestamp, meta2.timestamp)
        self.assertEqual(msg.value, b"Hello from transaction 2")
        self.assertEqual(msg.key, None)
# Example 24
    async def test_exception_in_aiter(self):
        # With no offset-reset policy and no committed offset, iterating
        # must surface OffsetOutOfRangeError out of the ``async for``.
        await self.send_messages(0, [b'test'])

        consumer = AIOKafkaConsumer(self.topic,
                                    loop=self.loop,
                                    bootstrap_servers=self.hosts,
                                    auto_offset_reset=None)
        await consumer.start()
        # Ensure the consumer is stopped even though the iteration below
        # raises; the original version leaked the running consumer.
        self.add_cleanup(consumer.stop)

        with self.assertRaises(OffsetOutOfRangeError):
            async for m in consumer:
                print(m)
# Example 25
    async def test_exception_in_aiter(self):
        # With ``auto_offset_reset="none"`` and no committed offset,
        # iterating must surface NoOffsetForPartitionError out of the
        # ``async for``.
        await self.send_messages(0, [b'test'])

        consumer = AIOKafkaConsumer(self.topic,
                                    bootstrap_servers=self.hosts,
                                    auto_offset_reset="none")
        await consumer.start()
        self.add_cleanup(consumer.stop)

        with self.assertRaises(NoOffsetForPartitionError):
            async for m in consumer:
                m  # pragma: no cover
# Example 26
    def test_producer_indempotence_no_duplicates(self):
        # Idempotent producer should retry produce in case of timeout error
        # without writing duplicate records.
        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts,
            enable_idempotence=True,
            request_timeout_ms=2000)
        yield from producer.start()
        self.add_cleanup(producer.stop)

        original_send = producer.client.send
        retry = [0]

        @asyncio.coroutine
        def mocked_send(*args, **kw):
            # Let the request actually reach the broker, then pretend the
            # first two Produce responses timed out to force client retries.
            result = yield from original_send(*args, **kw)
            if result.API_KEY == ProduceResponse[0].API_KEY and retry[0] < 2:
                retry[0] += 1
                raise RequestTimedOutError
            return result

        with mock.patch.object(producer.client, 'send') as mocked:
            mocked.side_effect = mocked_send

            meta = yield from producer.send_and_wait(
                self.topic, b'hello, Kafka!')

        consumer = AIOKafkaConsumer(
            self.topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            auto_offset_reset="earliest")
        yield from consumer.start()
        self.add_cleanup(consumer.stop)
        msg = yield from consumer.getone()
        self.assertEqual(msg.offset, meta.offset)
        self.assertEqual(msg.timestamp, meta.timestamp)
        self.assertEqual(msg.value, b"hello, Kafka!")
        self.assertEqual(msg.key, None)

        # Exactly one record despite the retries - no duplicates.
        with self.assertRaises(asyncio.TimeoutError):
            yield from asyncio.wait_for(consumer.getone(), timeout=0.5)
# Example 27
    def test_producer_indempotence_no_duplicates(self):
        # Idempotent producer should retry produce in case of timeout error
        # without writing duplicate records.
        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts,
            enable_idempotence=True,
            request_timeout_ms=2000)
        yield from producer.start()
        self.add_cleanup(producer.stop)

        original_send = producer.client.send
        retry = [0]

        @asyncio.coroutine
        def mocked_send(*args, **kw):
            # Let the request actually reach the broker, then pretend the
            # first two Produce responses timed out to force client retries.
            result = yield from original_send(*args, **kw)
            if result.API_KEY == ProduceResponse[0].API_KEY and retry[0] < 2:
                retry[0] += 1
                raise RequestTimedOutError
            return result

        with mock.patch.object(producer.client, 'send') as mocked:
            mocked.side_effect = mocked_send

            meta = yield from producer.send_and_wait(
                self.topic, b'hello, Kafka!')

        consumer = AIOKafkaConsumer(
            self.topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            auto_offset_reset="earliest")
        yield from consumer.start()
        self.add_cleanup(consumer.stop)
        msg = yield from consumer.getone()
        self.assertEqual(msg.offset, meta.offset)
        self.assertEqual(msg.timestamp, meta.timestamp)
        self.assertEqual(msg.value, b"hello, Kafka!")
        self.assertEqual(msg.key, None)

        # Exactly one record despite the retries - no duplicates.
        with self.assertRaises(asyncio.TimeoutError):
            yield from asyncio.wait_for(consumer.getone(), timeout=0.5)
# Example 28
    def test_producer_ssl(self):
        # Produce via SSL, consume via PLAINTEXT: the record must round-trip
        # regardless of the transport used on either side.
        topic = "test_ssl_produce"
        context = self.create_ssl_context()
        producer = AIOKafkaProducer(
            loop=self.loop,
            bootstrap_servers=[
                "{}:{}".format(self.kafka_host, self.kafka_ssl_port)],
            security_protocol="SSL", ssl_context=context)
        yield from producer.start()
        yield from producer.send_and_wait(topic=topic, value=b"Super msg")
        yield from producer.stop()

        consumer = AIOKafkaConsumer(
            topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            enable_auto_commit=True,
            auto_offset_reset="earliest")
        yield from consumer.start()
        msg = yield from consumer.getone()
        self.assertEqual(msg.value, b"Super msg")
        yield from consumer.stop()
# Example 29
    def test_consumer_subscribe_pattern_autocreate_no_group_id(self):
        # Pattern subscription without a group: topics matching the pattern
        # are picked up dynamically as they appear in cluster metadata, and
        # the subscription must not itself trigger topic auto-creation.
        pattern = "^no-group-pattern-.*$"
        consumer = AIOKafkaConsumer(loop=self.loop,
                                    bootstrap_servers=self.hosts,
                                    metadata_max_age_ms=200,
                                    group_id=None,
                                    fetch_max_wait_ms=50,
                                    auto_offset_reset="earliest")
        self.add_cleanup(consumer.stop)
        yield from consumer.start()
        consumer.subscribe(pattern=pattern)
        # Start getter for the topics. Should not create any topics
        consume_task = self.loop.create_task(consumer.getone())
        yield from asyncio.sleep(0.3, loop=self.loop)
        self.assertFalse(consume_task.done())
        self.assertEqual(consumer.subscription(), set())

        # Now lets autocreate the topic by fetching metadata for it.
        producer = AIOKafkaProducer(loop=self.loop,
                                    bootstrap_servers=self.hosts)
        self.add_cleanup(producer.stop)
        yield from producer.start()
        my_topic = "no-group-pattern-1"
        yield from producer.client._wait_on_metadata(my_topic)
        # Wait for consumer to refresh metadata with new topic
        yield from asyncio.sleep(0.3, loop=self.loop)
        self.assertFalse(consume_task.done())
        self.assertTrue(consumer._client.cluster.topics() >= {my_topic})
        self.assertEqual(consumer.subscription(), {my_topic})

        # Add another topic
        my_topic2 = "no-group-pattern-2"
        yield from producer.client._wait_on_metadata(my_topic2)
        # Wait for consumer to refresh metadata with new topic
        yield from asyncio.sleep(0.3, loop=self.loop)
        self.assertFalse(consume_task.done())
        self.assertTrue(
            consumer._client.cluster.topics() >= {my_topic, my_topic2})
        self.assertEqual(consumer.subscription(), {my_topic, my_topic2})

        # Now lets actually produce some data and verify that it is consumed
        yield from producer.send(my_topic, b'test msg')
        data = yield from consume_task
        self.assertEqual(data.value, b'test msg')
# Example 30
    def test_producer_ssl(self):
        # Produce via SSL, consume via PLAINTEXT: the record must round-trip
        # regardless of the transport used on either side.
        topic = "test_ssl_produce"
        context = self.create_ssl_context()
        producer = AIOKafkaProducer(
            loop=self.loop,
            bootstrap_servers=[
                "{}:{}".format(self.kafka_host, self.kafka_ssl_port)],
            security_protocol="SSL", ssl_context=context)
        yield from producer.start()
        yield from producer.send_and_wait(topic=topic, value=b"Super msg")
        yield from producer.stop()

        consumer = AIOKafkaConsumer(
            topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            enable_auto_commit=True,
            auto_offset_reset="earliest")
        yield from consumer.start()
        msg = yield from consumer.getone()
        self.assertEqual(msg.value, b"Super msg")
        yield from consumer.stop()
# Example 31
    def test_consumer_seek_on_unassigned(self):
        # Seeking on a partition outside of the manual assignment must raise
        # IllegalStateError for every seek_to_* helper.
        tp0 = TopicPartition(self.topic, 0)
        tp1 = TopicPartition(self.topic, 1)
        consumer = AIOKafkaConsumer(loop=self.loop,
                                    group_id=None,
                                    bootstrap_servers=self.hosts)
        yield from consumer.start()
        self.add_cleanup(consumer.stop)
        consumer.assign([tp0])

        with self.assertRaises(IllegalStateError):
            yield from consumer.seek_to_beginning(tp1)
        with self.assertRaises(IllegalStateError):
            yield from consumer.seek_to_committed(tp1)
        with self.assertRaises(IllegalStateError):
            yield from consumer.seek_to_end(tp1)
Exemple #32
0
    def test_producer_indempotence_simple(self):
        # Smoke test: turning on enable_idempotence must not change the
        # plain produce/consume API in any visible way.
        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts,
            enable_idempotence=True)
        yield from producer.start()
        self.add_cleanup(producer.stop)

        record_meta = yield from producer.send_and_wait(
            self.topic, b'hello, Kafka!')

        consumer = AIOKafkaConsumer(
            self.topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            auto_offset_reset="earliest")
        yield from consumer.start()
        self.add_cleanup(consumer.stop)

        record = yield from consumer.getone()
        self.assertEqual(record.offset, record_meta.offset)
        self.assertEqual(record.timestamp, record_meta.timestamp)
        self.assertEqual(record.value, b"hello, Kafka!")
        self.assertEqual(record.key, None)
Exemple #33
0
    def test_producer_indempotence_simple(self):
        # NOTE(review): byte-for-byte duplicate of the identical test
        # earlier in this file — consider deleting one copy.
        # Simple produce/consume round-trip with enable_idempotence=True.
        payload = b'hello, Kafka!'

        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts,
            enable_idempotence=True)
        yield from producer.start()
        self.add_cleanup(producer.stop)
        meta = yield from producer.send_and_wait(self.topic, payload)

        consumer = AIOKafkaConsumer(
            self.topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            auto_offset_reset="earliest")
        yield from consumer.start()
        self.add_cleanup(consumer.stop)

        msg = yield from consumer.getone()
        for got, expected in ((msg.offset, meta.offset),
                              (msg.timestamp, meta.timestamp),
                              (msg.value, payload),
                              (msg.key, None)):
            self.assertEqual(got, expected)
Exemple #34
0
 async def consumer_factory(self, user="******", **kw):
     """Build and start an AIOKafkaConsumer authenticated over
     SASL_PLAINTEXT with the PLAIN mechanism.

     The same ``user`` string is passed as both username and password.
     NOTE(review): the ``"******"`` default looks like a scrubbed
     credential placeholder — confirm the intended default value.
     """
     # Shared defaults for SASL tests; explicit caller kwargs win.
     kwargs = dict(enable_auto_commit=True,
                   auto_offset_reset="earliest",
                   group_id=self.group_id)
     kwargs.update(kw)
     consumer = AIOKafkaConsumer(self.topic,
                                 bootstrap_servers=[self.sasl_hosts],
                                 security_protocol="SASL_PLAINTEXT",
                                 sasl_mechanism="PLAIN",
                                 sasl_plain_username=user,
                                 sasl_plain_password=user,
                                 **kwargs)
     # Cleanup registered before start() — presumably so stop() runs
     # even if start() fails partway; verify harness semantics.
     self.add_cleanup(consumer.stop)
     await consumer.start()
     return consumer
Exemple #35
0
    async def gssapi_consumer_factory(self, **kw):
        """Build and start a consumer authenticating via SASL/GSSAPI."""
        # Very old brokers need an explicit api_version hint.
        if self.kafka_version == "0.9.0.1":
            kw['api_version'] = "0.9"

        # Defaults first, then caller overrides layered on top.
        options = {
            'enable_auto_commit': True,
            'auto_offset_reset': "earliest",
            'group_id': self.group_id,
        }
        options.update(kw)
        consumer = AIOKafkaConsumer(self.topic,
                                    bootstrap_servers=[self.sasl_hosts],
                                    security_protocol="SASL_PLAINTEXT",
                                    sasl_mechanism="GSSAPI",
                                    sasl_kerberos_domain_name="localhost",
                                    **options)
        self.add_cleanup(consumer.stop)
        await consumer.start()
        return consumer
Exemple #36
0
    async def test_aiter(self):
        # The consumer must be usable directly with ``async for``.
        expected_total = 20
        await self.send_messages(0, list(range(0, 10)))
        await self.send_messages(1, list(range(10, 20)))

        consumer = AIOKafkaConsumer(
            self.topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            auto_offset_reset='earliest')
        await consumer.start()

        collected = []
        async for record in consumer:
            collected.append(record)
            if len(collected) == expected_total:
                break
        self.assert_message_count(collected, expected_total)
        await consumer.stop()
Exemple #37
0
    def test_manual_subscribe_nogroup(self):
        # subscribe() before start() must work without a consumer group.
        part0_msgs = yield from self.send_messages(0, range(0, 10))
        part1_msgs = yield from self.send_messages(1, range(10, 20))
        expected = set(part0_msgs) | set(part1_msgs)

        consumer = AIOKafkaConsumer(
            loop=self.loop, group_id=None,
            bootstrap_servers=self.hosts, auto_offset_reset='earliest',
            enable_auto_commit=False)
        consumer.subscribe(topics=(self.topic,))
        yield from consumer.start()

        received = set()
        for _ in range(20):
            record = yield from consumer.getone()
            received.add(record.value)
        self.assertEqual(expected, received)
        yield from consumer.stop()
Exemple #38
0
    def test_manual_subscribe_nogroup(self):
        # NOTE(review): identical duplicate of the preceding test; one
        # copy can likely be deleted.
        # Manual subscribe without a group id, then drain both partitions.
        sent = yield from self.send_messages(0, range(0, 10))
        sent += yield from self.send_messages(1, range(10, 20))

        consumer = AIOKafkaConsumer(
            loop=self.loop, group_id=None,
            bootstrap_servers=self.hosts, auto_offset_reset='earliest',
            enable_auto_commit=False)
        consumer.subscribe(topics=(self.topic,))
        yield from consumer.start()

        fetched = []
        remaining = 20
        while remaining:
            record = yield from consumer.getone()
            fetched.append(record.value)
            remaining -= 1
        self.assertEqual(set(sent), set(fetched))
        yield from consumer.stop()
Exemple #39
0
    def test_producer_leader_change_preserves_order(self):
        """Send two batches to one partition across a faked leader change
        and verify the consumer receives them in the original send order."""
        # Before 0.5.0 we did not lock partition until a response came from
        # the server, but locked the node itself.
        # For example: Say the sender sent a request to node 1 and before an
        # failure answer came we updated metadata and leader become node 0.
        # This way we may send the next batch to node 0 without waiting for
        # node 1 batch to be reenqueued, resulting in out-of-order batches

        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts, linger_ms=1000)
        yield from producer.start()
        self.add_cleanup(producer.stop)

        # Alter metadata to convince the producer, that leader or partition 0
        # is a different node
        yield from producer.partitions_for(self.topic)
        topic_meta = producer._metadata._partitions[self.topic]
        real_leader = topic_meta[0].leader
        topic_meta[0] = topic_meta[0]._replace(leader=real_leader + 1)

        # Make sure the first request for produce takes more time
        original_send = producer.client.send

        @asyncio.coroutine
        def mocked_send(node_id, request, *args, **kw):
            # Stall produce requests aimed at the fake leader so batch 1
            # is still in flight while batch 2 gets sent.
            if node_id != real_leader and \
                    request.API_KEY == ProduceResponse[0].API_KEY:
                yield from asyncio.sleep(2, loop=self.loop)

            result = yield from original_send(node_id, request, *args, **kw)
            return result
        producer.client.send = mocked_send

        # Send Batch 1. This will end up waiting for some time on fake leader
        batch = producer.create_batch()
        meta = batch.append(key=b"key", value=b"1", timestamp=None)
        batch.close()
        fut = yield from producer.send_batch(
            batch, self.topic, partition=0)

        # Make sure we sent the request
        yield from asyncio.sleep(0.1, loop=self.loop)
        # Update metadata to return leader to real one
        yield from producer.client.force_metadata_update()

        # Send Batch 2, that if it's bugged will go straight to the real node
        batch2 = producer.create_batch()
        meta2 = batch2.append(key=b"key", value=b"2", timestamp=None)
        batch2.close()
        fut2 = yield from producer.send_batch(
            batch2, self.topic, partition=0)

        batch_meta = yield from fut
        batch_meta2 = yield from fut2

        # Check the order of messages
        consumer = AIOKafkaConsumer(
            self.topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            auto_offset_reset="earliest")
        yield from consumer.start()
        self.add_cleanup(consumer.stop)
        msg = yield from consumer.getone()
        self.assertEqual(msg.offset, batch_meta.offset)
        self.assertEqual(msg.timestamp or -1, meta.timestamp)
        self.assertEqual(msg.value, b"1")
        self.assertEqual(msg.key, b"key")
        msg2 = yield from consumer.getone()
        self.assertEqual(msg2.offset, batch_meta2.offset)
        self.assertEqual(msg2.timestamp or -1, meta2.timestamp)
        self.assertEqual(msg2.value, b"2")
        self.assertEqual(msg2.key, b"key")
    async def test_producer_transactional_send_offsets_and_abort(self):
        """Consume-transform-produce using send_offsets_to_transaction:
        an aborted transaction must roll back the consumed offsets so the
        same input records can be reprocessed after a seek."""
        # Following previous, we will process but abort transaction. Commit
        # should not be processed and the same data should be returned after
        # reset

        # Setup some messages in INPUT topic
        await self.send_messages(0, list(range(0, 100)))
        await self.send_messages(1, list(range(100, 200)))
        in_topic = self.topic
        out_topic = self.topic + "-out"
        group_id = self.topic + "-group"

        consumer = AIOKafkaConsumer(
            in_topic, loop=self.loop,
            bootstrap_servers=self.hosts,
            enable_auto_commit=False,
            group_id=group_id,
            auto_offset_reset="earliest")
        await consumer.start()
        self.add_cleanup(consumer.stop)

        producer = AIOKafkaProducer(
            loop=self.loop, bootstrap_servers=self.hosts,
            transactional_id="sobaka_producer", client_id="p1")
        await producer.start()
        self.add_cleanup(producer.stop)

        # Pin every assigned partition to committed offset 0 so the test
        # starts from a known position.
        assignment = consumer.assignment()
        self.assertTrue(assignment)
        for tp in assignment:
            await consumer.commit({tp: 0})
            offset_before = await consumer.committed(tp)
            self.assertEqual(offset_before, 0)

        async def transform(raise_error):
            # Read batches, re-produce each record transformed, and commit
            # the consumed offsets inside the same transaction. When
            # raise_error is True, the ValueError propagates out of the
            # ``async with producer.transaction()`` block, aborting it.
            while True:
                batch = await consumer.getmany(timeout_ms=5000, max_records=20)
                if not batch:
                    break
                async with producer.transaction():
                    offsets = {}
                    for tp, msgs in batch.items():
                        for msg in msgs:
                            out_msg = b"OUT-" + msg.value
                            # We produce to the same partition
                            producer.send(
                                out_topic, value=out_msg,
                                partition=tp.partition)
                        # NOTE(review): uses the last ``msg`` from the inner
                        # loop; assumes getmany never yields an empty message
                        # list for a partition — confirm against the API.
                        offsets[tp] = msg.offset + 1
                    await producer.send_offsets_to_transaction(
                        offsets, group_id)
                    if raise_error:
                        raise ValueError()

        try:
            await transform(raise_error=True)
        except ValueError:
            pass

        # The aborted transaction must not have moved the group offsets.
        for tp in assignment:
            offset = await consumer.committed(tp)
            self.assertEqual(offset, 0)

        # Rewind to the committed position and reprocess successfully.
        await consumer.seek_to_committed()
        await transform(raise_error=False)

        for tp in assignment:
            offset = await consumer.committed(tp)
            self.assertEqual(offset, 100)
Exemple #41
0
    def test_manual_subscribe_pattern(self):
        """Subscribe by regex pattern, exercise commit/seek_to_committed,
        then re-subscribe by explicit topic and resume from the committed
        offsets."""
        msgs1 = yield from self.send_messages(0, range(0, 10))
        msgs2 = yield from self.send_messages(1, range(10, 20))
        available_msgs = msgs1 + msgs2

        # Phase 1: pattern subscription should match self.topic and
        # deliver all 20 messages across both partitions.
        consumer = AIOKafkaConsumer(
            loop=self.loop, group_id='test-group',
            bootstrap_servers=self.hosts, auto_offset_reset='earliest',
            enable_auto_commit=False)
        consumer.subscribe(pattern="topic-test_manual_subs*")
        yield from consumer.start()
        yield from consumer.seek_to_committed()
        result = []
        for i in range(20):
            msg = yield from consumer.getone()
            result.append(msg.value)
        self.assertEqual(set(available_msgs), set(result))

        # Commit offset 9 on partition 0, seek back to it, and check the
        # next record is the one with value b'9'; then commit offset 10.
        yield from consumer.commit(
            {TopicPartition(self.topic, 0): OffsetAndMetadata(9, '')})
        yield from consumer.seek_to_committed(TopicPartition(self.topic, 0))
        msg = yield from consumer.getone()
        self.assertEqual(msg.value, b'9')
        yield from consumer.commit(
            {TopicPartition(self.topic, 0): OffsetAndMetadata(10, '')})
        yield from consumer.stop()

        # subscribe by topic
        # Phase 2: a fresh consumer in the same group resumes from the
        # committed offsets, so only partition 1's 10 messages remain.
        consumer = AIOKafkaConsumer(
            loop=self.loop, group_id='test-group',
            bootstrap_servers=self.hosts, auto_offset_reset='earliest',
            enable_auto_commit=False)
        consumer.subscribe(topics=(self.topic,))
        yield from consumer.start()
        yield from consumer.seek_to_committed()
        result = []
        for i in range(10):
            msg = yield from consumer.getone()
            result.append(msg.value)
        self.assertEqual(set(msgs2), set(result))
        self.assertEqual(consumer.subscription(), set([self.topic]))
        yield from consumer.stop()