Example #1
    def test_coordinator_workflow(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        yield from self.wait_topic(client, 'topic1')
        yield from self.wait_topic(client, 'topic2')

        subscription = SubscriptionState('latest')
        subscription.subscribe(topics=('topic1', 'topic2'))
        coordinator = GroupCoordinator(
            client, subscription, loop=self.loop,
            session_timeout_ms=10000,
            heartbeat_interval_ms=500,
            retry_backoff_ms=100)

        self.assertEqual(coordinator.coordinator_id, None)
        self.assertEqual(coordinator.rejoin_needed, True)

        yield from coordinator.ensure_coordinator_known()
        self.assertNotEqual(coordinator.coordinator_id, None)

        yield from coordinator.ensure_active_group()
        self.assertNotEqual(coordinator.coordinator_id, None)
        self.assertEqual(coordinator.rejoin_needed, False)

        tp_list = subscription.assigned_partitions()
        self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                       ('topic2', 0), ('topic2', 1)]))

        # start second coordinator
        client2 = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client2.bootstrap()
        subscription2 = SubscriptionState('latest')
        subscription2.subscribe(topics=('topic1', 'topic2'))
        coordinator2 = GroupCoordinator(
            client2, subscription2, loop=self.loop,
            session_timeout_ms=10000,
            heartbeat_interval_ms=500,
            retry_backoff_ms=100)
        yield from coordinator2.ensure_active_group()
        yield from coordinator.ensure_active_group()

        tp_list = subscription.assigned_partitions()
        self.assertEqual(len(tp_list), 2)
        tp_list2 = subscription2.assigned_partitions()
        self.assertEqual(len(tp_list2), 2)
        tp_list |= tp_list2
        self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                       ('topic2', 0), ('topic2', 1)]))
        yield from coordinator.close()
        yield from client.close()

        yield from asyncio.sleep(0.6, loop=self.loop)  # wait for heartbeat
        yield from coordinator2.ensure_active_group()
        tp_list = subscription2.assigned_partitions()
        self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                       ('topic2', 0), ('topic2', 1)]))

        yield from coordinator2.close()
        yield from client2.close()
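The yield from calls above use the pre-Python-3.5 coroutine syntax. As a rough sketch (assuming the same AIOKafkaClient, SubscriptionState, and GroupCoordinator interfaces shown in the test), the core workflow translates to modern async/await like this:

# Sketch only: the same calls as the test above, in async/await form.
async def coordinator_workflow(loop, hosts):
    client = AIOKafkaClient(loop=loop, bootstrap_servers=hosts)
    await client.bootstrap()

    subscription = SubscriptionState('latest')
    subscription.subscribe(topics=('topic1', 'topic2'))
    coordinator = GroupCoordinator(client, subscription, loop=loop)

    await coordinator.ensure_coordinator_known()  # locate the group coordinator
    await coordinator.ensure_active_group()       # join the group, get partitions
    assigned = subscription.assigned_partitions()

    await coordinator.close()
    await client.close()
    return assigned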
Example #2
 def test_close(self):
     client = AIOKafkaClient(["broker_1:4567"], loop=self.loop)
     m1 = mock.Mock()
     m2 = mock.Mock()
     client._conns = {("host1", 4567): m1, ("host2", 5678): m2}
     client.close()
     self.assertEqual({}, client._conns)
     m1.close.assert_called_with()
     m2.close.assert_called_with()
Example #3
 def test_send_request(self):
     client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
     yield from client.bootstrap()
     node_id = client.get_random_node()
     resp = yield from client.send(node_id, MetadataRequest([]))
     self.assertTrue(isinstance(resp, MetadataResponse))
     yield from client.close()
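Note: MetadataRequest([]) with an empty topic list asks the broker for metadata on all topics (the v0 protocol semantics), so any bootstrapped node can answer it.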
Example #4
    def test_get_offsets(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()

        subscription = SubscriptionState("earliest")
        subscription.subscribe(topics=("topic1",))
        coordinator = GroupCoordinator(client, subscription, loop=self.loop, group_id="getoffsets-group")

        yield from self.wait_topic(client, "topic1")
        producer = AIOKafkaProducer(loop=self.loop, bootstrap_servers=self.hosts)
        yield from producer.start()
        yield from producer.send("topic1", b"first msg", partition=0)
        yield from producer.send("topic1", b"second msg", partition=1)
        yield from producer.send("topic1", b"third msg", partition=1)
        yield from producer.stop()

        yield from coordinator.ensure_active_group()

        offsets = {
            TopicPartition("topic1", 0): OffsetAndMetadata(1, ""),
            TopicPartition("topic1", 1): OffsetAndMetadata(2, ""),
        }
        yield from coordinator.commit_offsets(offsets)

        self.assertEqual(subscription.all_consumed_offsets(), {})
        subscription.seek(("topic1", 0), 0)
        subscription.seek(("topic1", 1), 0)
        yield from coordinator.refresh_committed_offsets()
        self.assertEqual(subscription.assignment[("topic1", 0)].committed, 1)
        self.assertEqual(subscription.assignment[("topic1", 1)].committed, 2)

        yield from coordinator.close()
        yield from client.close()
Example #5
    def test_coordinator_ensure_active_group_on_expired_membership(self):
        # Do not fail ensure_active_group() if group membership has expired
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        yield from self.wait_topic(client, 'topic1')
        subscription = SubscriptionState('earliest')
        subscription.subscribe(topics=('topic1', ))
        coordinator = GroupCoordinator(client,
                                       subscription,
                                       loop=self.loop,
                                       group_id='test-offsets-group')
        yield from coordinator.ensure_active_group()

        # during OffsetCommit, UnknownMemberIdError is raised
        offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
        with mock.patch('aiokafka.errors.for_code') as mocked:
            mocked.return_value = Errors.UnknownMemberIdError
            with self.assertRaises(Errors.UnknownMemberIdError):
                yield from coordinator.commit_offsets(offsets)
            self.assertEqual(subscription.needs_partition_assignment, True)

        # same exception is raised during ensure_active_group()'s call to
        # commit_offsets() via _on_join_prepare() but doesn't break this method
        with mock.patch.object(coordinator, "commit_offsets") as mocked:

            @asyncio.coroutine
            def mock_commit_offsets(*args, **kwargs):
                raise Errors.UnknownMemberIdError()

            mocked.side_effect = mock_commit_offsets

            yield from coordinator.ensure_active_group()

        yield from coordinator.close()
        yield from client.close()
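The mock.patch('aiokafka.errors.for_code') trick above makes every error code in the OffsetCommit response decode to the chosen exception class, which is how these tests simulate broker-side failures without producing a real one.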
Example #6
    def test_coordinator_subscription_append_on_rebalance(self):
        # Same flow as the previous rebalance test, but topics are added
        # rather than replaced
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        yield from self.wait_topic(client, 'topic1')
        yield from self.wait_topic(client, 'topic2')
        subscription = SubscriptionState('earliest')
        subscription.subscribe(topics=('topic1',))
        coordinator = GroupCoordinator(
            client, subscription, loop=self.loop,
            group_id='race-rebalance-subscribe-append',
            heartbeat_interval_ms=20000000)

        _perform_assignment = coordinator._perform_assignment
        with mock.patch.object(coordinator, '_perform_assignment') as mocked:

            def _new(*args, **kw):
                # Change the subscription to different topic before we finish
                # rebalance
                res = _perform_assignment(*args, **kw)
                subscription.subscribe(topics=('topic1', 'topic2', ))
                client.set_topics(('topic1', 'topic2', ))
                return res
            mocked.side_effect = _new

            yield from coordinator.ensure_active_group()
            self.assertEqual(subscription.needs_partition_assignment, False)
            topics = set([tp.topic for tp in subscription.assignment])
            self.assertEqual(topics, {'topic1', 'topic2'})

        yield from coordinator.close()
        yield from client.close()
Example #7
    def test_failed_group_join(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        subscription = SubscriptionState("latest")
        subscription.subscribe(topics=("topic1",))
        coordinator = GroupCoordinator(client, subscription, loop=self.loop, retry_backoff_ms=10)

        yield from client.bootstrap()
        yield from self.wait_topic(client, "topic1")

        mocked = mock.MagicMock()
        coordinator._client = mocked

        # no exception expected, just wait
        mocked.send.side_effect = Errors.GroupLoadInProgressError()
        yield from coordinator._perform_group_join()
        self.assertEqual(coordinator.need_rejoin(), True)

        mocked.send.side_effect = Errors.InvalidGroupIdError()
        yield from coordinator._perform_group_join()
        self.assertEqual(coordinator.need_rejoin(), True)

        # no exception expected, member_id should be reset
        coordinator.member_id = "some_invalid_member_id"
        mocked.send.side_effect = Errors.UnknownMemberIdError()
        yield from coordinator._perform_group_join()
        self.assertEqual(coordinator.need_rejoin(), True)
        self.assertEqual(coordinator.member_id, JoinGroupRequest.UNKNOWN_MEMBER_ID)

        # no exception expected, coordinator_id should be reset
        coordinator.coordinator_id = "some_id"
        mocked.send.side_effect = Errors.GroupCoordinatorNotAvailableError()
        yield from coordinator._perform_group_join()
        self.assertEqual(coordinator.need_rejoin(), True)
        self.assertEqual(coordinator.coordinator_id, None)
        yield from client.close()
Example #8
    def test_subscribe_pattern(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()

        test_listener = RebalanceListenerForTest()
        subscription = SubscriptionState('latest')
        subscription.subscribe(pattern='st-topic*', listener=test_listener)
        coordinator = GroupCoordinator(
            client, subscription, loop=self.loop,
            group_id='subs-pattern-group')

        yield from self.wait_topic(client, 'st-topic1')
        yield from self.wait_topic(client, 'st-topic2')

        yield from coordinator.ensure_active_group()
        self.assertNotEqual(coordinator.coordinator_id, None)
        self.assertEqual(coordinator.rejoin_needed, False)

        tp_list = subscription.assigned_partitions()
        assigned = set([('st-topic1', 0), ('st-topic1', 1),
                        ('st-topic2', 0), ('st-topic2', 1)])
        self.assertEqual(tp_list, assigned)

        self.assertEqual(test_listener.revoked, [set([])])
        self.assertEqual(test_listener.assigned, [assigned])
        yield from coordinator.close()
        yield from client.close()
Example #9
    def test_offsets_failed_scenarios(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        yield from self.wait_topic(client, 'topic1')
        subscription = SubscriptionState('earliest')
        subscription.subscribe(topics=('topic1', ))
        coordinator = GroupCoordinator(client,
                                       subscription,
                                       loop=self.loop,
                                       group_id='test-offsets-group')

        yield from coordinator.ensure_active_group()

        offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
        yield from coordinator.commit_offsets(offsets)
        with mock.patch('aiokafka.errors.for_code') as mocked:
            mocked.return_value = Errors.GroupAuthorizationFailedError
            with self.assertRaises(Errors.GroupAuthorizationFailedError):
                yield from coordinator.commit_offsets(offsets)

            mocked.return_value = Errors.TopicAuthorizationFailedError
            with self.assertRaises(Errors.TopicAuthorizationFailedError):
                yield from coordinator.commit_offsets(offsets)

            mocked.return_value = Errors.InvalidCommitOffsetSizeError
            with self.assertRaises(Errors.InvalidCommitOffsetSizeError):
                yield from coordinator.commit_offsets(offsets)

            mocked.return_value = Errors.GroupLoadInProgressError
            with self.assertRaises(Errors.GroupLoadInProgressError):
                yield from coordinator.commit_offsets(offsets)

            mocked.return_value = Errors.RebalanceInProgressError
            with self.assertRaises(Errors.RebalanceInProgressError):
                yield from coordinator.commit_offsets(offsets)
            self.assertEqual(subscription.needs_partition_assignment, True)
            subscription.needs_partition_assignment = False

            mocked.return_value = Errors.UnknownMemberIdError
            with self.assertRaises(Errors.UnknownMemberIdError):
                yield from coordinator.commit_offsets(offsets)
            self.assertEqual(subscription.needs_partition_assignment, True)

            mocked.return_value = KafkaError
            with self.assertRaises(KafkaError):
                yield from coordinator.commit_offsets(offsets)

            mocked.return_value = Errors.NotCoordinatorForGroupError
            with self.assertRaises(Errors.NotCoordinatorForGroupError):
                yield from coordinator.commit_offsets(offsets)
            self.assertEqual(coordinator.coordinator_id, None)

            with self.assertRaises(Errors.GroupCoordinatorNotAvailableError):
                yield from coordinator.commit_offsets(offsets)

        yield from coordinator.close()
        yield from client.close()
Example #10
    def test_coordinator_metadata_update_during_rebalance(self):
        # Race condition where client.set_topics() starts a metadata update,
        # but it fails to arrive before the leader performs the assignment

        # Just ensure topics are created
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        yield from self.wait_topic(client, 'topic1')
        yield from self.wait_topic(client, 'topic2')
        yield from client.close()

        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        self.add_cleanup(client.close)
        subscription = SubscriptionState('earliest')
        client.set_topics(["topic1"])
        subscription.subscribe(topics=('topic1', ))
        coordinator = GroupCoordinator(
            client,
            subscription,
            loop=self.loop,
            group_id='race-rebalance-metadata-update',
            heartbeat_interval_ms=20000000)
        self.add_cleanup(coordinator.close)
        yield from coordinator.ensure_active_group()
        # Check that topic's partitions are properly assigned
        self.assertEqual(subscription.needs_partition_assignment, False)
        self.assertEqual(
            set(subscription.assignment.keys()),
            {TopicPartition("topic1", 0),
             TopicPartition("topic1", 1)})

        _metadata_update = client._metadata_update
        with mock.patch.object(client, '_metadata_update') as mocked:

            @asyncio.coroutine
            def _new(*args, **kw):
                # Slow metadata updates down a bit for test robustness
                yield from asyncio.sleep(0.5, loop=self.loop)
                res = yield from _metadata_update(*args, **kw)
                return res

            mocked.side_effect = _new

            subscription.subscribe(topics=('topic2', ))
            client.set_topics(('topic2', ))
            yield from coordinator.ensure_active_group()
            self.assertEqual(subscription.needs_partition_assignment, False)
            self.assertEqual(
                set(subscription.assignment.keys()),
                {TopicPartition("topic2", 0),
                 TopicPartition("topic2", 1)})
Example #11
    def test_failed_group_join(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        subscription = SubscriptionState('latest')
        subscription.subscribe(topics=('topic1', ))
        coordinator = GroupCoordinator(client,
                                       subscription,
                                       loop=self.loop,
                                       retry_backoff_ms=10)

        @asyncio.coroutine
        def do_rebalance():
            rebalance = CoordinatorGroupRebalance(
                coordinator,
                coordinator.group_id,
                coordinator.coordinator_id,
                subscription.subscription,
                coordinator._assignors,
                coordinator._session_timeout_ms,
                coordinator._retry_backoff_ms,
                loop=self.loop)
            yield from rebalance.perform_group_join()

        yield from client.bootstrap()
        yield from self.wait_topic(client, 'topic1')

        mocked = mock.MagicMock()
        coordinator._client = mocked

        # no exception expected, just wait
        mocked.send.side_effect = Errors.GroupLoadInProgressError()
        yield from do_rebalance()
        self.assertEqual(coordinator.need_rejoin(), True)

        mocked.send.side_effect = Errors.InvalidGroupIdError()
        with self.assertRaises(Errors.InvalidGroupIdError):
            yield from do_rebalance()
        self.assertEqual(coordinator.need_rejoin(), True)

        # no exception expected, member_id should be reset
        coordinator.member_id = 'some_invalid_member_id'
        mocked.send.side_effect = Errors.UnknownMemberIdError()
        yield from do_rebalance()
        self.assertEqual(coordinator.need_rejoin(), True)
        self.assertEqual(coordinator.member_id,
                         JoinGroupRequest.UNKNOWN_MEMBER_ID)

        # no exception expected, coordinator_id should be reset
        coordinator.coordinator_id = 'some_id'
        mocked.send.side_effect = Errors.GroupCoordinatorNotAvailableError()
        yield from do_rebalance()
        self.assertEqual(coordinator.need_rejoin(), True)
        self.assertEqual(coordinator.coordinator_id, None)
        yield from client.close()
Example #12
    def test_offsets_failed_scenarios(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        yield from self.wait_topic(client, 'topic1')
        subscription = SubscriptionState('earliest')
        subscription.subscribe(topics=('topic1',))
        coordinator = GroupCoordinator(
            client, subscription, loop=self.loop,
            group_id='test-offsets-group')

        yield from coordinator.ensure_active_group()

        offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
        yield from coordinator.commit_offsets(offsets)
        with mock.patch('kafka.common.for_code') as mocked:
            mocked.return_value = Errors.GroupAuthorizationFailedError
            with self.assertRaises(Errors.GroupAuthorizationFailedError):
                yield from coordinator.commit_offsets(offsets)

            mocked.return_value = Errors.TopicAuthorizationFailedError
            with self.assertRaises(Errors.TopicAuthorizationFailedError):
                yield from coordinator.commit_offsets(offsets)

            mocked.return_value = Errors.InvalidCommitOffsetSizeError
            with self.assertRaises(Errors.InvalidCommitOffsetSizeError):
                yield from coordinator.commit_offsets(offsets)

            mocked.return_value = Errors.GroupLoadInProgressError
            with self.assertRaises(Errors.GroupLoadInProgressError):
                yield from coordinator.commit_offsets(offsets)

            mocked.return_value = Errors.RebalanceInProgressError
            with self.assertRaises(Errors.RebalanceInProgressError):
                yield from coordinator.commit_offsets(offsets)
            self.assertEqual(subscription.needs_partition_assignment, True)

            mocked.return_value = KafkaError
            with self.assertRaises(KafkaError):
                yield from coordinator.commit_offsets(offsets)

            mocked.return_value = Errors.NotCoordinatorForGroupError
            with self.assertRaises(Errors.NotCoordinatorForGroupError):
                yield from coordinator.commit_offsets(offsets)
            self.assertEqual(coordinator.coordinator_id, None)

            with self.assertRaises(
                    Errors.GroupCoordinatorNotAvailableError):
                yield from coordinator.commit_offsets(offsets)

        yield from coordinator.close()
        yield from client.close()
Example #13
    def wait_kafka(cls):
        cls.hosts = ['{}:{}'.format(cls.kafka_host, cls.kafka_port)]

        # Reconnecting until Kafka in docker becomes available
        client = AIOKafkaClient(loop=cls.loop, bootstrap_servers=cls.hosts)

        for i in range(500):
            try:
                cls.loop.run_until_complete(client.bootstrap())
            except ConnectionError:
                time.sleep(0.1)
            else:
                cls.loop.run_until_complete(client.close())
                break
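Note that this variant falls through silently if the broker never becomes reachable within the 500 attempts; the later wait_kafka variants below fail explicitly with an assert instead.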
Example #14
    def test_metadata_update_fail(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()

        with mock.patch.object(AIOKafkaConnection, 'send') as mocked:
            mocked.side_effect = KafkaError('mocked exception')

            updated = yield from client.force_metadata_update()

            self.assertEqual(updated, False)

            with self.assertRaises(KafkaError):
                yield from client.fetch_all_metadata()
        yield from client.close()
Example #15
    def test_metadata_update_fail(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        # Make sure the connection is initialized before mocking to avoid
        # crashing the api_version routine
        yield from client.force_metadata_update()

        with mock.patch.object(AIOKafkaConnection, 'send') as mocked:
            mocked.side_effect = KafkaError('mocked exception')

            updated = yield from client.force_metadata_update()

            self.assertEqual(updated, False)

            with self.assertRaises(KafkaError):
                yield from client.fetch_all_metadata()
        yield from client.close()
Example #16
    def wait_kafka(cls):
        cls.hosts = ['{}:{}'.format(cls.kafka_host, cls.kafka_port)]

        # Reconnecting until Kafka in docker becomes available
        client = AIOKafkaClient(loop=cls.loop, bootstrap_servers=cls.hosts)
        for i in range(500):
            try:
                cls.loop.run_until_complete(client.bootstrap())
                # Broker may still be loading the cluster layout.
                if not client.cluster.brokers():
                    time.sleep(0.1)
                    continue
            except ConnectionError:
                time.sleep(0.1)
            else:
                cls.loop.run_until_complete(client.close())
                return
        assert False, "Kafka server never started"
Example #17
    def test_metadata_synchronizer(self):
        client = AIOKafkaClient(loop=self.loop,
                                bootstrap_servers=self.hosts,
                                metadata_max_age_ms=100)

        with mock.patch.object(AIOKafkaClient, '_metadata_update') as mocked:

            @asyncio.coroutine
            def dummy(*d, **kw):
                client.cluster.failed_update(None)

            mocked.side_effect = dummy

            yield from client.bootstrap()
            yield from asyncio.sleep(0.15, loop=self.loop)
            yield from client.close()

            self.assertNotEqual(len(client._metadata_update.mock_calls), 0)
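With metadata_max_age_ms=100 the background synchronizer is due to refresh metadata at least once during the 0.15 s sleep, and the dummy side effect reports every attempt as failed via cluster.failed_update(None), so the test only needs to check that _metadata_update was actually called.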
Example #18
    def wait_kafka(cls):
        cls.hosts = ['{}:{}'.format(cls.kafka_host, cls.kafka_port)]

        # Reconnecting until Kafka in docker becomes available
        for i in range(500):
            client = AIOKafkaClient(loop=cls.loop, bootstrap_servers=cls.hosts)
            try:
                cls.loop.run_until_complete(client.bootstrap())
                # Broker can still be loading cluster layout, so we can get 0
                # brokers. That counts as still not available
                if client.cluster.brokers():
                    return
            except ConnectionError:
                pass
            finally:
                cls.loop.run_until_complete(client.close())
            time.sleep(0.1)
        assert False, "Kafka server never started"
Example #19
    def test_bootstrap(self):
        client = AIOKafkaClient(loop=self.loop,
                                bootstrap_servers='0.42.42.42:444')
        with self.assertRaises(ConnectionError):
            yield from client.bootstrap()

        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        yield from self.wait_topic(client, 'test_topic')

        metadata = yield from client.fetch_all_metadata()
        self.assertTrue('test_topic' in metadata.topics())

        client.set_topics(['t2', 't3'])
        client.set_topics(['t2', 't3'])  # should be ignored
        client.add_topic('t2')  # should be ignored
        # bootstrap again -- no error expected
        yield from client.bootstrap()
        yield from client.close()
Example #20
    def test_check_version(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        ver = yield from client.check_version()
        self.assertTrue('0.' in ver)
        yield from self.wait_topic(client, 'some_test_topic')
        ver2 = yield from client.check_version()
        self.assertEqual(ver, ver2)
        ver2 = yield from client.check_version(client.get_random_node())
        self.assertEqual(ver, ver2)

        with mock.patch.object(AIOKafkaConnection, 'send') as mocked:
            mocked.side_effect = KafkaError('mocked exception')
            with self.assertRaises(UnrecognizedBrokerVersion):
                yield from client.check_version(client.get_random_node())

        client._get_conn = asyncio.coroutine(lambda _: None)
        with self.assertRaises(ConnectionError):
            yield from client.check_version()
        yield from client.close()
Example #21
    def test_fetchoffsets_failed_scenarios(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        yield from self.wait_topic(client, 'topic1')
        subscription = SubscriptionState('earliest')
        subscription.subscribe(topics=('topic1', ))
        coordinator = GroupCoordinator(client,
                                       subscription,
                                       loop=self.loop,
                                       group_id='fetch-offsets-group')

        yield from coordinator.ensure_active_group()

        offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
        with mock.patch('kafka.common.for_code') as mocked:
            mocked.side_effect = MockedKafkaErrCode(
                Errors.GroupLoadInProgressError, Errors.NoError)
            yield from coordinator.fetch_committed_offsets(offsets)

            mocked.side_effect = MockedKafkaErrCode(
                Errors.UnknownMemberIdError, Errors.NoError)
            with self.assertRaises(Errors.UnknownMemberIdError):
                yield from coordinator.fetch_committed_offsets(offsets)
            self.assertEqual(subscription.needs_partition_assignment, True)

            mocked.side_effect = None
            mocked.return_value = Errors.UnknownTopicOrPartitionError
            r = yield from coordinator.fetch_committed_offsets(offsets)
            self.assertEqual(r, {})

            mocked.return_value = KafkaError
            with self.assertRaises(KafkaError):
                yield from coordinator.fetch_committed_offsets(offsets)

            mocked.side_effect = MockedKafkaErrCode(
                Errors.NotCoordinatorForGroupError, Errors.NoError,
                Errors.NoError, Errors.NoError)
            yield from coordinator.fetch_committed_offsets(offsets)

        yield from coordinator.close()
        yield from client.close()
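MockedKafkaErrCode is a test helper that is not shown in this listing. Judging from its use as a side_effect for the patched for_code, a plausible minimal implementation (hypothetical, names assumed) cycles through the given error classes on successive calls:

class MockedKafkaErrCode:
    # Hypothetical sketch: return the given error classes in order,
    # one per for_code() call, wrapping around at the end.
    def __init__(self, *err_codes):
        self._err_codes = err_codes
        self._idx = 0

    def __call__(self, *args, **kwargs):
        err = self._err_codes[self._idx]
        self._idx = (self._idx + 1) % len(self._err_codes)
        return err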
Example #22
def wait_kafka(kafka_host, kafka_port, timeout=60):
    hosts = ['{}:{}'.format(kafka_host, kafka_port)]
    loop = asyncio.get_event_loop()

    # Reconnecting until Kafka in docker becomes available
    start = loop.time()
    while True:
        client = AIOKafkaClient(loop=loop, bootstrap_servers=hosts)
        try:
            loop.run_until_complete(client.bootstrap())
            # Broker can still be loading cluster layout, so we can get 0
            # brokers. That counts as still not available
            if client.cluster.brokers():
                return True
        except KafkaConnectionError:
            pass
        finally:
            loop.run_until_complete(client.close())
        time.sleep(0.5)
        if loop.time() - start > timeout:
            return False
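A test session would typically gate on this helper before starting; a minimal usage sketch (host and port are placeholder values):

# Usage sketch for the helper above.
if not wait_kafka('localhost', 9092, timeout=60):
    raise RuntimeError('Kafka broker did not become available in time')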
Example #23
    def setUp(self):
        super().setUp()
        self.hosts = ['{}:{}'.format(self.kafka_host, self.kafka_port)]

        if not self.topic:
            self.topic = "topic-{}-{}".format(
                self.id()[self.id().rindex(".") + 1:],
                random_string(10).decode('utf-8'))

        # Reconnecting until Kafka in docker becomes available
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        for i in range(500):
            try:
                self.loop.run_until_complete(client.bootstrap())

            except ConnectionError:
                time.sleep(0.1)
            else:
                self.loop.run_until_complete(client.close())
                break
        self._messages = {}
Example #24
    def test_coordinator_metadata_change_by_broker(self):
        # Issue #108: a misleading metadata change can trigger an extra
        # rebalance
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        yield from self.wait_topic(client, 'topic1')
        yield from self.wait_topic(client, 'topic2')
        client.set_topics(['other_topic'])
        yield from client.force_metadata_update()

        subscription = SubscriptionState('earliest')
        coordinator = GroupCoordinator(
            client,
            subscription,
            loop=self.loop,
            group_id='race-rebalance-subscribe-append',
            heartbeat_interval_ms=2000000)
        subscription.subscribe(topics=('topic1', ))
        yield from client.set_topics(subscription.group_subscription())
        yield from coordinator.ensure_active_group()

        _perform_assignment = coordinator._perform_assignment
        with mock.patch.object(coordinator, '_perform_assignment') as mocked:
            mocked.side_effect = _perform_assignment

            subscription.subscribe(topics=('topic2', ))
            yield from client.set_topics(subscription.group_subscription())

            # Should trigger only 1 rebalance, but triggers 2 with the bug:
            #   Metadata snapshot will change:
            #   {'topic1': {0, 1}} -> {'topic1': {0, 1}, 'topic2': {0, 1}}
            #   And then again:
            #   {'topic1': {0, 1}, 'topic2': {0, 1}} -> {'topic2': {0, 1}}
            yield from coordinator.ensure_active_group()
            yield from client.force_metadata_update()
            yield from coordinator.ensure_active_group()
            self.assertEqual(mocked.call_count, 1)

        yield from coordinator.close()
        yield from client.close()
Example #25
    def test_check_version(self):
        kafka_version = tuple(int(x) for x in self.kafka_version.split("."))

        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        ver = yield from client.check_version()
        self.assertEqual(kafka_version[:2], ver[:2])
        yield from self.wait_topic(client, 'some_test_topic')
        ver2 = yield from client.check_version()
        self.assertEqual(ver, ver2)
        ver2 = yield from client.check_version(client.get_random_node())
        self.assertEqual(ver, ver2)

        with mock.patch.object(
                AIOKafkaConnection, 'send') as mocked:
            mocked.side_effect = KafkaError('mocked exception')
            with self.assertRaises(UnrecognizedBrokerVersion):
                yield from client.check_version(client.get_random_node())

        client._get_conn = asyncio.coroutine(lambda _, **kw: None)
        with self.assertRaises(ConnectionError):
            yield from client.check_version()
        yield from client.close()
Example #26
    def test_exclude_internal_topics(self):
        # Create random topic
        my_topic = "some_noninternal_topic"
        client = AIOKafkaClient(
            loop=self.loop, bootstrap_servers=self.hosts,
            client_id="test_autocreate")
        yield from client.bootstrap()
        yield from client._wait_on_metadata(my_topic)
        yield from client.close()

        # With exclude_internal_topics=False, the pattern subscription
        # should also pick up internal topics like __consumer_offsets
        pattern = "^.*$"
        consumer = AIOKafkaConsumer(
            loop=self.loop, bootstrap_servers=self.hosts,
            metadata_max_age_ms=200, group_id="some_group_1",
            auto_offset_reset="earliest",
            exclude_internal_topics=False)
        consumer.subscribe(pattern=pattern)
        yield from consumer.start()
        self.assertIn("__consumer_offsets", consumer.subscription())
        yield from consumer._client.force_metadata_update()
        self.assertIn("__consumer_offsets", consumer.subscription())
        yield from consumer.stop()
Example #27
class BaseProducer(abc.ABC):

    _PRODUCER_CLIENT_ID_SEQUENCE = 0

    _COMPRESSORS = {
        'gzip': (has_gzip, LegacyRecordBatchBuilder.CODEC_GZIP),
        'snappy': (has_snappy, LegacyRecordBatchBuilder.CODEC_SNAPPY),
        'lz4': (has_lz4, LegacyRecordBatchBuilder.CODEC_LZ4),
    }

    _closed = None  # Serves as an uninitialized flag for __del__
    _source_traceback = None

    def __init__(self, *, loop, bootstrap_servers='localhost',
                 client_id=None,
                 metadata_max_age_ms=300000, request_timeout_ms=40000,
                 api_version='auto', acks=_missing,
                 key_serializer=None, value_serializer=None,
                 compression_type=None, max_batch_size=16384,
                 partitioner=DefaultPartitioner(), max_request_size=1048576,
                 linger_ms=0, send_backoff_ms=100,
                 retry_backoff_ms=100, security_protocol="PLAINTEXT",
                 ssl_context=None, connections_max_idle_ms=540000,
                 on_irrecoverable_error=None,
                 enable_idempotence=False, transactional_id=None,
                 transaction_timeout_ms=60000, sasl_mechanism="PLAIN",
                 sasl_plain_password=None, sasl_plain_username=None,
                 sasl_kerberos_service_name='kafka',
                 sasl_kerberos_domain_name=None):
        if acks not in (0, 1, -1, 'all', _missing):
            raise ValueError("Invalid ACKS parameter")
        if compression_type not in ('gzip', 'snappy', 'lz4', None):
            raise ValueError("Invalid compression type!")
        if compression_type:
            checker, compression_attrs = self._COMPRESSORS[compression_type]
            if not checker():
                raise RuntimeError("Compression library for {} not found"
                                   .format(compression_type))
        else:
            compression_attrs = 0
        self._compression_attrs = compression_attrs

        if acks is _missing:
            acks = 1
        elif acks == 'all':
            acks = -1

        AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE += 1
        if client_id is None:
            client_id = 'aiokafka-producer-%s' % \
                AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE
        self._bootstrap_servers = bootstrap_servers
        self._client_id = client_id
        self._metadata_max_age_ms = metadata_max_age_ms
        self._request_timeout_ms = request_timeout_ms
        self._api_version = api_version
        self._acks = acks
        self._key_serializer = key_serializer
        self._value_serializer = value_serializer
        self._compression_type = compression_type
        self._max_batch_size = max_batch_size
        self._partitioner = partitioner
        self._max_request_size = max_request_size
        self._linger_ms = linger_ms
        self._send_backoff_ms = send_backoff_ms
        self._retry_backoff_ms = retry_backoff_ms
        self._security_protocol = security_protocol
        self._ssl_context = ssl_context
        self._connections_max_idle_ms = connections_max_idle_ms
        self._transaction_timeout_ms = transaction_timeout_ms
        self._on_irrecoverable_error = on_irrecoverable_error
        self._sasl_mechanism = sasl_mechanism
        self._sasl_plain_username = sasl_plain_username
        self._sasl_plain_password = sasl_plain_password
        self._sasl_kerberos_service_name = sasl_kerberos_service_name
        self._sasl_kerberos_domain_name = sasl_kerberos_domain_name

        self.client = AIOKafkaClient(
            loop=loop, bootstrap_servers=bootstrap_servers,
            client_id=client_id, metadata_max_age_ms=metadata_max_age_ms,
            request_timeout_ms=request_timeout_ms,
            retry_backoff_ms=retry_backoff_ms,
            api_version=api_version, security_protocol=security_protocol,
            ssl_context=ssl_context,
            connections_max_idle_ms=connections_max_idle_ms,
            sasl_mechanism=sasl_mechanism,
            sasl_plain_username=sasl_plain_username,
            sasl_plain_password=sasl_plain_password,
            sasl_kerberos_service_name=sasl_kerberos_service_name,
            sasl_kerberos_domain_name=sasl_kerberos_domain_name)
        self._metadata = self.client.cluster
        self._loop = loop
        if loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))
        self._closed = False

    if PY_341:
        # Warn if producer was not closed properly
        # We don't attempt to close the producer, as __del__ is synchronous
        def __del__(self, _warnings=warnings):
            if self._closed is False:
                if PY_36:
                    kwargs = {'source': self}
                else:
                    kwargs = {}
                _warnings.warn("Unclosed AIOKafkaProducer {!r}".format(self),
                               ResourceWarning,
                               **kwargs)
                context = {'producer': self,
                           'message': 'Unclosed AIOKafkaProducer'}
                if self._source_traceback is not None:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)

    @abc.abstractmethod
    def _on_set_api_version(self, api_version):
        ...

    @abc.abstractmethod
    def _message_accumulator_for(self, transactional_id, tp):
        ...

    @abc.abstractmethod
    def _transactional_id_or_default(self, transactional_id):
        ...

    @abc.abstractmethod
    def _verify_txn_started(self, transactional_id):
        ...

    @abc.abstractmethod
    def _wait_for_sender(self):
        ...

    @abc.abstractmethod
    def _ensure_transactional(self):
        ...

    @asyncio.coroutine
    def start(self):
        """Connect to Kafka cluster and check server version"""
        log.debug("Starting the Kafka producer")  # trace
        yield from self.client.bootstrap()
        if self._closed:
            return
        api_version = self.client.api_version

        self._verify_api_version(api_version)
        yield from self._start_sender()
        self._on_set_api_version(api_version)

        self._producer_magic = 0 if api_version < (0, 10) else 1
        log.debug("Kafka producer started")

    def _verify_api_version(self, api_version):
        if self._compression_type == 'lz4':
            assert self.client.api_version >= (0, 8, 2), \
                'LZ4 Requires >= Kafka 0.8.2 Brokers'

    @asyncio.coroutine
    def stop(self):
        """Flush all pending data and close all connections to kafka cluster"""
        if self._closed:
            return
        self._closed = True
        self.client.set_close()

        yield from self._wait_for_sender()

        yield from self.client.close()
        log.debug("The Kafka producer has closed.")

    @asyncio.coroutine
    def partitions_for(self, topic):
        """Returns set of all known partitions for the topic."""
        return (yield from self.client._wait_on_metadata(topic))

    def _serialize(self, topic, key, value):
        if self._key_serializer:
            serialized_key = self._key_serializer(key)
        else:
            serialized_key = key
        if self._value_serializer:
            serialized_value = self._value_serializer(value)
        else:
            serialized_value = value

        message_size = LegacyRecordBatchBuilder.record_overhead(
            self._producer_magic)
        if serialized_key is not None:
            message_size += len(serialized_key)
        if serialized_value is not None:
            message_size += len(serialized_value)
        if message_size > self._max_request_size:
            raise MessageSizeTooLargeError(
                "The message is %d bytes when serialized which is larger than"
                " the maximum request size you have configured with the"
                " max_request_size configuration" % message_size)

        return serialized_key, serialized_value

    def _partition(self, topic, partition, key, value,
                   serialized_key, serialized_value):
        if partition is not None:
            assert partition >= 0
            assert partition in self._metadata.partitions_for_topic(topic), \
                'Unrecognized partition'
            return partition

        all_partitions = list(self._metadata.partitions_for_topic(topic))
        available = list(self._metadata.available_partitions_for_topic(topic))
        return self._partitioner(
            serialized_key, all_partitions, available)

    @asyncio.coroutine
    def send(self, topic, value=None, key=None, partition=None,
             timestamp_ms=None, headers=None, transactional_id=None):
        """Publish a message to a topic.

        Arguments:
            topic (str): topic where the message will be published
            value (optional): message value. Must be type bytes, or be
                serializable to bytes via configured value_serializer. If value
                is None, key is required and message acts as a 'delete'.
                See kafka compaction documentation for more details:
                http://kafka.apache.org/documentation.html#compaction
                (compaction requires kafka >= 0.8.1)
            partition (int, optional): optionally specify a partition. If not
                set, the partition will be selected using the configured
                'partitioner'.
            key (optional): a key to associate with the message. Can be used to
                determine which partition to send the message to. If partition
                is None (and producer's partitioner config is left as default),
                then messages with the same key will be delivered to the same
                partition (but if key is None, partition is chosen randomly).
                Must be type bytes, or be serializable to bytes via configured
                key_serializer.
            timestamp_ms (int, optional): epoch milliseconds (from Jan 1 1970
                UTC) to use as the message timestamp. Defaults to current time.

        Returns:
            asyncio.Future: object that will be set when message is
            processed

        Raises:
            kafka.KafkaTimeoutError: if we can't schedule this record (
                pending buffer is full) in up to `request_timeout_ms`
                milliseconds.

        Note:
            The returned future will wait based on `request_timeout_ms`
            setting. Cancelling the returned future **will not** stop event
            from being sent, but cancelling the ``send`` coroutine itself
            **will**.
        """
        assert value is not None or self.client.api_version >= (0, 8, 1), (
            'Null messages require kafka >= 0.8.1')
        assert not (value is None and key is None), \
            'Need at least one: key or value'
        transactional_id = self._transactional_id_or_default(transactional_id)

        # first make sure the metadata for the topic is available
        yield from self.client._wait_on_metadata(topic)

        # Ensure transaction is started and not committing
        self._verify_txn_started(transactional_id)

        if headers is not None:
            if self.client.api_version < (0, 11):
                raise UnsupportedVersionError(
                    "Headers not supported before Kafka 0.11")
        else:
            # Record parser/builder support only list type, no explicit None
            headers = []

        key_bytes, value_bytes = self._serialize(topic, key, value)
        partition = self._partition(topic, partition, key, value,
                                    key_bytes, value_bytes)

        tp = TopicPartition(topic, partition)
        log.debug("Sending (key=%s value=%s) to %s", key, value, tp)

        message_accumulator = self._message_accumulator_for(
            transactional_id, tp)
        fut = yield from message_accumulator.add_message(
            tp, key_bytes, value_bytes, self._request_timeout_ms / 1000,
            timestamp_ms=timestamp_ms, headers=headers)
        return fut

    @asyncio.coroutine
    def send_and_wait(self, topic, value=None, key=None, partition=None,
                      timestamp_ms=None):
        """Publish a message to a topic and wait the result"""
        future = yield from self.send(
            topic, value, key, partition, timestamp_ms)
        return (yield from future)
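
A minimal usage sketch for the ``send``/``send_and_wait`` coroutines above (not part of the original example; assumes a broker at localhost:9092 and a started producer instance):

.. code:: python

    @asyncio.coroutine
    def produce_one(producer):
        # send() returns a delivery future; the record itself is written
        # by the background sender task
        fut = yield from producer.send('my_topic', b'value', key=b'key')
        yield from fut  # resolves once the broker acknowledges the record
        # send_and_wait() combines both steps in one call
        yield from producer.send_and_wait('my_topic', b'another value')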
Example #38
class AIOKafkaProducer(object):
    """A Kafka client that publishes records to the Kafka cluster.

    The producer consists of a pool of buffer space that holds records that
    haven't yet been transmitted to the server as well as a background task
    that is responsible for turning these records into requests and
    transmitting them to the cluster.

    The send() method is asynchronous. When called it adds the record to a
    buffer of pending record sends and immediately returns. This allows the
    producer to batch together individual records for efficiency.

    The 'acks' config controls the criteria under which requests are considered
    complete. The "all" setting will result in waiting for all replicas to
    respond, the slowest but most durable setting.

    The key_serializer and value_serializer instruct how to turn the key and
    value objects the user provides into bytes.

    Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the producer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client.
            Default: 'aiokafka-producer-#' (appended with a unique number
            per instance)
        key_serializer (callable): used to convert user-supplied keys to bytes
            If not None, called as f(key), should return bytes. Default: None.
        value_serializer (callable): used to convert user-supplied message
            values to bytes. If not None, called as f(value), should return
            bytes. Default: None.
        acks (0, 1, 'all'): The number of acknowledgments the producer requires
            the leader to have received before considering a request complete.
            This controls the durability of records that are sent. The
            following settings are common:

            0: Producer will not wait for any acknowledgment from the server
                at all. The message will immediately be added to the socket
                buffer and considered sent. No guarantee can be made that the
                server has received the record in this case, and the retries
                configuration will not take effect (as the client won't
                generally know of any failures). The offset given back for each
                record will always be set to -1.
            1: The broker leader will write the record to its local log but
                will respond without awaiting full acknowledgement from all
                followers. In this case should the leader fail immediately
                after acknowledging the record but before the followers have
                replicated it then the record will be lost.
            all: The broker leader will wait for the full set of in-sync
                replicas to acknowledge the record. This guarantees that the
                record will not be lost as long as at least one in-sync replica
                remains alive. This is the strongest available guarantee.

            If unset, defaults to acks=1.
        compression_type (str): The compression type for all data generated by
            the producer. Valid values are 'gzip', 'snappy', 'lz4', or None.
            Compression is of full batches of data, so the efficacy of batching
            will also impact the compression ratio (more batching means better
            compression). Default: None.
        max_batch_size (int): Maximum size of buffered data per partition.
            After this amount the `send` coroutine will block until the
            batch is drained.
            Default: 16384
        linger_ms (int): The producer groups together any records that arrive
            in between request transmissions into a single batched request.
            Normally this occurs only under load when records arrive faster
            than they can be sent out. However in some circumstances the client
            may want to reduce the number of requests even under moderate load.
            This setting accomplishes this by adding a small amount of
            artificial delay; that is, if the first request is processed
            faster than `linger_ms`, the producer will wait out the
            remaining `linger_ms - process_time`. This setting defaults
            to 0 (i.e. no delay).
        partitioner (callable): Callable used to determine which partition
            each message is assigned to. Called (after key serialization):
            partitioner(key_bytes, all_partitions, available_partitions).
            The default partitioner implementation hashes each non-None key
            using the same murmur2 algorithm as the Java client so that
            messages with the same key are assigned to the same partition.
            When a key is None, the message is delivered to a random partition
            (filtered to partitions with available leaders only, if possible).
        max_request_size (int): The maximum size of a request. This is also
            effectively a cap on the maximum record size. Note that the server
            has its own cap on record size which may be different from this.
            This setting will limit the number of record batches the producer
            will send in a single request to avoid sending huge requests.
            Default: 1048576.
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        request_timeout_ms (int): Produce request timeout in milliseconds.
            As it's sent as part of ProduceRequest (it's a blocking call),
            maximum waiting time can be up to 2 * request_timeout_ms.
            Default: 40000.
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        api_version (str): specify which kafka API version to use.
            If set to 'auto', will attempt to infer the broker version by
            probing various APIs. Default: auto
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
            socket connections. Directly passed into asyncio's
            `create_connection`_. For more information see :ref:`ssl_auth`.
            Default: None.

    Note:
        Many configuration parameters are taken from the Java client:
        https://kafka.apache.org/documentation.html#producerconfigs
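
    Example (a minimal sketch, assuming a broker at localhost:9092):

    .. code:: python

        producer = AIOKafkaProducer(
            loop=loop, bootstrap_servers='localhost:9092')
        yield from producer.start()
        try:
            yield from producer.send_and_wait('my_topic', b'some message')
        finally:
            yield from producer.stop()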
    """
    _PRODUCER_CLIENT_ID_SEQUENCE = 0

    _COMPRESSORS = {
        'gzip': (has_gzip, LegacyRecordBatchBuilder.CODEC_GZIP),
        'snappy': (has_snappy, LegacyRecordBatchBuilder.CODEC_SNAPPY),
        'lz4': (has_lz4, LegacyRecordBatchBuilder.CODEC_LZ4),
    }

    def __init__(self,
                 *,
                 loop,
                 bootstrap_servers='localhost',
                 client_id=None,
                 metadata_max_age_ms=300000,
                 request_timeout_ms=40000,
                 api_version='auto',
                 acks=1,
                 key_serializer=None,
                 value_serializer=None,
                 compression_type=None,
                 max_batch_size=16384,
                 partitioner=DefaultPartitioner(),
                 max_request_size=1048576,
                 linger_ms=0,
                 send_backoff_ms=100,
                 retry_backoff_ms=100,
                 security_protocol="PLAINTEXT",
                 ssl_context=None):
        if acks not in (0, 1, -1, 'all'):
            raise ValueError("Invalid ACKS parameter")
        if compression_type not in ('gzip', 'snappy', 'lz4', None):
            raise ValueError("Invalid compression type!")
        if compression_type:
            checker, compression_attrs = self._COMPRESSORS[compression_type]
            if not checker():
                raise RuntimeError(
                    "Compression library for {} not found".format(
                        compression_type))
        else:
            compression_attrs = 0

        if api_version not in ('auto', '0.10', '0.9', '0.8.2', '0.8.1',
                               '0.8.0'):
            raise ValueError("Unsupported Kafka version")

        # bump the class-level counter so each instance gets a unique number
        AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE += 1
        if client_id is None:
            client_id = 'aiokafka-producer-%s' % \
                self._PRODUCER_CLIENT_ID_SEQUENCE

        if acks == 'all':
            acks = -1
        self._acks = acks
        self._key_serializer = key_serializer
        self._value_serializer = value_serializer
        self._compression_type = compression_type
        self._partitioner = partitioner
        self._max_request_size = max_request_size
        self._request_timeout_ms = request_timeout_ms

        self.client = AIOKafkaClient(loop=loop,
                                     bootstrap_servers=bootstrap_servers,
                                     client_id=client_id,
                                     metadata_max_age_ms=metadata_max_age_ms,
                                     request_timeout_ms=request_timeout_ms,
                                     retry_backoff_ms=retry_backoff_ms,
                                     api_version=api_version,
                                     security_protocol=security_protocol,
                                     ssl_context=ssl_context)
        self._metadata = self.client.cluster
        self._message_accumulator = MessageAccumulator(
            self._metadata, max_batch_size, compression_attrs,
            self._request_timeout_ms / 1000, loop)
        self._sender_task = None
        self._in_flight = set()
        self._closed = False
        self._loop = loop
        self._retry_backoff = retry_backoff_ms / 1000
        self._linger_time = linger_ms / 1000
        self._producer_magic = 0

    @asyncio.coroutine
    def start(self):
        """Connect to Kafka cluster and check server version"""
        log.debug("Starting the Kafka producer")  # trace
        yield from self.client.bootstrap()

        if self._compression_type == 'lz4':
            assert self.client.api_version >= (0, 8, 2), \
                'LZ4 Requires >= Kafka 0.8.2 Brokers'

        self._sender_task = ensure_future(self._sender_routine(),
                                          loop=self._loop)
        self._message_accumulator.set_api_version(self.client.api_version)
        self._producer_magic = 0 if self.client.api_version < (0, 10) else 1
        log.debug("Kafka producer started")

    @asyncio.coroutine
    def flush(self):
        """Wait untill all batches are Delivered and futures resolved"""
        yield from self._message_accumulator.flush()

    @asyncio.coroutine
    def stop(self):
        """Flush all pending data and close all connections to kafka cluster"""
        if self._closed:
            return
        self._closed = True

        yield from self._message_accumulator.close()

        if self._sender_task:
            self._sender_task.cancel()
            yield from self._sender_task

        yield from self.client.close()
        log.debug("The Kafka producer has closed.")

    @asyncio.coroutine
    def partitions_for(self, topic):
        """Returns set of all known partitions for the topic."""
        return (yield from self.client._wait_on_metadata(topic))

    @asyncio.coroutine
    def send(self,
             topic,
             value=None,
             key=None,
             partition=None,
             timestamp_ms=None):
        """Publish a message to a topic.

        Arguments:
            topic (str): topic where the message will be published
            value (optional): message value. Must be type bytes, or be
                serializable to bytes via configured value_serializer. If value
                is None, key is required and message acts as a 'delete'.
                See kafka compaction documentation for more details:
                http://kafka.apache.org/documentation.html#compaction
                (compaction requires kafka >= 0.8.1)
            partition (int, optional): optionally specify a partition. If not
                set, the partition will be selected using the configured
                'partitioner'.
            key (optional): a key to associate with the message. Can be used to
                determine which partition to send the message to. If partition
                is None (and producer's partitioner config is left as default),
                then messages with the same key will be delivered to the same
                partition (but if key is None, partition is chosen randomly).
                Must be type bytes, or be serializable to bytes via configured
                key_serializer.
            timestamp_ms (int, optional): epoch milliseconds (from Jan 1 1970
                UTC) to use as the message timestamp. Defaults to current time.

        Returns:
            asyncio.Future: object that will be set when message is
            processed

        Raises:
            kafka.KafkaTimeoutError: if we can't schedule this record
                (pending buffer is full) within `request_timeout_ms`
                milliseconds.

        Note:
            The returned future will wait based on the `request_timeout_ms`
            setting. Cancelling the returned future **will not** stop the
            event from being sent, but cancelling the ``send`` coroutine
            itself **will**.
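
        Example (a minimal sketch; assumes a started producer and an
        existing ``my_topic``):

        .. code:: python

            fut = yield from producer.send('my_topic', b'value', key=b'key')
            # resolves once the broker acknowledges the record
            metadata = yield from fut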
        """
        assert value is not None or self.client.api_version >= (0, 8, 1), (
            'Null messages require kafka >= 0.8.1')
        assert not (value is None and key is None), \
            'Need at least one: key or value'

        # first make sure the metadata for the topic is available
        yield from self.client._wait_on_metadata(topic)

        key_bytes, value_bytes = self._serialize(topic, key, value)
        partition = self._partition(topic, partition, key, value, key_bytes,
                                    value_bytes)

        tp = TopicPartition(topic, partition)
        log.debug("Sending (key=%s value=%s) to %s", key, value, tp)

        fut = yield from self._message_accumulator.add_message(
            tp,
            key_bytes,
            value_bytes,
            self._request_timeout_ms / 1000,
            timestamp_ms=timestamp_ms)
        return fut

    @asyncio.coroutine
    def send_and_wait(self,
                      topic,
                      value=None,
                      key=None,
                      partition=None,
                      timestamp_ms=None):
        """Publish a message to a topic and wait the result"""
        future = yield from self.send(topic, value, key, partition,
                                      timestamp_ms)
        return (yield from future)

    @asyncio.coroutine
    def _sender_routine(self):
        """ Background task, that sends pending batches to leader nodes for
        batch's partition. This incapsulates same logic as Java's `Sender`
        background thread. Because we use asyncio this is more event based
        loop, rather than counting timeout till next possible even like in
        Java.

            The procedure:
            * Group pending batches by partition leaders (write nodes)
            * Ignore not ready (disconnected) and nodes, that already have a
              pending request.
            * If we have unknown leaders for partitions, we request a metadata
              update.
            * Wait for any event, that can change the above procedure, like
              new metadata or pending send is finished and a new one can be
              done.
        """
        tasks = set()
        try:
            while True:
                batches, unknown_leaders_exist = \
                    self._message_accumulator.drain_by_nodes(
                        ignore_nodes=self._in_flight)

                # create produce task for every batch
                for node_id, node_batches in batches.items():
                    task = ensure_future(
                        self._send_produce_req(node_id, node_batches),
                        loop=self._loop)
                    self._in_flight.add(node_id)
                    tasks.add(task)

                if unknown_leaders_exist:
                    # we have at least one unknown partition's leader,
                    # try to update cluster metadata and wait backoff time
                    fut = self.client.force_metadata_update()
                    waiters = tasks.union([fut])
                else:
                    fut = self._message_accumulator.data_waiter()
                    waiters = tasks.union([fut])

                # wait when:
                # * At least one of produce task is finished
                # * Data for new partition arrived
                # * Metadata update if partition leader unknown
                done, _ = yield from asyncio.wait(
                    waiters,
                    return_when=asyncio.FIRST_COMPLETED,
                    loop=self._loop)

                # done tasks should never produce errors; if one does,
                # it's a bug
                for task in done:
                    task.result()

                tasks -= done

        except asyncio.CancelledError:
            # done tasks should never produce errors; if one does, it's a bug
            for task in tasks:
                yield from task
        except Exception:  # pragma: no cover
            log.error("Unexpected error in sender routine", exc_info=True)

    @asyncio.coroutine
    def _send_produce_req(self, node_id, batches):
        """ Create produce request to node
        If producer configured with `retries`>0 and produce response contain
        "failed" partitions produce request for this partition will try
        resend to broker `retries` times with `retry_timeout_ms` timeouts.

        Arguments:
            node_id (int): kafka broker identifier
            batches (dict): dictionary of {TopicPartition: MessageBatch}
        """
        t0 = self._loop.time()

        topics = collections.defaultdict(list)
        for tp, batch in batches.items():
            topics[tp.topic].append((tp.partition, batch.get_data_buffer()))

        if self.client.api_version >= (0, 10):
            version = 2
        elif self.client.api_version == (0, 9):
            version = 1
        else:
            version = 0

        request = ProduceRequest[version](required_acks=self._acks,
                                          timeout=self._request_timeout_ms,
                                          topics=list(topics.items()))

        reenqueue = []
        try:
            response = yield from self.client.send(node_id, request)
        except KafkaError as err:
            log.warning("Got error produce response: %s", err)
            if getattr(err, "invalid_metadata", False):
                self.client.force_metadata_update()

            for batch in batches.values():
                if not self._can_retry(err, batch):
                    batch.failure(exception=err)
                else:
                    reenqueue.append(batch)
        else:
            # noacks, just mark batches as "done"
            if request.required_acks == 0:
                for batch in batches.values():
                    batch.done_noack()
            else:
                for topic, partitions in response.topics:
                    for partition_info in partitions:
                        if response.API_VERSION < 2:
                            partition, error_code, offset = partition_info
                            # Mimic CREATE_TIME to take user provided timestamp
                            timestamp = -1
                        else:
                            partition, error_code, offset, timestamp = \
                                partition_info
                        tp = TopicPartition(topic, partition)
                        error = Errors.for_code(error_code)
                        batch = batches.pop(tp, None)
                        if batch is None:
                            continue

                        if error is Errors.NoError:
                            batch.done(offset, timestamp)
                        elif not self._can_retry(error(), batch):
                            batch.failure(exception=error())
                        else:
                            log.warning(
                                "Got error produce response on topic-partition"
                                " %s, retrying. Error: %s", tp, error)
                            # Ok, we can retry this batch
                            if getattr(error, "invalid_metadata", False):
                                self.client.force_metadata_update()
                            reenqueue.append(batch)

        if reenqueue:
            # Wait the backoff time before re-enqueueing
            yield from asyncio.sleep(self._retry_backoff, loop=self._loop)

            for batch in reenqueue:
                self._message_accumulator.reenqueue(batch)
            # If some error started metadata refresh we have to wait before
            # trying again
            yield from self.client._maybe_wait_metadata()

        # if the batches for this node were processed in less than the
        # linger time, wait out the remaining time
        sleep_time = self._linger_time - (self._loop.time() - t0)
        if sleep_time > 0:
            yield from asyncio.sleep(sleep_time, loop=self._loop)

        self._in_flight.remove(node_id)

    def _can_retry(self, error, batch):
        if batch.expired():
            return False
        # XXX: remove unknown topic check as we fix
        #      https://github.com/dpkp/kafka-python/issues/1155
        if error.retriable or isinstance(error, UnknownTopicOrPartitionError)\
                or error is UnknownTopicOrPartitionError:
            return True
        return False

    def _serialize(self, topic, key, value):
        if self._key_serializer:
            serialized_key = self._key_serializer(key)
        else:
            serialized_key = key
        if self._value_serializer:
            serialized_value = self._value_serializer(value)
        else:
            serialized_value = value

        message_size = LegacyRecordBatchBuilder.record_overhead(
            self._producer_magic)
        if serialized_key is not None:
            message_size += len(serialized_key)
        if serialized_value is not None:
            message_size += len(serialized_value)
        if message_size > self._max_request_size:
            raise MessageSizeTooLargeError(
                "The message is %d bytes when serialized which is larger than"
                " the maximum request size you have configured with the"
                " max_request_size configuration" % message_size)

        return serialized_key, serialized_value

    def _partition(self, topic, partition, key, value, serialized_key,
                   serialized_value):
        if partition is not None:
            assert partition >= 0
            assert partition in self._metadata.partitions_for_topic(topic), \
                'Unrecognized partition'
            return partition

        all_partitions = list(self._metadata.partitions_for_topic(topic))
        available = list(self._metadata.available_partitions_for_topic(topic))
        return self._partitioner(serialized_key, all_partitions, available)

    def create_batch(self):
        """Create and return an empty BatchBuilder.

        The batch is not queued for sending until it is submitted via
        ``send_batch``.

        Returns:
            BatchBuilder: empty batch to be filled and submitted by the caller.
        """
        return self._message_accumulator.create_builder()

    @asyncio.coroutine
    def send_batch(self, batch, topic, *, partition):
        """Submit a BatchBuilder for publication.

        Arguments:
            batch (BatchBuilder): batch object to be published.
            topic (str): topic where the batch will be published.
            partition (int): partition where this batch will be published.

        Returns:
            asyncio.Future: object that will be set when the batch is
                delivered.
        """
        partition = self._partition(topic, partition, None, None, None, None)
        tp = TopicPartition(topic, partition)
        log.debug("Sending batch to %s", tp)
        future = yield from self._message_accumulator.add_batch(
            batch, tp, self._request_timeout_ms / 1000)
        return future
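
A usage sketch for the batch API above (not part of the original example; the keyword-only ``append(timestamp=..., key=..., value=...)`` call is an assumption borrowed from aiokafka's ``BatchBuilder``, which ``create_batch`` returns):

.. code:: python

    @asyncio.coroutine
    def produce_batch(producer):
        batch = producer.create_batch()
        # append() is assumed to buffer one record into the batch
        batch.append(timestamp=None, key=b'k1', value=b'v1')
        batch.append(timestamp=None, key=b'k2', value=b'v2')
        # the whole batch is published to one explicit partition
        fut = yield from producer.send_batch(batch, 'my_topic', partition=0)
        yield from fut  # resolves when the batch is delivered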
Example #39
File: consumer.py, Project: lins05/aiokafka
class AIOKafkaConsumer(object):
    """Consume records from a Kafka cluster.

    The consumer will transparently handle the failure of servers in the Kafka
    cluster, and adapt as topic-partitions are created or migrate between
    brokers. It also interacts with the assigned kafka Group Coordinator node
    to allow multiple consumers to load balance consumption of topics (feature
    of kafka >= 0.9.0.0).

    Arguments:
        *topics (str): optional list of topics to subscribe to. If not set,
            call subscribe() or assign() before consuming records.
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the consumer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client. Also
            submitted to GroupCoordinator for logging with respect to
            consumer group administration. Default: 'aiokafka-{version}'
        group_id (str or None): name of the consumer group to join for dynamic
            partition assignment (if enabled), and to use for fetching and
            committing offsets. If None, auto-partition assignment (via
            group coordinator) and offset commits are disabled.
            Default: None
        key_deserializer (callable): Any callable that takes a
            raw message key and returns a deserialized key.
        value_deserializer (callable, optional): Any callable that takes a
            raw message value and returns a deserialized value.
        fetch_min_bytes (int): Minimum amount of data the server should
            return for a fetch request, otherwise wait up to
            fetch_max_wait_ms for more data to accumulate. Default: 1.
        fetch_max_wait_ms (int): The maximum amount of time in milliseconds
            the server will block before answering the fetch request if
            there isn't sufficient data to immediately satisfy the
            requirement given by fetch_min_bytes. Default: 500.
        max_partition_fetch_bytes (int): The maximum amount of data
            per-partition the server will return. The maximum total memory
            used for a request = #partitions * max_partition_fetch_bytes.
            This size must be at least as large as the maximum message size
            the server allows or else it is possible for the producer to
            send messages larger than the consumer can fetch. If that
            happens, the consumer can get stuck trying to fetch a large
            message on a certain partition. Default: 1048576.
        request_timeout_ms (int): Client request timeout in milliseconds.
            Default: 40000.
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        auto_offset_reset (str): A policy for resetting offsets on
            OffsetOutOfRange errors: 'earliest' will move to the oldest
            available message, 'latest' will move to the most recent. Any
            other value will raise an exception. Default: 'latest'.
        enable_auto_commit (bool): If true the consumer's offset will be
            periodically committed in the background. Default: True.
        auto_commit_interval_ms (int): milliseconds between automatic
            offset commits, if enable_auto_commit is True. Default: 5000.
        check_crcs (bool): Automatically check the CRC32 of the records
            consumed. This ensures no on-the-wire or on-disk corruption to
            the messages occurred. This check adds some overhead, so it may
            be disabled in cases seeking extreme performance. Default: True
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        partition_assignment_strategy (list): List of objects to use to
            distribute partition ownership amongst consumer instances when
            group management is used. This preference is implicit in the order
            of the strategies in the list. When assignment strategy changes:
            to support a change to the assignment strategy, new versions must
            enable support both for the old assignment strategy and the new
            one. The coordinator will choose the old assignment strategy until
            all members have been updated. Then it will choose the new
            strategy. Default: [RoundRobinPartitionAssignor]
        heartbeat_interval_ms (int): The expected time in milliseconds
            between heartbeats to the consumer coordinator when using
            Kafka's group management feature. Heartbeats are used to ensure
            that the consumer's session stays active and to facilitate
            rebalancing when new consumers join or leave the group. The
            value must be set lower than session_timeout_ms, but typically
            should be set no higher than 1/3 of that value. It can be
            adjusted even lower to control the expected time for normal
            rebalances. Default: 3000
        session_timeout_ms (int): The timeout used to detect failures when
            using Kafka's group management facilities. Default: 30000
        consumer_timeout_ms (int): maximum wait timeout for the background
            fetching routine. Mostly defines how quickly the system will
            notice a rebalance and request new data for newly assigned
            partitions. Default: 200
        api_version (str): specify which kafka API version to use.
            AIOKafkaConsumer supports Kafka API versions >=0.9 only.
            If set to 'auto', will attempt to infer the broker version by
            probing various APIs. Default: auto


    Note:
        Many configuration parameters are taken from Java Client:
        https://kafka.apache.org/documentation.html#newconsumerconfigs

    """
    def __init__(self,
                 *topics,
                 loop,
                 bootstrap_servers='localhost',
                 client_id='aiokafka-' + __version__,
                 group_id=None,
                 key_deserializer=None,
                 value_deserializer=None,
                 fetch_max_wait_ms=500,
                 fetch_min_bytes=1,
                 max_partition_fetch_bytes=1 * 1024 * 1024,
                 request_timeout_ms=40 * 1000,
                 retry_backoff_ms=100,
                 auto_offset_reset='latest',
                 enable_auto_commit=True,
                 auto_commit_interval_ms=5000,
                 check_crcs=True,
                 metadata_max_age_ms=5 * 60 * 1000,
                 partition_assignment_strategy=(RoundRobinPartitionAssignor, ),
                 heartbeat_interval_ms=3000,
                 session_timeout_ms=30000,
                 consumer_timeout_ms=200,
                 api_version='auto'):
        if api_version not in ('auto', '0.9', '0.10'):
            raise ValueError("Unsupported Kafka API version")
        self._client = AIOKafkaClient(loop=loop,
                                      bootstrap_servers=bootstrap_servers,
                                      client_id=client_id,
                                      metadata_max_age_ms=metadata_max_age_ms,
                                      request_timeout_ms=request_timeout_ms,
                                      api_version=api_version)

        self._group_id = group_id
        self._heartbeat_interval_ms = heartbeat_interval_ms
        self._retry_backoff_ms = retry_backoff_ms
        self._enable_auto_commit = enable_auto_commit
        self._auto_commit_interval_ms = auto_commit_interval_ms
        self._partition_assignment_strategy = partition_assignment_strategy
        self._key_deserializer = key_deserializer
        self._value_deserializer = value_deserializer
        self._fetch_min_bytes = fetch_min_bytes
        self._fetch_max_wait_ms = fetch_max_wait_ms
        self._max_partition_fetch_bytes = max_partition_fetch_bytes
        self._consumer_timeout = consumer_timeout_ms / 1000
        self._check_crcs = check_crcs
        self._subscription = SubscriptionState(auto_offset_reset)
        self._fetcher = None
        self._coordinator = None
        self._closed = False
        self._loop = loop

        if topics:
            self._client.set_topics(topics)
            self._subscription.subscribe(topics=topics)

    @asyncio.coroutine
    def start(self):
        yield from self._client.bootstrap()

        if self._client.api_version < (0, 9):
            raise ValueError("Unsupported Kafka version: {}".format(
                self._client.api_version))

        self._fetcher = Fetcher(
            self._client,
            self._subscription,
            loop=self._loop,
            key_deserializer=self._key_deserializer,
            value_deserializer=self._value_deserializer,
            fetch_min_bytes=self._fetch_min_bytes,
            fetch_max_wait_ms=self._fetch_max_wait_ms,
            max_partition_fetch_bytes=self._max_partition_fetch_bytes,
            check_crcs=self._check_crcs,
            fetcher_timeout=self._consumer_timeout)

        if self._group_id is not None:
            # using group coordinator for automatic partitions assignment
            self._coordinator = GroupCoordinator(
                self._client,
                self._subscription,
                loop=self._loop,
                group_id=self._group_id,
                heartbeat_interval_ms=self._heartbeat_interval_ms,
                retry_backoff_ms=self._retry_backoff_ms,
                enable_auto_commit=self._enable_auto_commit,
                auto_commit_interval_ms=self._auto_commit_interval_ms,
                assignors=self._partition_assignment_strategy)
            self._coordinator.on_group_rebalanced(self._on_change_subscription)

            yield from self._coordinator.ensure_active_group()
        elif self._subscription.needs_partition_assignment:
            # using manual partitions assignment by topic(s)
            yield from self._client.force_metadata_update()
            partitions = []
            for topic in self._subscription.subscription:
                p_ids = self.partitions_for_topic(topic)
                if not p_ids:
                    raise UnknownTopicOrPartitionError()
                for p_id in p_ids:
                    partitions.append(TopicPartition(topic, p_id))
            self._subscription.unsubscribe()
            self._subscription.assign_from_user(partitions)
            yield from self._update_fetch_positions(
                self._subscription.missing_fetch_positions())

    def assign(self, partitions):
        """Manually assign a list of TopicPartitions to this consumer.

        Arguments:
            partitions (list of TopicPartition): assignment for this instance.

        Raises:
            IllegalStateError: if consumer has already called subscribe()

        Warning:
            It is not possible to use both manual partition assignment with
            assign() and group assignment with subscribe().

        Note:
            This interface does not support incremental assignment and will
            replace the previous assignment (if there was one).

        Note:
            Manual topic assignment through this method does not use the
            consumer's group management functionality. As such, there will be
            no rebalance operation triggered when group membership or cluster
            and topic metadata change.
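
        Example (a minimal sketch; assumes the consumer is started and
        ``my_topic`` exists):

        .. code:: python

            consumer.assign([TopicPartition('my_topic', 0)])
            msg = yield from consumer.getone()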
        """
        for tp in partitions:
            p_ids = self.partitions_for_topic(tp.topic)
            if not p_ids or tp.partition not in p_ids:
                raise UnknownTopicOrPartitionError(tp)
        self._subscription.assign_from_user(partitions)
        self._on_change_subscription()
        self._client.set_topics([tp.topic for tp in partitions])

    def assignment(self):
        """Get the TopicPartitions currently assigned to this consumer.

        If partitions were directly assigned using assign(), then this will
        simply return the same partitions that were previously assigned.
        If topics were subscribed using subscribe(), then this will give the
        set of topic partitions currently assigned to the consumer (which may
        be none if the assignment hasn't happened yet or if the partitions are
        in the process of being reassigned).

        Returns:
            set: {TopicPartition, ...}
        """
        return self._subscription.assigned_partitions()

    @asyncio.coroutine
    def stop(self):
        """Close the consumer, waiting indefinitely for any needed cleanup."""
        if self._closed:
            return
        log.debug("Closing the KafkaConsumer.")
        self._closed = True
        if self._coordinator:
            yield from self._coordinator.close()
        if self._fetcher:
            yield from self._fetcher.close()
        yield from self._client.close()
        log.debug("The KafkaConsumer has closed.")

    @asyncio.coroutine
    def commit(self, offsets=None):
        """Commit offsets to kafka, blocking until success or error

        This commits offsets only to Kafka.
        The offsets committed using this API will be used on the first fetch
        after every rebalance and also on startup. As such, if you need to
        store offsets in anything other than Kafka, this API should not be
        used.

        Blocks until either the commit succeeds or an unrecoverable error is
        encountered (in which case it is thrown to the caller).

        Currently only supports kafka-topic offset storage (not zookeeper)

        Arguments:
            offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
                to commit with the configured group_id. Defaults to current
                consumed offsets for all subscribed partitions.
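
        Example of committing an explicit offset (a minimal sketch; given a
        previously fetched message ``msg``, the committed offset should
        point to the *next* message to consume; ``OffsetAndMetadata`` is the
        ``(offset, metadata)`` pair from kafka-python):

        .. code:: python

            tp = TopicPartition(msg.topic, msg.partition)
            offsets = {tp: OffsetAndMetadata(msg.offset + 1, '')}
            yield from consumer.commit(offsets)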
        """
        assert self._group_id is not None, 'Requires group_id'

        if offsets is None:
            offsets = self._subscription.all_consumed_offsets()
        else:
            # validate `offsets` structure
            assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
            assert all(
                map(lambda v: isinstance(v, OffsetAndMetadata),
                    offsets.values()))
        yield from self._coordinator.commit_offsets(offsets)

    @asyncio.coroutine
    def committed(self, partition):
        """Get the last committed offset for the given partition

        This offset will be used as the position for the consumer
        in the event of a failure.

        This call may block to do a remote call if the partition in question
        isn't assigned to this consumer or if the consumer hasn't yet
        initialized its cache of committed offsets.

        Arguments:
            partition (TopicPartition): the partition to check

        Returns:
            The last committed offset, or None if there was no prior commit.
        """
        assert self._group_id is not None, 'Requires group_id'
        if self._subscription.is_assigned(partition):
            committed = self._subscription.assignment[partition].committed
            if committed is None:
                yield from self._coordinator.refresh_committed_offsets()
                committed = self._subscription.assignment[partition].committed
        else:
            commit_map = yield from self._coordinator.fetch_committed_offsets(
                [partition])
            if partition in commit_map:
                committed = commit_map[partition].offset
            else:
                committed = None
        return committed

    @asyncio.coroutine
    def topics(self):
        """Get all topics the user is authorized to view.

        Returns:
            set: topics
        """
        cluster = yield from self._client.fetch_all_metadata()
        return cluster.topics()

    def partitions_for_topic(self, topic):
        """Get metadata about the partitions for a given topic.

        Arguments:
            topic (str): topic to check

        Returns:
            set: partition ids
        """
        return self._client.cluster.partitions_for_topic(topic)

    @asyncio.coroutine
    def position(self, partition):
        """Get the offset of the next record that will be fetched

        Arguments:
            partition (TopicPartition): partition to check

        Returns:
            int: offset
        """
        assert self._subscription.is_assigned(partition), \
            'Partition is not assigned'
        offset = self._subscription.assignment[partition].position
        if offset is None:
            yield from self._update_fetch_positions(partition)
            offset = self._subscription.assignment[partition].position
        return offset

    def highwater(self, partition):
        """Last known highwater offset for a partition

        A highwater offset is the offset that will be assigned to the next
        message that is produced. It may be useful for calculating lag, by
        comparing with the reported position. Note that both position and
        highwater refer to the *next* offset -- i.e., the highwater offset
        is one greater than the newest available message.

        Highwater offsets are returned in FetchResponse messages, so they
        will not be available if no FetchRequests have been sent for this
        partition yet.

        Arguments:
            partition (TopicPartition): partition to check

        Returns:
            int or None: offset if available
        """
        assert self._subscription.is_assigned(partition), \
            'Partition is not assigned'
        return self._subscription.assignment[partition].highwater

    def seek(self, partition, offset):
        """Manually specify the fetch offset for a TopicPartition.

        Overrides the fetch offsets that the consumer will use on the next
        poll(). If this API is invoked for the same partition more than once,
        the latest offset will be used on the next poll(). Note that you may
        lose data if this API is arbitrarily used in the middle of consumption,
        to reset the fetch offsets.

        Arguments:
            partition (TopicPartition): partition for seek operation
            offset (int): message offset in partition

        Raises:
            AssertionError: if offset is not an int >= 0;
                            or if partition is not currently assigned.
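
        Example (a minimal sketch; rewinds an assigned partition to the
        start of its log and re-reads it):

        .. code:: python

            tp = TopicPartition('my_topic', 0)
            consumer.seek(tp, 0)
            msg = yield from consumer.getone(tp)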
        """
        assert isinstance(offset, int) and offset >= 0, 'Offset must be >= 0'
        assert partition in self._subscription.assigned_partitions(), \
            'Unassigned partition'
        log.debug("Seeking to offset %s for partition %s", offset, partition)
        self._subscription.assignment[partition].seek(offset)

    @asyncio.coroutine
    def seek_to_committed(self, *partitions):
        """Seek to the committed offset for partitions

        Arguments:
            partitions: optionally provide specific TopicPartitions,
                otherwise default to all assigned partitions

        Raises:
            AssertionError: if any partition is not currently assigned, or if
                no partitions are assigned
        """
        if not partitions:
            partitions = self._subscription.assigned_partitions()
            assert partitions, 'No partitions are currently assigned'
        else:
            for p in partitions:
                assert p in self._subscription.assigned_partitions(), \
                    'Unassigned partition'

        for tp in partitions:
            log.debug("Seeking to committed of partition %s", tp)
            offset = yield from self.committed(tp)
            if offset and offset > 0:
                self.seek(tp, offset)

    def subscribe(self, topics=(), pattern=None, listener=None):
        """Subscribe to a list of topics, or a topic regex pattern

        Partitions will be dynamically assigned via a group coordinator.
        Topic subscriptions are not incremental: this list will replace the
        current assignment (if there is one).

        This method is incompatible with assign()

        Arguments:
           topics (list): List of topics for subscription.
           pattern (str): Pattern to match available topics. You must provide
               either topics or pattern, but not both.
           listener (ConsumerRebalanceListener): Optionally include listener
               callback, which will be called before and after each rebalance
               operation.
               As part of group management, the consumer will keep track of
               the list of consumers that belong to a particular group and
               will trigger a rebalance operation if one of the following
               events occurs:

               * Number of partitions change for any of the subscribed topics
               * Topic is created or deleted
               * An existing member of the consumer group dies
               * A new member is added to the consumer group

               When any of these events are triggered, the provided listener
               will be invoked first to indicate that the consumer's
               assignment has been revoked, and then again when the new
               assignment has been received. Note that this listener will
               immediately override any listener set in a previous call to
               subscribe. It is guaranteed, however, that the partitions
               revoked/assigned through this interface are from topics
               subscribed in this call.
        Raises:
            IllegalStateError: if called after previously calling assign()
            AssertionError: if neither topics nor pattern is provided
            TypeError: if listener is not a ConsumerRebalanceListener
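
        Example (a minimal sketch; actual partition assignment happens via
        the group coordinator after ``start()``):

        .. code:: python

            consumer.subscribe(topics=['topic1', 'topic2'])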
        """
        # SubscriptionState handles error checking
        self._subscription.subscribe(topics=topics,
                                     pattern=pattern,
                                     listener=listener)

        # regex will need all topic metadata
        if pattern is not None:
            self._client.set_topics([])
            log.debug("Subscribed to topic pattern: %s", pattern)
        else:
            self._client.set_topics(self._subscription.group_subscription())
            log.debug("Subscribed to topic(s): %s", topics)

    def subscription(self):
        """Get the current topic subscription.

        Returns:
            set: {topic, ...}
        """
        return self._subscription.subscription

    def unsubscribe(self):
        """Unsubscribe from all topics and clear all assigned partitions."""
        self._subscription.unsubscribe()
        self._client.set_topics([])
        log.debug(
            "Unsubscribed all topics or patterns and assigned partitions")

    @asyncio.coroutine
    def _update_fetch_positions(self, partitions):
        """
        Set the fetch position to the committed position (if there is one)
        or reset it using the offset reset policy the user has configured.

        Arguments:
            partitions (List[TopicPartition]): The partitions that need
                updating fetch positions

        Raises:
            NoOffsetForPartitionError: If no offset is stored for a given
                partition and no offset reset policy is defined
        """
        if self._group_id is not None:
            # refresh commits for all assigned partitions
            yield from self._coordinator.refresh_committed_offsets()

        # then do any offset lookups in case some positions are not known
        yield from self._fetcher.update_fetch_positions(partitions)

    def _on_change_subscription(self):
        """This is `group rebalanced` signal handler for update fetch positions
        of assigned partitions"""
        # fetch positions if we have partitions we're subscribed
        # to that we don't know the offset for
        if not self._subscription.has_all_fetch_positions():
            ensure_future(self._update_fetch_positions(
                self._subscription.missing_fetch_positions()),
                          loop=self._loop)

    @asyncio.coroutine
    def getone(self, *partitions):
        """
        Get one message from Kafka.
        If no new messages are prefetched, this method will wait for them.

        Arguments:
            partitions (List[TopicPartition]): Optional list of partitions
                to return from. If no partitions are specified, the returned
                message can be from any partition the consumer is subscribed
                to.

        Returns:
            ConsumerRecord

        Will return an instance of

        .. code:: python

            collections.namedtuple(
                "ConsumerRecord",
                ["topic", "partition", "offset", "key", "value"])

        Example usage:


        .. code:: python

            while True:
                message = yield from consumer.getone()
                topic = message.topic
                partition = message.partition
                # Process message
                print(message.offset, message.key, message.value)

        """
        assert all(map(lambda k: isinstance(k, TopicPartition), partitions))

        msg = yield from self._fetcher.next_record(partitions)
        return msg

    @asyncio.coroutine
    def getmany(self, *partitions, timeout_ms=0):
        """Get messages from assigned topics / partitions.

        Prefetched messages are returned in batches by topic-partition.
        If messages are not available in the prefetched buffer, this method
        waits up to `timeout_ms` milliseconds.

        Arguments:
            partitions (List[TopicPartition]): The partitions to fetch
                messages from. If no partitions are specified, all subscribed
                partitions will be used.
            timeout_ms (int, optional): milliseconds spent waiting if
                data is not available in the buffer. If 0, returns immediately
                with any records that are available currently in the buffer,
                else returns empty. Must not be negative. Default: 0
        Returns:
            dict: topic-partition to list of records fetched since the last
                call, for the subscribed list of topics and partitions

        Example usage:


        .. code:: python

            data = yield from consumer.getmany()
            for tp, messages in data.items():
                topic = tp.topic
                partition = tp.partition
                for message in messages:
                    # Process message
                    print(message.offset, message.key, message.value)

        """
        assert all(map(lambda k: isinstance(k, TopicPartition), partitions))

        timeout = timeout_ms / 1000
        records = yield from self._fetcher.fetched_records(partitions, timeout)
        return records

    if PY_35:

        @asyncio.coroutine
        def __aiter__(self):
            return self

        @asyncio.coroutine
        def __anext__(self):
            """Asyncio iterator interface for consumer

            Note:
                TopicAuthorizationFailedError and OffsetOutOfRangeError
                exceptions can be raised in iterator.
                All other KafkaError exceptions will be logged and not raised
            """
            while True:
                try:
                    return (yield from self.getone())
                except (TopicAuthorizationFailedError,
                        OffsetOutOfRangeError) as err:
                    raise err
                except KafkaError as err:
                    log.error("error in consumer iterator: %s", err)
Example #40
class AIOKafkaProducer(object):
    """A Kafka client that publishes records to the Kafka cluster.

    The producer consists of a pool of buffer space that holds records that
    haven't yet been transmitted to the server as well as a background task
    that is responsible for turning these records into requests and
    transmitting them to the cluster.

    The send() method is asynchronous. When called it adds the record to a
    buffer of pending record sends and immediately returns. This allows the
    producer to batch together individual records for efficiency.

    The 'acks' config controls the criteria under which requests are considered
    complete. The "all" setting will result in blocking on the full commit of
    the record, the slowest but most durable setting.

    The key_serializer and value_serializer instruct how to turn the key and
    value objects the user provides into bytes.

    Keyword Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the producer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client.
            Default: 'aiokafka-producer-#' (appended with a unique number
            per instance)
        key_serializer (callable): used to convert user-supplied keys to bytes
            If not None, called as f(key), should return bytes. Default: None.
        value_serializer (callable): used to convert user-supplied message
            values to bytes. If not None, called as f(value), should return
            bytes. Default: None.
        acks (0, 1, 'all'): The number of acknowledgments the producer requires
            the leader to have received before considering a request complete.
            This controls the durability of records that are sent. The
            following settings are common:

            0: Producer will not wait for any acknowledgment from the server
                at all. The message will immediately be added to the socket
                buffer and considered sent. No guarantee can be made that the
                server has received the record in this case, and the retries
                configuration will not take effect (as the client won't
                generally know of any failures). The offset given back for each
                record will always be set to -1.
            1: The broker leader will write the record to its local log but
                will respond without awaiting full acknowledgement from all
                followers. In this case should the leader fail immediately
                after acknowledging the record but before the followers have
                replicated it then the record will be lost.
            all: The broker leader will wait for the full set of in-sync
                replicas to acknowledge the record. This guarantees that the
                record will not be lost as long as at least one in-sync replica
                remains alive. This is the strongest available guarantee.

            If unset, defaults to acks=1.
        compression_type (str): The compression type for all data generated by
            the producer. Valid values are 'gzip', 'snappy', 'lz4', or None.
            Compression is of full batches of data, so the efficacy of batching
            will also impact the compression ratio (more batching means better
            compression). Default: None.
        max_batch_size (int): Maximum size of buffered data per partition.
            Once this much data is buffered, the `send` coroutine will block
            until the batch is drained.
            Default: 16384
        linger_ms (int): The producer groups together any records that arrive
            in between request transmissions into a single batched request.
            Normally this occurs only under load when records arrive faster
            than they can be sent out. However in some circumstances the client
            may want to reduce the number of requests even under moderate load.
            This setting accomplishes this by adding a small amount of
            artificial delay; that is, rather than immediately sending out a
            record the producer will wait for up to the given delay to allow
            other records to be sent so that the sends can be batched together.
            This setting defaults to 0 (i.e. no delay). Setting linger_ms=5
            would have the effect of reducing the number of requests sent but
            would add up to 5ms of latency to records sent in the absence of
            load. Default: 0.
        partitioner (callable): Callable used to determine which partition
            each message is assigned to. Called (after key serialization):
            partitioner(key_bytes, all_partitions, available_partitions).
            The default partitioner implementation hashes each non-None key
            using the same murmur2 algorithm as the java client so that
            messages with the same key are assigned to the same partition.
            When a key is None, the message is delivered to a random partition
            (filtered to partitions with available leaders only, if possible).
        max_request_size (int): The maximum size of a request. This is also
            effectively a cap on the maximum record size. Note that the server
            has its own cap on record size which may be different from this.
            This setting will limit the number of record batches the producer
            will send in a single request to avoid sending huge requests.
            Default: 1048576.
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        request_timeout_ms (int): Produce request timeout in milliseconds.
            As it's sent as part of ProduceRequest, maximum waiting time can
            be up to 2 * request_timeout_ms.
            Default: 40000.
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        api_version (str): specify which kafka API version to use.
            If set to 'auto', will attempt to infer the broker version by
            probing various APIs. Default: auto

    Note:
        Many configuration parameters are taken from Java Client:
        https://kafka.apache.org/documentation.html#producerconfigs
    """
    _PRODUCER_CLIENT_ID_SEQUENCE = 0

    def __init__(self,
                 *,
                 loop,
                 bootstrap_servers='localhost',
                 client_id=None,
                 metadata_max_age_ms=300000,
                 request_timeout_ms=40000,
                 api_version='auto',
                 acks=1,
                 key_serializer=None,
                 value_serializer=None,
                 compression_type=None,
                 max_batch_size=16384,
                 partitioner=DefaultPartitioner(),
                 max_request_size=1048576,
                 linger_ms=0,
                 send_backoff_ms=100,
                 retry_backoff_ms=100):
        if acks not in (0, 1, -1, 'all'):
            raise ValueError("Invalid ACKS parameter")
        if compression_type not in ('gzip', 'snappy', 'lz4', None):
            raise ValueError("Invalid compression type!")
        if api_version not in ('auto', '0.9', '0.8.2', '0.8.1', '0.8.0'):
            raise ValueError("Unsupported Kafka version")

        # Increment the class attribute so the sequence advances per instance
        AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE += 1
        if client_id is None:
            client_id = 'aiokafka-producer-%s' % \
                AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE

        if acks == 'all':
            acks = -1
        self._acks = acks
        self._api_version = api_version
        self._key_serializer = key_serializer
        self._value_serializer = value_serializer
        self._compression_type = compression_type
        self._partitioner = partitioner
        self._max_request_size = max_request_size
        self._request_timeout_ms = request_timeout_ms

        self.client = AIOKafkaClient(loop=loop,
                                     bootstrap_servers=bootstrap_servers,
                                     client_id=client_id,
                                     metadata_max_age_ms=metadata_max_age_ms,
                                     request_timeout_ms=request_timeout_ms)
        self._metadata = self.client.cluster
        self._message_accumulator = MessageAccumulator(
            self._metadata, max_batch_size, self._compression_type,
            self._request_timeout_ms / 1000, loop)
        self._sender_task = None
        self._in_flight = set()
        self._closed = False
        self._loop = loop
        self._retry_backoff = retry_backoff_ms / 1000
        self._linger_time = linger_ms / 1000

    @asyncio.coroutine
    def start(self):
        """Connect to Kafka cluster and check server version"""
        log.debug("Starting the Kafka producer")  # trace
        yield from self.client.bootstrap()

        # Check Broker Version if not set explicitly
        if self._api_version == 'auto':
            self._api_version = yield from self.client.check_version()

        # Convert api_version config to tuple for easy comparisons
        self._api_version = tuple(map(int, self._api_version.split('.')))

        if self._compression_type == 'lz4':
            assert self._api_version >= (0, 8, 2), \
                'LZ4 Requires >= Kafka 0.8.2 Brokers'

        self._sender_task = ensure_future(self._sender_routine(),
                                          loop=self._loop)
        log.debug("Kafka producer started")

    @asyncio.coroutine
    def stop(self):
        """Flush all pending data and close all connections to kafka cluser"""
        if self._closed:
            return

        # Wait until all batches are delivered and futures are resolved
        yield from self._message_accumulator.close()

        if self._sender_task:
            self._sender_task.cancel()
            yield from self._sender_task

        yield from self.client.close()
        self._closed = True
        log.debug("The Kafka producer has closed.")

    @asyncio.coroutine
    def partitions_for(self, topic):
        """Returns set of all known partitions for the topic."""
        return (yield from self._wait_on_metadata(topic))

    @asyncio.coroutine
    def _wait_on_metadata(self, topic):
        """
        Wait for cluster metadata including partitions for the given topic to
        be available.

        Arguments:
            topic (str): topic we want metadata for

        Returns:
            set: partition ids for the topic

        Raises:
            UnknownTopicOrPartitionError: if no topic or partitions found
                in cluster metadata
        """
        if topic in self.client.cluster.topics():
            return self._metadata.partitions_for_topic(topic)

        # add topic to metadata topic list if it is not there already.
        self.client.add_topic(topic)
        yield from self.client.force_metadata_update()
        if topic not in self.client.cluster.topics():
            raise UnknownTopicOrPartitionError()

        return self._metadata.partitions_for_topic(topic)

    @asyncio.coroutine
    def send(self, topic, value=None, key=None, partition=None):
        """Publish a message to a topic.

        Arguments:
            topic (str): topic where the message will be published
            value (optional): message value. Must be type bytes, or be
                serializable to bytes via configured value_serializer. If value
                is None, key is required and message acts as a 'delete'.
                See kafka compaction documentation for more details:
                http://kafka.apache.org/documentation.html#compaction
                (compaction requires kafka >= 0.8.1)
            partition (int, optional): optionally specify a partition. If not
                set, the partition will be selected using the configured
                'partitioner'.
            key (optional): a key to associate with the message. Can be used to
                determine which partition to send the message to. If partition
                is None (and producer's partitioner config is left as default),
                then messages with the same key will be delivered to the same
                partition (but if key is None, partition is chosen randomly).
                Must be type bytes, or be serializable to bytes via configured
                key_serializer.

        Returns:
            asyncio.Future: future object that will be set when message is
                            processed

        Note: The returned future will wait based on `request_timeout_ms`
            setting. Cancelling this future will not stop the event from
            being sent.
        """
        assert value is not None or self._api_version >= (0, 8, 1), (
            'Null messages require kafka >= 0.8.1')
        assert not (value is None and key is None), \
            'Need at least one: key or value'

        # first make sure the metadata for the topic is available
        yield from self._wait_on_metadata(topic)

        key_bytes, value_bytes = self._serialize(topic, key, value)
        partition = self._partition(topic, partition, key, value, key_bytes,
                                    value_bytes)

        tp = TopicPartition(topic, partition)
        log.debug("Sending (key=%s value=%s) to %s", key, value, tp)

        fut = yield from self._message_accumulator.add_message(
            tp, key_bytes, value_bytes, self._request_timeout_ms / 1000)
        return fut

    @asyncio.coroutine
    def _sender_routine(self):
        """backgroud task that sends message batches to Kafka brokers"""
        tasks = set()
        try:
            while True:
                batches, unknown_leaders_exist = \
                    self._message_accumulator.drain_by_nodes(
                        ignore_nodes=self._in_flight)

                # create a produce task for every node's batches
                for node_id, node_batches in batches.items():
                    task = ensure_future(
                        self._send_produce_req(node_id, node_batches),
                        loop=self._loop)
                    tasks.add(task)

                if unknown_leaders_exist:
                    # we have at least one unknown partition's leader,
                    # try to update cluster metadata and wait backoff time
                    self.client.force_metadata_update()
                    # Just to have at least 1 future in wait() call
                    fut = asyncio.sleep(self._retry_backoff, loop=self._loop)
                    waiters = tasks.union([fut])
                else:
                    fut = self._message_accumulator.data_waiter()
                    waiters = tasks.union([fut])

                # wait when:
                # * At least one of produce task is finished
                # * Data for new partition arrived
                done, _ = yield from asyncio.wait(
                    waiters,
                    return_when=asyncio.FIRST_COMPLETED,
                    loop=self._loop)
                tasks -= done

        except asyncio.CancelledError:
            pass
        except Exception:  # noqa
            log.error("Unexpected error in sender routine", exc_info=True)

    @asyncio.coroutine
    def _send_produce_req(self, node_id, batches):
        """Create produce request to node
        If producer configured with `retries`>0 and produce response contain
        "failed" partitions produce request for this partition will try
        resend to broker `retries` times with `retry_timeout_ms` timeouts.

        Arguments:
            node_id (int): kafka broker identifier
            batches (dict): dictionary of {TopicPartition: MessageBatch}
        """
        self._in_flight.add(node_id)
        t0 = self._loop.time()
        while True:
            topics = collections.defaultdict(list)
            for tp, batch in batches.items():
                topics[tp.topic].append((tp.partition, batch.data()))

            request = ProduceRequest(required_acks=self._acks,
                                     timeout=self._request_timeout_ms,
                                     topics=list(topics.items()))

            try:
                response = yield from self.client.send(node_id, request)
            except KafkaError as err:
                for batch in batches.values():
                    if not err.retriable or batch.expired():
                        batch.done(exception=err)
                log.warning("Got error produce response: %s", err)
                if not err.retriable:
                    break
            else:
                if response is None:
                    # acks=0: no response expected, just mark batches done
                    for batch in batches.values():
                        batch.done()
                    break

                for topic, partitions in response.topics:
                    for partition, error_code, offset in partitions:
                        tp = TopicPartition(topic, partition)
                        error = Errors.for_code(error_code)
                        batch = batches.pop(tp, None)
                        if batch is None:
                            continue

                        if error is Errors.NoError:
                            batch.done(offset)
                        elif not getattr(error, 'retriable', False) or \
                                batch.expired():
                            batch.done(exception=error())
                        else:
                            # Ok, we can retry this batch
                            batches[tp] = batch
                            log.warning(
                                "Got error produce response on topic-partition"
                                " %s, retrying. Error: %s", tp, error)

            if batches:
                yield from asyncio.sleep(self._retry_backoff, loop=self._loop)
            else:
                break

        # if the batches for this node were processed in less than linger
        # seconds, wait out the remaining time
        sleep_time = self._linger_time - (self._loop.time() - t0)
        if sleep_time > 0:
            yield from asyncio.sleep(sleep_time, loop=self._loop)

        self._in_flight.remove(node_id)

    def _serialize(self, topic, key, value):
        if self._key_serializer:
            serialized_key = self._key_serializer(key)
        else:
            serialized_key = key
        if self._value_serializer:
            serialized_value = self._value_serializer(value)
        else:
            serialized_value = value

        message_size = MessageSet.HEADER_SIZE + Message.HEADER_SIZE
        if serialized_key is not None:
            message_size += len(serialized_key)
        if serialized_value is not None:
            message_size += len(serialized_value)
        if message_size > self._max_request_size:
            raise MessageSizeTooLargeError(
                "The message is %d bytes when serialized which is larger than"
                " the maximum request size you have configured with the"
                " max_request_size configuration" % message_size)

        return serialized_key, serialized_value

    def _partition(self, topic, partition, key, value, serialized_key,
                   serialized_value):
        if partition is not None:
            assert partition >= 0
            assert partition in self._metadata.partitions_for_topic(topic), \
                'Unrecognized partition'
            return partition

        all_partitions = list(self._metadata.partitions_for_topic(topic))
        available = list(self._metadata.available_partitions_for_topic(topic))
        return self._partitioner(serialized_key, all_partitions, available)
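# A hedged usage sketch (not from the original source) for the producer class
# above, kept in the same yield-from style; the topic name and
# bootstrap_servers are assumptions for illustration only.
import asyncio

from aiokafka import AIOKafkaProducer


@asyncio.coroutine
def produce_one(loop):
    producer = AIOKafkaProducer(loop=loop, bootstrap_servers='localhost:9092')
    yield from producer.start()
    try:
        # send() only enqueues the message; waiting on the returned future
        # resolves once delivery completes (or fails after request_timeout_ms)
        fut = yield from producer.send('topic1', b'some value', key=b'key')
        yield from fut
    finally:
        yield from producer.stop()


loop = asyncio.get_event_loop()
loop.run_until_complete(produce_one(loop))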
Example #41
class AIOKafkaProducer(object):
    """A Kafka client that publishes records to the Kafka cluster.

    The producer consists of a pool of buffer space that holds records that
    haven't yet been transmitted to the server as well as a background task
    that is responsible for turning these records into requests and
    transmitting them to the cluster.

    The send() method is asynchronous. When called it adds the record to a
    buffer of pending record sends and immediately returns. This allows the
    producer to batch together individual records for efficiency.

    The 'acks' config controls the criteria under which requests are considered
    complete. The "all" setting will result in waiting for all replicas to
    respond, the slowest but most durable setting.

    The key_serializer and value_serializer instruct how to turn the key and
    value objects the user provides into bytes.

    Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the producer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client.
            Default: 'aiokafka-producer-#' (appended with a unique number
            per instance)
        key_serializer (callable): used to convert user-supplied keys to
            bytes. If not None, called as f(key), should return bytes.
            Default: None.
        value_serializer (callable): used to convert user-supplied message
            values to bytes. If not None, called as f(value), should return
            bytes. Default: None.
        acks (0, 1, 'all'): The number of acknowledgments the producer requires
            the leader to have received before considering a request complete.
            This controls the durability of records that are sent. The
            following settings are common:

            0: Producer will not wait for any acknowledgment from the server
                at all. The message will immediately be added to the socket
                buffer and considered sent. No guarantee can be made that the
                server has received the record in this case, and the retries
                configuration will not take effect (as the client won't
                generally know of any failures). The offset given back for each
                record will always be set to -1.
            1: The broker leader will write the record to its local log but
                will respond without awaiting full acknowledgement from all
                followers. In this case should the leader fail immediately
                after acknowledging the record but before the followers have
                replicated it then the record will be lost.
            all: The broker leader will wait for the full set of in-sync
                replicas to acknowledge the record. This guarantees that the
                record will not be lost as long as at least one in-sync replica
                remains alive. This is the strongest available guarantee.

            If unset, defaults to *acks=1*. If ``enable_idempotence`` is
            ``True``, defaults to *acks=all*.
        compression_type (str): The compression type for all data generated by
            the producer. Valid values are 'gzip', 'snappy', 'lz4', or None.
            Compression is of full batches of data, so the efficacy of batching
            will also impact the compression ratio (more batching means better
            compression). Default: None.
        max_batch_size (int): Maximum size of buffered data per partition.
            Once this much data is buffered, the `send` coroutine will block
            until the batch is drained.
            Default: 16384
        linger_ms (int): The producer groups together any records that arrive
            in between request transmissions into a single batched request.
            Normally this occurs only under load when records arrive faster
            than they can be sent out. However in some circumstances the client
            may want to reduce the number of requests even under moderate load.
            This setting accomplishes this by adding a small amount of
            artificial delay; that is, if the first request is processed
            faster than `linger_ms`, the producer will wait the remaining
            `linger_ms - process_time`.
            This setting defaults to 0 (i.e. no delay).
        partitioner (callable): Callable used to determine which partition
            each message is assigned to. Called (after key serialization):
            partitioner(key_bytes, all_partitions, available_partitions).
            The default partitioner implementation hashes each non-None key
            using the same murmur2 algorithm as the Java client so that
            messages with the same key are assigned to the same partition.
            When a key is None, the message is delivered to a random partition
            (filtered to partitions with available leaders only, if possible).
        max_request_size (int): The maximum size of a request. This is also
            effectively a cap on the maximum record size. Note that the server
            has its own cap on record size which may be different from this.
            This setting will limit the number of record batches the producer
            will send in a single request to avoid sending huge requests.
            Default: 1048576.
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        request_timeout_ms (int): Produce request timeout in milliseconds.
            As it's sent as part of ProduceRequest (it's a blocking call),
            maximum waiting time can be up to 2 * request_timeout_ms.
            Default: 40000.
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        api_version (str): specify which kafka API version to use.
            If set to 'auto', will attempt to infer the broker version by
            probing various APIs. Default: auto
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
            socket connections. Directly passed into asyncio's
            `create_connection`_. For more information see :ref:`ssl_auth`.
            Default: None.
        connections_max_idle_ms (int): Close idle connections after the number
            of milliseconds specified by this config. Specifying `None` will
            disable idle checks. Default: 540000 (9 hours).
        enable_idempotence (bool): When set to ``True``, the producer will
            ensure that exactly one copy of each message is written in the
            stream. If ``False``, producer retries due to broker failures,
            etc., may write duplicates of the retried message in the stream.
            Note that enabling idempotence requires ``acks`` to be set to
            'all'. If it is not explicitly set by the user, it will be chosen
            automatically. If incompatible values are set, a ``ValueError``
            will be thrown.
            New in version 0.5.0.
        sasl_mechanism (str): Authentication mechanism when security_protocol
            is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
            PLAIN, GSSAPI. Default: PLAIN
        sasl_plain_username (str): username for sasl PLAIN authentication.
            Default: None
        sasl_plain_password (str): password for sasl PLAIN authentication.
            Default: None

    Note:
        Many configuration parameters are taken from the Java client:
        https://kafka.apache.org/documentation.html#producerconfigs
    """
    _PRODUCER_CLIENT_ID_SEQUENCE = 0

    _COMPRESSORS = {
        'gzip': (has_gzip, LegacyRecordBatchBuilder.CODEC_GZIP),
        'snappy': (has_snappy, LegacyRecordBatchBuilder.CODEC_SNAPPY),
        'lz4': (has_lz4, LegacyRecordBatchBuilder.CODEC_LZ4),
    }

    _closed = None  # Serves as an uninitialized flag for __del__
    _source_traceback = None

    def __init__(self, *, loop, bootstrap_servers='localhost',
                 client_id=None,
                 metadata_max_age_ms=300000, request_timeout_ms=40000,
                 api_version='auto', acks=_missing,
                 key_serializer=None, value_serializer=None,
                 compression_type=None, max_batch_size=16384,
                 partitioner=DefaultPartitioner(), max_request_size=1048576,
                 linger_ms=0, send_backoff_ms=100,
                 retry_backoff_ms=100, security_protocol="PLAINTEXT",
                 ssl_context=None, connections_max_idle_ms=540000,
                 enable_idempotence=False, transactional_id=None,
                 transaction_timeout_ms=60000, sasl_mechanism="PLAIN",
                 sasl_plain_password=None, sasl_plain_username=None,
                 sasl_kerberos_service_name='kafka',
                 sasl_kerberos_domain_name=None):
        if acks not in (0, 1, -1, 'all', _missing):
            raise ValueError("Invalid ACKS parameter")
        if compression_type not in ('gzip', 'snappy', 'lz4', None):
            raise ValueError("Invalid compression type!")
        if compression_type:
            checker, compression_attrs = self._COMPRESSORS[compression_type]
            if not checker():
                raise RuntimeError("Compression library for {} not found"
                                   .format(compression_type))
        else:
            compression_attrs = 0

        if transactional_id is not None:
            enable_idempotence = True
        else:
            transaction_timeout_ms = INTEGER_MAX_VALUE

        if enable_idempotence:
            if acks is _missing:
                acks = -1
            elif acks not in ('all', -1):
                raise ValueError(
                    "acks={} not supported if enable_idempotence=True"
                    .format(acks))
            self._txn_manager = TransactionManager(
                transactional_id, transaction_timeout_ms, loop=loop)
        else:
            self._txn_manager = None

        if acks is _missing:
            acks = 1
        elif acks == 'all':
            acks = -1

        AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE += 1
        if client_id is None:
            client_id = 'aiokafka-producer-%s' % \
                AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE

        self._key_serializer = key_serializer
        self._value_serializer = value_serializer
        self._compression_type = compression_type
        self._partitioner = partitioner
        self._max_request_size = max_request_size
        self._request_timeout_ms = request_timeout_ms

        self.client = AIOKafkaClient(
            loop=loop, bootstrap_servers=bootstrap_servers,
            client_id=client_id, metadata_max_age_ms=metadata_max_age_ms,
            request_timeout_ms=request_timeout_ms,
            retry_backoff_ms=retry_backoff_ms,
            api_version=api_version, security_protocol=security_protocol,
            ssl_context=ssl_context,
            connections_max_idle_ms=connections_max_idle_ms,
            sasl_mechanism=sasl_mechanism,
            sasl_plain_username=sasl_plain_username,
            sasl_plain_password=sasl_plain_password,
            sasl_kerberos_service_name=sasl_kerberos_service_name,
            sasl_kerberos_domain_name=sasl_kerberos_domain_name)
        self._metadata = self.client.cluster
        self._message_accumulator = MessageAccumulator(
            self._metadata, max_batch_size, compression_attrs,
            self._request_timeout_ms / 1000, txn_manager=self._txn_manager,
            loop=loop)
        self._sender = Sender(
            self.client, acks=acks, txn_manager=self._txn_manager,
            retry_backoff_ms=retry_backoff_ms, linger_ms=linger_ms,
            message_accumulator=self._message_accumulator,
            request_timeout_ms=request_timeout_ms,
            loop=loop)

        self._loop = loop
        if loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))
        self._closed = False

    if PY_341:
        # Warn if producer was not closed properly
        # We don't attempt to close the producer, as __del__ is synchronous
        def __del__(self, _warnings=warnings):
            if self._closed is False:
                if PY_36:
                    kwargs = {'source': self}
                else:
                    kwargs = {}
                _warnings.warn("Unclosed AIOKafkaProducer {!r}".format(self),
                               ResourceWarning,
                               **kwargs)
                context = {'producer': self,
                           'message': 'Unclosed AIOKafkaProducer'}
                if self._source_traceback is not None:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)

    @asyncio.coroutine
    def start(self):
        """Connect to Kafka cluster and check server version"""
        log.debug("Starting the Kafka producer")  # trace
        yield from self.client.bootstrap()

        if self._compression_type == 'lz4':
            assert self.client.api_version >= (0, 8, 2), \
                'LZ4 Requires >= Kafka 0.8.2 Brokers'

        if self._txn_manager is not None and self.client.api_version < (0, 11):
            raise UnsupportedVersionError(
                "Idempotent producer available only for Broker vesion 0.11"
                " and above")

        yield from self._sender.start()
        self._message_accumulator.set_api_version(self.client.api_version)
        self._producer_magic = 0 if self.client.api_version < (0, 10) else 1
        log.debug("Kafka producer started")

    @asyncio.coroutine
    def flush(self):
        """Wait untill all batches are Delivered and futures resolved"""
        yield from self._message_accumulator.flush()

    @asyncio.coroutine
    def stop(self):
        """Flush all pending data and close all connections to kafka cluster"""
        if self._closed:
            return
        self._closed = True

        # If the sender task is down there is no way for accumulator to flush
        if self._sender is not None and self._sender.sender_task is not None:
            yield from asyncio.wait([
                self._message_accumulator.close(),
                self._sender.sender_task],
                return_when=asyncio.FIRST_COMPLETED,
                loop=self._loop)

            yield from self._sender.close()

        yield from self.client.close()
        log.debug("The Kafka producer has closed.")

    @asyncio.coroutine
    def partitions_for(self, topic):
        """Returns set of all known partitions for the topic."""
        return (yield from self.client._wait_on_metadata(topic))

    def _serialize(self, topic, key, value):
        if self._key_serializer:
            serialized_key = self._key_serializer(key)
        else:
            serialized_key = key
        if self._value_serializer:
            serialized_value = self._value_serializer(value)
        else:
            serialized_value = value

        message_size = LegacyRecordBatchBuilder.record_overhead(
            self._producer_magic)
        if serialized_key is not None:
            message_size += len(serialized_key)
        if serialized_value is not None:
            message_size += len(serialized_value)
        if message_size > self._max_request_size:
            raise MessageSizeTooLargeError(
                "The message is %d bytes when serialized which is larger than"
                " the maximum request size you have configured with the"
                " max_request_size configuration" % message_size)

        return serialized_key, serialized_value

    def _partition(self, topic, partition, key, value,
                   serialized_key, serialized_value):
        if partition is not None:
            assert partition >= 0
            assert partition in self._metadata.partitions_for_topic(topic), \
                'Unrecognized partition'
            return partition

        all_partitions = list(self._metadata.partitions_for_topic(topic))
        available = list(self._metadata.available_partitions_for_topic(topic))
        return self._partitioner(
            serialized_key, all_partitions, available)

    @asyncio.coroutine
    def send(self, topic, value=None, key=None, partition=None,
             timestamp_ms=None, headers=None):
        """Publish a message to a topic.

        Arguments:
            topic (str): topic where the message will be published
            value (optional): message value. Must be type bytes, or be
                serializable to bytes via configured value_serializer. If value
                is None, key is required and message acts as a 'delete'.
                See kafka compaction documentation for more details:
                http://kafka.apache.org/documentation.html#compaction
                (compaction requires kafka >= 0.8.1)
            partition (int, optional): optionally specify a partition. If not
                set, the partition will be selected using the configured
                'partitioner'.
            key (optional): a key to associate with the message. Can be used to
                determine which partition to send the message to. If partition
                is None (and producer's partitioner config is left as default),
                then messages with the same key will be delivered to the same
                partition (but if key is None, partition is chosen randomly).
                Must be type bytes, or be serializable to bytes via configured
                key_serializer.
            timestamp_ms (int, optional): epoch milliseconds (from Jan 1 1970
                UTC) to use as the message timestamp. Defaults to current time.

        Returns:
            asyncio.Future: object that will be set when message is
            processed

        Raises:
            kafka.KafkaTimeoutError: if we can't schedule this record
                (pending buffer is full) in up to `request_timeout_ms`
                milliseconds.

        Note:
            The returned future will wait based on `request_timeout_ms`
            setting. Cancelling the returned future **will not** stop event
            from being sent, but cancelling the ``send`` coroutine itself
            **will**.
        """
        assert value is not None or self.client.api_version >= (0, 8, 1), (
            'Null messages require kafka >= 0.8.1')
        assert not (value is None and key is None), \
            'Need at least one: key or value'

        # first make sure the metadata for the topic is available
        yield from self.client._wait_on_metadata(topic)

        # Ensure transaction is started and not committing
        if self._txn_manager is not None:
            txn_manager = self._txn_manager
            if txn_manager.transactional_id is not None and \
                    not self._txn_manager.is_in_transaction():
                raise IllegalOperation(
                    "Can't send messages while not in transaction")

        if headers is not None:
            if self.client.api_version < (0, 11):
                raise UnsupportedVersionError(
                    "Headers not supported before Kafka 0.11")
        else:
            # Record parser/builder supports only the list type, not None
            headers = []

        key_bytes, value_bytes = self._serialize(topic, key, value)
        partition = self._partition(topic, partition, key, value,
                                    key_bytes, value_bytes)

        tp = TopicPartition(topic, partition)
        log.debug("Sending (key=%s value=%s) to %s", key, value, tp)

        fut = yield from self._message_accumulator.add_message(
            tp, key_bytes, value_bytes, self._request_timeout_ms / 1000,
            timestamp_ms=timestamp_ms, headers=headers)
        return fut

    @asyncio.coroutine
    def send_and_wait(self, topic, value=None, key=None, partition=None,
                      timestamp_ms=None):
        """Publish a message to a topic and wait the result"""
        future = yield from self.send(
            topic, value, key, partition, timestamp_ms)
        return (yield from future)

    def create_batch(self):
        """Create and return an empty BatchBuilder.

        The batch is not queued for send until submission to ``send_batch``.

        Returns:
            BatchBuilder: empty batch to be filled and submitted by the caller.
        """
        return self._message_accumulator.create_builder()

    @asyncio.coroutine
    def send_batch(self, batch, topic, *, partition):
        """Submit a BatchBuilder for publication.

        Arguments:
            batch (BatchBuilder): batch object to be published.
            topic (str): topic where the batch will be published.
            partition (int): partition where this batch will be published.

        Returns:
            asyncio.Future: object that will be set when the batch is
                delivered.
        """
        # first make sure the metadata for the topic is available
        yield from self.client._wait_on_metadata(topic)
        # We only validate we have the partition in the metadata here
        partition = self._partition(topic, partition, None, None, None, None)

        # Ensure transaction is started and not committing
        if self._txn_manager is not None:
            txn_manager = self._txn_manager
            if txn_manager.transactional_id is not None and \
                    not self._txn_manager.is_in_transaction():
                raise IllegalOperation(
                    "Can't send messages while not in transaction")

        tp = TopicPartition(topic, partition)
        log.debug("Sending batch to %s", tp)
        future = yield from self._message_accumulator.add_batch(
            batch, tp, self._request_timeout_ms / 1000)
        return future

    def _ensure_transactional(self):
        if self._txn_manager is None or \
                self._txn_manager.transactional_id is None:
            raise IllegalOperation(
                "You need to configure transaction_id to use transactions")

    @asyncio.coroutine
    def begin_transaction(self):
        self._ensure_transactional()
        log.debug(
            "Beginning a new transaction for id %s",
            self._txn_manager.transactional_id)
        yield from asyncio.shield(
            self._txn_manager.wait_for_pid(),
            loop=self._loop
        )
        self._txn_manager.begin_transaction()

    @asyncio.coroutine
    def commit_transaction(self):
        self._ensure_transactional()
        log.debug(
            "Committing transaction for id %s",
            self._txn_manager.transactional_id)
        self._txn_manager.committing_transaction()
        yield from asyncio.shield(
            self._txn_manager.wait_for_transaction_end(),
            loop=self._loop
        )

    @asyncio.coroutine
    def abort_transaction(self):
        self._ensure_transactional()
        log.debug(
            "Aborting transaction for id %s",
            self._txn_manager.transactional_id)
        self._txn_manager.aborting_transaction()
        yield from asyncio.shield(
            self._txn_manager.wait_for_transaction_end(),
            loop=self._loop
        )

    def transaction(self):
        return TransactionContext(self)

    @asyncio.coroutine
    def send_offsets_to_transaction(self, offsets, group_id):
        self._ensure_transactional()

        if not self._txn_manager.is_in_transaction():
            raise IllegalOperation("Not in the middle of a transaction")

        if not group_id or not isinstance(group_id, str):
            raise ValueError(group_id)

        # validate `offsets` structure
        formatted_offsets = commit_structure_validate(offsets)

        log.debug(
            "Begin adding offsets %s for consumer group %s to transaction",
            formatted_offsets, group_id)
        fut = self._txn_manager.add_offsets_to_txn(formatted_offsets, group_id)
        yield from asyncio.shield(fut, loop=self._loop)
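# A hedged sketch (not from the original source) of the transactional API
# defined above; it assumes brokers >= 0.11, and the transactional_id and
# topic name shown are illustrative only.
import asyncio

from aiokafka import AIOKafkaProducer


@asyncio.coroutine
def transactional_produce(loop):
    producer = AIOKafkaProducer(
        loop=loop, bootstrap_servers='localhost:9092',
        transactional_id='demo-txn-id')  # implies enable_idempotence=True
    yield from producer.start()
    try:
        yield from producer.begin_transaction()
        try:
            yield from producer.send('topic1', b'value-in-txn')
        except Exception:
            yield from producer.abort_transaction()
            raise
        else:
            yield from producer.commit_transaction()
    finally:
        yield from producer.stop()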
Example #42
    def test_consumer_rebalance_on_new_topic(self):
        # This test creates a consumer group and checks that adding a new
        # topic triggers a group rebalance and partition assignment
        pattern = "^another-autocreate-pattern-.*$"
        client = AIOKafkaClient(
            loop=self.loop, bootstrap_servers=self.hosts,
            client_id="test_autocreate")
        yield from client.bootstrap()
        listener1 = StubRebalanceListener(loop=self.loop)
        listener2 = StubRebalanceListener(loop=self.loop)
        consumer1 = AIOKafkaConsumer(
            loop=self.loop, bootstrap_servers=self.hosts,
            metadata_max_age_ms=200, group_id="test-autocreate-rebalance",
            heartbeat_interval_ms=100)
        consumer1.subscribe(pattern=pattern, listener=listener1)
        yield from consumer1.start()
        consumer2 = AIOKafkaConsumer(
            loop=self.loop, bootstrap_servers=self.hosts,
            metadata_max_age_ms=200, group_id="test-autocreate-rebalance",
            heartbeat_interval_ms=100)
        consumer2.subscribe(pattern=pattern, listener=listener2)
        yield from consumer2.start()
        yield from asyncio.sleep(0.5, loop=self.loop)
        # no topics match the pattern yet, so both assignments are empty
        self.assertEqual(consumer1.assignment(), set())
        self.assertEqual(consumer2.assignment(), set())
        listener1.reset()
        listener2.reset()

        # Lets force autocreation of a topic
        my_topic = "another-autocreate-pattern-1"
        yield from client._wait_on_metadata(my_topic)

        # Wait for group to stabilize
        assign1 = yield from listener1.wait_assign()
        assign2 = yield from listener2.wait_assign()
        # We expect 2 partitions for autocreated topics
        my_partitions = set([
            TopicPartition(my_topic, 0), TopicPartition(my_topic, 1)])
        self.assertEqual(assign1 | assign2, my_partitions)
        self.assertEqual(
            consumer1.assignment() | consumer2.assignment(),
            my_partitions)

        # Lets add another topic
        listener1.reset()
        listener2.reset()
        my_topic2 = "another-autocreate-pattern-2"
        yield from client._wait_on_metadata(my_topic2)

        # Wait for group to stabilize
        assign1 = yield from listener1.wait_assign()
        assign2 = yield from listener2.wait_assign()
        # We expect 2 partitions for autocreated topics
        my_partitions = set([
            TopicPartition(my_topic, 0), TopicPartition(my_topic, 1),
            TopicPartition(my_topic2, 0), TopicPartition(my_topic2, 1)])
        self.assertEqual(assign1 | assign2, my_partitions)
        self.assertEqual(
            consumer1.assignment() | consumer2.assignment(),
            my_partitions)

        yield from consumer1.stop()
        yield from consumer2.stop()
        yield from client.close()
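# The test above relies on a StubRebalanceListener helper; below is a hedged
# sketch of what such a listener could look like (the real test fixture may
# differ), built on aiokafka.abc.ConsumerRebalanceListener and asyncio.Event.
import asyncio

from aiokafka.abc import ConsumerRebalanceListener


class StubRebalanceListener(ConsumerRebalanceListener):

    def __init__(self, *, loop):
        self.assigned = None
        self._event = asyncio.Event(loop=loop)

    @asyncio.coroutine
    def on_partitions_revoked(self, revoked):
        pass

    @asyncio.coroutine
    def on_partitions_assigned(self, assigned):
        # remember the assignment and wake up any wait_assign() callers
        self.assigned = assigned
        self._event.set()

    @asyncio.coroutine
    def wait_assign(self):
        yield from self._event.wait()
        return self.assigned

    def reset(self):
        self.assigned = None
        self._event.clear()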
Example #43
class AIOKafkaProducer(object):
    """A Kafka client that publishes records to the Kafka cluster.

    The producer consists of a pool of buffer space that holds records that
    haven't yet been transmitted to the server as well as a background task
    that is responsible for turning these records into requests and
    transmitting them to the cluster.

    The send() method is asynchronous. When called it adds the record to a
    buffer of pending record sends and immediately returns. This allows the
    producer to batch together individual records for efficiency.

    The 'acks' config controls the criteria under which requests are considered
    complete. The "all" setting will result in blocking on the full commit of
    the record, the slowest but most durable setting.

    The key_serializer and value_serializer instruct how to turn the key and
    value objects the user provides into bytes.

    Keyword Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the producer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client.
            Default: 'aiokafka-producer-#' (appended with a unique number
            per instance)
        key_serializer (callable): used to convert user-supplied keys to
            bytes. If not None, called as f(key), should return bytes.
            Default: None.
        value_serializer (callable): used to convert user-supplied message
            values to bytes. If not None, called as f(value), should return
            bytes. Default: None.
        acks (0, 1, 'all'): The number of acknowledgments the producer requires
            the leader to have received before considering a request complete.
            This controls the durability of records that are sent. The
            following settings are common:

            0: Producer will not wait for any acknowledgment from the server
                at all. The message will immediately be added to the socket
                buffer and considered sent. No guarantee can be made that the
                server has received the record in this case, and the retries
                configuration will not take effect (as the client won't
                generally know of any failures). The offset given back for each
                record will always be set to -1.
            1: The broker leader will write the record to its local log but
                will respond without awaiting full acknowledgement from all
                followers. In this case should the leader fail immediately
                after acknowledging the record but before the followers have
                replicated it then the record will be lost.
            all: The broker leader will wait for the full set of in-sync
                replicas to acknowledge the record. This guarantees that the
                record will not be lost as long as at least one in-sync replica
                remains alive. This is the strongest available guarantee.

            If unset, defaults to acks=1.
        compression_type (str): The compression type for all data generated by
            the producer. Valid values are 'gzip', 'snappy', 'lz4', or None.
            Compression is of full batches of data, so the efficacy of batching
            will also impact the compression ratio (more batching means better
            compression). Default: None.
        max_batch_size (int): Maximum size of buffered data per partition.
            Once this much data is buffered, the `send` coroutine will block
            until the batch is drained.
            Default: 16384
        linger_ms (int): The producer groups together any records that arrive
            in between request transmissions into a single batched request.
            Normally this occurs only under load when records arrive faster
            than they can be sent out. However in some circumstances the client
            may want to reduce the number of requests even under moderate load.
            This setting accomplishes this by adding a small amount of
            artificial delay; that is, rather than immediately sending out a
            record the producer will wait for up to the given delay to allow
            other records to be sent so that the sends can be batched together.
            This setting defaults to 0 (i.e. no delay). Setting linger_ms=5
            would have the effect of reducing the number of requests sent but
            would add up to 5ms of latency to records sent in the absence of
            load. Default: 0.
        partitioner (callable): Callable used to determine which partition
            each message is assigned to. Called (after key serialization):
            partitioner(key_bytes, all_partitions, available_partitions).
            The default partitioner implementation hashes each non-None key
            using the same murmur2 algorithm as the java client so that
            messages with the same key are assigned to the same partition.
            When a key is None, the message is delivered to a random partition
            (filtered to partitions with available leaders only, if possible).
        max_request_size (int): The maximum size of a request. This is also
            effectively a cap on the maximum record size. Note that the server
            has its own cap on record size which may be different from this.
            This setting will limit the number of record batches the producer
            will send in a single request to avoid sending huge requests.
            Default: 1048576.
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        request_timeout_ms (int): Produce request timeout in milliseconds.
            As it's sent as part of ProduceRequest, maximum waiting time can
            be up to 2 * request_timeout_ms.
            Default: 40000.
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        api_version (str): specify which kafka API version to use.
            If set to 'auto', will attempt to infer the broker version by
            probing various APIs. Default: auto

    Note:
        Many configuration parameters are taken from Java Client:
        https://kafka.apache.org/documentation.html#producerconfigs
    """
    _PRODUCER_CLIENT_ID_SEQUENCE = 0

    def __init__(self, *, loop, bootstrap_servers='localhost',
                 client_id=None,
                 metadata_max_age_ms=300000, request_timeout_ms=40000,
                 api_version='auto', acks=1,
                 key_serializer=None, value_serializer=None,
                 compression_type=None, max_batch_size=16384,
                 partitioner=DefaultPartitioner(), max_request_size=1048576,
                 linger_ms=0, send_backoff_ms=100,
                 retry_backoff_ms=100):
        if acks not in (0, 1, -1, 'all'):
            raise ValueError("Invalid ACKS parameter")
        if compression_type not in ('gzip', 'snappy', 'lz4', None):
            raise ValueError("Invalid compression type!")
        if api_version not in ('auto', '0.9', '0.8.2', '0.8.1', '0.8.0'):
            raise ValueError("Unsupported Kafka version")

        # Increment the class attribute so the sequence advances per instance
        AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE += 1
        if client_id is None:
            client_id = 'aiokafka-producer-%s' % \
                AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE

        if acks == 'all':
            acks = -1
        self._acks = acks
        self._api_version = api_version
        self._key_serializer = key_serializer
        self._value_serializer = value_serializer
        self._compression_type = compression_type
        self._partitioner = partitioner
        self._max_request_size = max_request_size
        self._request_timeout_ms = request_timeout_ms

        self.client = AIOKafkaClient(
            loop=loop, bootstrap_servers=bootstrap_servers,
            client_id=client_id, metadata_max_age_ms=metadata_max_age_ms,
            request_timeout_ms=request_timeout_ms)
        self._metadata = self.client.cluster
        self._message_accumulator = MessageAccumulator(
            self._metadata, max_batch_size, self._compression_type,
            self._request_timeout_ms/1000, loop)
        self._sender_task = None
        self._in_flight = set()
        self._closed = False
        self._loop = loop
        self._retry_backoff = retry_backoff_ms / 1000
        self._linger_time = linger_ms / 1000

    @asyncio.coroutine
    def start(self):
        """Connect to Kafka cluster and check server version"""
        log.debug("Starting the Kafka producer")  # trace
        yield from self.client.bootstrap()

        # Check Broker Version if not set explicitly
        if self._api_version == 'auto':
            self._api_version = yield from self.client.check_version()

        # Convert api_version config to tuple for easy comparisons
        self._api_version = tuple(
            map(int, self._api_version.split('.')))

        if self._compression_type == 'lz4':
            assert self._api_version >= (0, 8, 2), \
                'LZ4 Requires >= Kafka 0.8.2 Brokers'

        self._sender_task = ensure_future(
            self._sender_routine(), loop=self._loop)
        log.debug("Kafka producer started")

    @asyncio.coroutine
    def stop(self):
        """Flush all pending data and close all connections to kafka cluser"""
        if self._closed:
            return

        # Wait until all batches are delivered and futures are resolved
        yield from self._message_accumulator.close()

        if self._sender_task:
            self._sender_task.cancel()
            yield from self._sender_task

        yield from self.client.close()
        self._closed = True
        log.debug("The Kafka producer has closed.")

    @asyncio.coroutine
    def partitions_for(self, topic):
        """Returns set of all known partitions for the topic."""
        return (yield from self._wait_on_metadata(topic))

    @asyncio.coroutine
    def _wait_on_metadata(self, topic):
        """
        Wait for cluster metadata including partitions for the given topic to
        be available.

        Arguments:
            topic (str): topic we want metadata for

        Returns:
            set: partition ids for the topic

        Raises:
            UnknownTopicOrPartitionError: if no topic or partitions found
                in cluster metadata
        """
        if topic in self.client.cluster.topics():
            return self._metadata.partitions_for_topic(topic)

        # add topic to metadata topic list if it is not there already.
        self.client.add_topic(topic)
        yield from self.client.force_metadata_update()
        if topic not in self.client.cluster.topics():
            raise UnknownTopicOrPartitionError()

        return self._metadata.partitions_for_topic(topic)

    @asyncio.coroutine
    def send(self, topic, value=None, key=None, partition=None):
        """Publish a message to a topic.

        Arguments:
            topic (str): topic where the message will be published
            value (optional): message value. Must be type bytes, or be
                serializable to bytes via configured value_serializer. If value
                is None, key is required and message acts as a 'delete'.
                See kafka compaction documentation for more details:
                http://kafka.apache.org/documentation.html#compaction
                (compaction requires kafka >= 0.8.1)
            partition (int, optional): optionally specify a partition. If not
                set, the partition will be selected using the configured
                'partitioner'.
            key (optional): a key to associate with the message. Can be used to
                determine which partition to send the message to. If partition
                is None (and producer's partitioner config is left as default),
                then messages with the same key will be delivered to the same
                partition (but if key is None, partition is chosen randomly).
                Must be type bytes, or be serializable to bytes via configured
                key_serializer.

        Returns:
            asyncio.Future: future object that will be set when message is
                            processed

        Note: The returned future will wait based on the `request_timeout_ms`
            setting. Cancelling this future will not stop the event from
            being sent.
        """
        assert value is not None or self._api_version >= (0, 8, 1), (
            'Null messages require kafka >= 0.8.1')
        assert not (value is None and key is None), \
            'Need at least one: key or value'

        # first make sure the metadata for the topic is available
        yield from self._wait_on_metadata(topic)

        key_bytes, value_bytes = self._serialize(topic, key, value)
        partition = self._partition(topic, partition, key, value,
                                    key_bytes, value_bytes)

        tp = TopicPartition(topic, partition)
        log.debug("Sending (key=%s value=%s) to %s", key, value, tp)

        fut = yield from self._message_accumulator.add_message(
            tp, key_bytes, value_bytes, self._request_timeout_ms / 1000)
        return fut
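
    # A minimal, comment-only usage sketch of ``send`` (assumes a started
    # producer instance and a running broker; names are hypothetical):
    #
    #     fut = yield from producer.send('my_topic', b'some value')
    #     resp = yield from fut  # resolves once the batch is delivered
    #
    # The first ``yield from`` only waits for the message to be queued in
    # the accumulator; actual delivery is awaited via the returned future.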

    @asyncio.coroutine
    def _sender_routine(self):
        """backgroud task that sends message batches to Kafka brokers"""
        tasks = set()
        try:
            while True:
                batches, unknown_leaders_exist = \
                    self._message_accumulator.drain_by_nodes(
                        ignore_nodes=self._in_flight)

                # create a produce task for every node's batch set (use a
                # distinct name so the drained ``batches`` dict is not
                # shadowed by the loop variable)
                for node_id, node_batches in batches.items():
                    task = ensure_future(
                        self._send_produce_req(node_id, node_batches),
                        loop=self._loop)
                    tasks.add(task)

                if unknown_leaders_exist:
                    # at least one partition has an unknown leader; schedule
                    # a cluster metadata update and wait out the backoff time
                    ensure_future(
                        self.client.force_metadata_update(), loop=self._loop)
                    # Just to have at least 1 future in wait() call
                    fut = asyncio.sleep(self._retry_backoff, loop=self._loop)
                    waiters = tasks.union([fut])
                else:
                    fut = self._message_accumulator.data_waiter()
                    waiters = tasks.union([fut])

                # wait until:
                # * at least one produce task has finished, or
                # * data for a new partition has arrived
                done, _ = yield from asyncio.wait(
                    waiters,
                    return_when=asyncio.FIRST_COMPLETED,
                    loop=self._loop)
                tasks -= done

        except asyncio.CancelledError:
            pass
        except Exception:  # noqa
            log.error("Unexpected error in sender routine", exc_info=True)

    @asyncio.coroutine
    def _send_produce_req(self, node_id, batches):
        """Create produce request to node
        If producer configured with `retries`>0 and produce response contain
        "failed" partitions produce request for this partition will try
        resend to broker `retries` times with `retry_timeout_ms` timeouts.

        Arguments:
            node_id (int): kafka broker identifier
            batches (dict): dictionary of {TopicPartition: MessageBatch}
        """
        self._in_flight.add(node_id)
        t0 = self._loop.time()
        while True:
            topics = collections.defaultdict(list)
            for tp, batch in batches.items():
                topics[tp.topic].append((tp.partition, batch.data()))

            request = ProduceRequest(
                required_acks=self._acks,
                timeout=self._request_timeout_ms,
                topics=list(topics.items()))

            try:
                response = yield from self.client.send(node_id, request)
            except KafkaError as err:
                for batch in batches.values():
                    if not err.retriable or batch.expired():
                        batch.done(exception=err)
                log.warning(
                    "Got error produce response: %s", err)
                if not err.retriable:
                    break
            else:
                if response is None:
                    # acks=0: no response is expected, just mark batches done
                    for batch in batches.values():
                        batch.done()
                    break

                for topic, partitions in response.topics:
                    for partition, error_code, offset in partitions:
                        tp = TopicPartition(topic, partition)
                        error = Errors.for_code(error_code)
                        batch = batches.pop(tp, None)
                        if batch is None:
                            continue

                        if error is Errors.NoError:
                            batch.done(offset)
                        elif not getattr(error, 'retriable', False) or \
                                batch.expired():
                            batch.done(exception=error())
                        else:
                            # Ok, we can retry this batch
                            batches[tp] = batch
                            log.warning(
                                "Got error produce response on topic-partition"
                                " %s, retrying. Error: %s", tp, error)

            if batches:
                yield from asyncio.sleep(
                    self._retry_backoff, loop=self._loop)
            else:
                break

        # if the batches for this node were processed in less than linger
        # seconds, wait out the remaining time
        sleep_time = self._linger_time - (self._loop.time() - t0)
        if sleep_time > 0:
            yield from asyncio.sleep(sleep_time, loop=self._loop)

        self._in_flight.remove(node_id)

    def _serialize(self, topic, key, value):
        if self._key_serializer:
            serialized_key = self._key_serializer(key)
        else:
            serialized_key = key
        if self._value_serializer:
            serialized_value = self._value_serializer(value)
        else:
            serialized_value = value

        message_size = MessageSet.HEADER_SIZE + Message.HEADER_SIZE
        if serialized_key is not None:
            message_size += len(serialized_key)
        if serialized_value is not None:
            message_size += len(serialized_value)
        if message_size > self._max_request_size:
            raise MessageSizeTooLargeError(
                "The message is %d bytes when serialized which is larger than"
                " the maximum request size you have configured with the"
                " max_request_size configuration" % message_size)

        return serialized_key, serialized_value

    def _partition(self, topic, partition, key, value,
                   serialized_key, serialized_value):
        if partition is not None:
            assert partition >= 0
            assert partition in self._metadata.partitions_for_topic(topic), \
                'Unrecognized partition'
            return partition

        all_partitions = list(self._metadata.partitions_for_topic(topic))
        available = list(self._metadata.available_partitions_for_topic(topic))
        return self._partitioner(
            serialized_key, all_partitions, available)
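
# A self-contained sketch of the producer workflow above, assuming the class
# is exposed as AIOKafkaProducer and a broker is reachable on localhost:9092
# (the topic name is hypothetical):

import asyncio


@asyncio.coroutine
def produce_example(loop):
    producer = AIOKafkaProducer(
        loop=loop, bootstrap_servers='localhost:9092')
    yield from producer.start()
    try:
        # send() returns quickly; the returned future resolves on delivery
        fut = yield from producer.send('my_topic', b'Super message')
        yield from fut
    finally:
        yield from producer.stop()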
Example #44
0
class AIOKafkaConsumer(object):
    """Consume records from a Kafka cluster.

    The consumer will transparently handle the failure of servers in the Kafka
    cluster, and adapt as topic-partitions are created or migrate between
    brokers. It also interacts with the assigned kafka Group Coordinator node
    to allow multiple consumers to load balance consumption of topics (requires
    kafka >= 0.9.0.0).

    Arguments:
        *topics (str): optional list of topics to subscribe to. If not set,
            call subscribe() or assign() before consuming records.
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the consumer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client. Also
            submitted to GroupCoordinator for logging with respect to
            consumer group administration. Default: 'aiokafka-{version}'
        group_id (str or None): name of the consumer group to join for dynamic
            partition assignment (if enabled), and to use for fetching and
            committing offsets. If None, auto-partition assignment (via
            group coordinator) and offset commits are disabled.
            Default: None
        key_deserializer (callable): Any callable that takes a
            raw message key and returns a deserialized key.
        value_deserializer (callable, optional): Any callable that takes a
            raw message value and returns a deserialized value.
        fetch_min_bytes (int): Minimum amount of data the server should
            return for a fetch request, otherwise wait up to
            fetch_max_wait_ms for more data to accumulate. Default: 1.
        fetch_max_wait_ms (int): The maximum amount of time in milliseconds
            the server will block before answering the fetch request if
            there isn't sufficient data to immediately satisfy the
            requirement given by fetch_min_bytes. Default: 500.
        max_partition_fetch_bytes (int): The maximum amount of data
            per-partition the server will return. The maximum total memory
            used for a request = #partitions * max_partition_fetch_bytes.
            This size must be at least as large as the maximum message size
            the server allows or else it is possible for the producer to
            send messages larger than the consumer can fetch. If that
            happens, the consumer can get stuck trying to fetch a large
            message on a certain partition. Default: 1048576.
        request_timeout_ms (int): Client request timeout in milliseconds.
            Default: 40000.
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        reconnect_backoff_ms (int): The amount of time in milliseconds to
            wait before attempting to reconnect to a given host.
            Default: 50.
        auto_offset_reset (str): A policy for resetting offsets on
            OffsetOutOfRange errors: 'earliest' will move to the oldest
            available message, 'latest' will move to the most recent. Any
            other value will raise an exception. Default: 'latest'.
        enable_auto_commit (bool): If true the consumer's offset will be
            periodically committed in the background. Default: True.
        auto_commit_interval_ms (int): milliseconds between automatic
            offset commits, if enable_auto_commit is True. Default: 5000.
        check_crcs (bool): Automatically check the CRC32 of the records
            consumed. This ensures no on-the-wire or on-disk corruption to
            the messages occurred. This check adds some overhead, so it may
            be disabled in cases seeking extreme performance. Default: True
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        partition_assignment_strategy (list): List of objects to use to
            distribute partition ownership amongst consumer instances when
            group management is used. This preference is implicit in the order
            of the strategies in the list. When the assignment strategy
            changes: to support a change to the assignment strategy, new
            versions must support both the old assignment strategy and the
            new one. The coordinator will choose the old assignment strategy
            until all members have been updated, and only then switch to the
            new strategy. Default: [RoundRobinPartitionAssignor]
        heartbeat_interval_ms (int): The expected time in milliseconds
            between heartbeats to the consumer coordinator when using
            Kafka's group management feature. Heartbeats are used to ensure
            that the consumer's session stays active and to facilitate
            rebalancing when new consumers join or leave the group. The
            value must be set lower than session_timeout_ms, but typically
            should be set no higher than 1/3 of that value. It can be
            adjusted even lower to control the expected time for normal
            rebalances. Default: 3000
        session_timeout_ms (int): The timeout used to detect failures when
            using Kafka's group management facilities. Default: 30000
        consumer_timeout_ms (int): number of milliseconds to poll for
            available fetched messages. Default: 100
        api_version (str): specify which kafka API version to use.
            AIOKafkaConsumer supports Kafka API versions >=0.9 only.
            If set to 'auto', will attempt to infer the broker version by
            probing various APIs. Default: auto


    Note:
        Many configuration parameters are taken from Java Client:
        https://kafka.apache.org/documentation.html#newconsumerconfigs

    """
    def __init__(self, *topics, loop,
                 bootstrap_servers='localhost',
                 client_id='aiokafka-'+__version__,
                 group_id=None,
                 key_deserializer=None, value_deserializer=None,
                 fetch_max_wait_ms=500,
                 fetch_min_bytes=1,
                 max_partition_fetch_bytes=1 * 1024 * 1024,
                 request_timeout_ms=40 * 1000,
                 retry_backoff_ms=100,
                 reconnect_backoff_ms=50,
                 auto_offset_reset='latest',
                 enable_auto_commit=True,
                 auto_commit_interval_ms=5000,
                 check_crcs=True,
                 metadata_max_age_ms=5 * 60 * 1000,
                 partition_assignment_strategy=(RoundRobinPartitionAssignor,),
                 heartbeat_interval_ms=3000,
                 session_timeout_ms=30000,
                 consumer_timeout_ms=100,
                 api_version='auto'):
        if api_version not in ('auto', '0.9'):
            raise ValueError("Unsupported Kafka API version")
        self._client = AIOKafkaClient(
            loop=loop, bootstrap_servers=bootstrap_servers,
            client_id=client_id, metadata_max_age_ms=metadata_max_age_ms,
            request_timeout_ms=request_timeout_ms)

        self._api_version = api_version
        self._group_id = group_id
        self._heartbeat_interval_ms = heartbeat_interval_ms
        self._retry_backoff_ms = retry_backoff_ms
        self._enable_auto_commit = enable_auto_commit
        self._auto_commit_interval_ms = auto_commit_interval_ms
        self._partition_assignment_strategy = partition_assignment_strategy
        self._key_deserializer = key_deserializer
        self._value_deserializer = value_deserializer
        self._fetch_min_bytes = fetch_min_bytes
        self._fetch_max_wait_ms = fetch_max_wait_ms
        self._max_partition_fetch_bytes = max_partition_fetch_bytes
        self._consumer_timeout = consumer_timeout_ms / 1000
        self._check_crcs = check_crcs
        self._subscription = SubscriptionState(auto_offset_reset)
        self._fetcher = None
        self._coordinator = None
        self._closed = False
        self._loop = loop
        self._topics = topics

        if topics:
            self._client.set_topics(topics)
            self._subscription.subscribe(topics=topics)

    @asyncio.coroutine
    def start(self):
        yield from self._client.bootstrap()

        # Check Broker Version if not set explicitly
        if self._api_version == 'auto':
            self._api_version = yield from self._client.check_version()
        # Convert api_version config to tuple for easy comparisons
        self._api_version = tuple(
            map(int, self._api_version.split('.')))

        if self._api_version < (0, 9):
            raise ValueError(
                "Unsupported Kafka version: {}".format(self._api_version))

        self._fetcher = Fetcher(
            self._client, self._subscription, loop=self._loop,
            key_deserializer=self._key_deserializer,
            value_deserializer=self._value_deserializer,
            fetch_min_bytes=self._fetch_min_bytes,
            fetch_max_wait_ms=self._fetch_max_wait_ms,
            max_partition_fetch_bytes=self._max_partition_fetch_bytes,
            check_crcs=self._check_crcs,
            fetcher_timeout=self._consumer_timeout)

        if self._group_id is not None:
            # using group coordinator for automatic partitions assignment
            self._coordinator = GroupCoordinator(
                self._client, self._subscription, loop=self._loop,
                group_id=self._group_id,
                heartbeat_interval_ms=self._heartbeat_interval_ms,
                retry_backoff_ms=self._retry_backoff_ms,
                enable_auto_commit=self._enable_auto_commit,
                auto_commit_interval_ms=self._auto_commit_interval_ms,
                assignors=self._partition_assignment_strategy)
            self._coordinator.on_group_rebalanced(
                self._on_change_subscription)

            yield from self._coordinator.ensure_active_group()
        elif self._subscription.needs_partition_assignment:
            # using manual partitions assignment by topic(s)
            yield from self._client.force_metadata_update()
            partitions = []
            for topic in self._topics:
                p_ids = self.partitions_for_topic(topic)
                for p_id in p_ids:
                    partitions.append(TopicPartition(topic, p_id))
            self._subscription.unsubscribe()
            self._subscription.assign_from_user(partitions)
            yield from self._update_fetch_positions(
                self._subscription.missing_fetch_positions())

    def assign(self, partitions):
        """Manually assign a list of TopicPartitions to this consumer.

        Arguments:
            partitions (list of TopicPartition): assignment for this instance.

        Raises:
            IllegalStateError: if consumer has already called subscribe()

        Warning:
            It is not possible to use both manual partition assignment with
            assign() and group assignment with subscribe().

        Note:
            This interface does not support incremental assignment and will
            replace the previous assignment (if there was one).

        Note:
            Manual topic assignment through this method does not use the
            consumer's group management functionality. As such, there will be
            no rebalance operation triggered when group membership or cluster
            and topic metadata change.
        """
        self._subscription.assign_from_user(partitions)
        self._on_change_subscription()
        self._client.set_topics([tp.topic for tp in partitions])

    def assignment(self):
        """Get the TopicPartitions currently assigned to this consumer.

        If partitions were directly assigned using assign(), then this will
        simply return the same partitions that were previously assigned.
        If topics were subscribed using subscribe(), then this will give the
        set of topic partitions currently assigned to the consumer (which may
        be none if the assignment hasn't happened yet or if the partitions are
        in the process of being reassigned).

        Returns:
            set: {TopicPartition, ...}
        """
        return self._subscription.assigned_partitions()

    @asyncio.coroutine
    def stop(self):
        """Close the consumer, waiting indefinitely for any needed cleanup."""
        if self._closed:
            return
        log.debug("Closing the KafkaConsumer.")
        self._closed = True
        if self._coordinator:
            yield from self._coordinator.close()
        if self._fetcher:
            yield from self._fetcher.close()
        yield from self._client.close()
        log.debug("The KafkaConsumer has closed.")

    @asyncio.coroutine
    def commit(self, offsets=None):
        """Commit offsets to kafka, blocking until success or error

        This commits offsets only to Kafka.
        The offsets committed using this API will be used on the first fetch
        after every rebalance and also on startup. As such, if you need to
        store offsets in anything other than Kafka, this API should not be
        used.

        Blocks until either the commit succeeds or an unrecoverable error is
        encountered (in which case it is thrown to the caller).

        Currently only supports kafka-topic offset storage (not zookeeper)

        Arguments:
            offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
                to commit with the configured group_id. Defaults to current
                consumed offsets for all subscribed partitions.
        """
        assert self._group_id is not None, 'Requires group_id'

        if offsets is None:
            offsets = self._subscription.all_consumed_offsets()
        else:
            # validate `offsets` structure
            assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
            assert all(map(lambda v: isinstance(v, OffsetAndMetadata),
                           offsets.values()))
        yield from self._coordinator.commit_offsets(offsets)

    @asyncio.coroutine
    def committed(self, partition):
        """Get the last committed offset for the given partition

        This offset will be used as the position for the consumer
        in the event of a failure.

        This call may block to do a remote call if the partition in question
        isn't assigned to this consumer or if the consumer hasn't yet
        initialized its cache of committed offsets.

        Arguments:
            partition (TopicPartition): the partition to check

        Returns:
            The last committed offset, or None if there was no prior commit.
        """
        assert self._group_id is not None, 'Requires group_id'
        if self._subscription.is_assigned(partition):
            committed = self._subscription.assignment[partition].committed
            if committed is None:
                yield from self._coordinator.refresh_committed_offsets()
                committed = self._subscription.assignment[partition].committed
        else:
            commit_map = yield from self._coordinator.fetch_committed_offsets(
                [partition])
            if partition in commit_map:
                committed = commit_map[partition].offset
            else:
                committed = None
        return committed
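
    # A comment-only sketch of resuming from the last committed offset
    # (assumes group_id is configured and ``tp`` is an assigned
    # TopicPartition):
    #
    #     offset = yield from consumer.committed(tp)
    #     if offset is not None:
    #         consumer.seek(tp, offset)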

    @asyncio.coroutine
    def topics(self):
        """Get all topics the user is authorized to view.

        Returns:
            set: topics
        """
        cluster = yield from self._client.fetch_all_metadata()
        return cluster.topics()

    def partitions_for_topic(self, topic):
        """Get metadata about the partitions for a given topic.

        Arguments:
            topic (str): topic to check

        Returns:
            set: partition ids
        """
        return self._client.cluster.partitions_for_topic(topic)

    @asyncio.coroutine
    def position(self, partition):
        """Get the offset of the next record that will be fetched

        Arguments:
            partition (TopicPartition): partition to check

        Returns:
            int: offset
        """
        assert self._subscription.is_assigned(partition), \
            'Partition is not assigned'
        offset = self._subscription.assignment[partition].position
        if offset is None:
            yield from self._update_fetch_positions([partition])
            offset = self._subscription.assignment[partition].position
        return offset

    def highwater(self, partition):
        """Last known highwater offset for a partition

        A highwater offset is the offset that will be assigned to the next
        message that is produced. It may be useful for calculating lag, by
        comparing with the reported position. Note that both position and
        highwater refer to the *next* offset -- i.e., the highwater offset is
        one greater than the newest available message.

        Highwater offsets are returned in FetchResponse messages, so they
        will not be available if no FetchRequests have been sent for this
        partition yet.

        Arguments:
            partition (TopicPartition): partition to check

        Returns:
            int or None: offset if available
        """
        assert self._subscription.is_assigned(partition), \
            'Partition is not assigned'
        return self._subscription.assignment[partition].highwater
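
    # A comment-only sketch of the lag calculation mentioned above (assumes
    # ``tp`` is an assigned TopicPartition and a fetch has completed):
    #
    #     position = yield from consumer.position(tp)
    #     lag = consumer.highwater(tp) - position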

    def seek(self, partition, offset):
        """Manually specify the fetch offset for a TopicPartition.

        Overrides the fetch offsets that the consumer will use on the next
        poll(). If this API is invoked for the same partition more than once,
        the latest offset will be used on the next poll(). Note that you may
        lose data if this API is arbitrarily used in the middle of consumption,
        to reset the fetch offsets.

        Arguments:
            partition (TopicPartition): partition for seek operation
            offset (int): message offset in partition

        Raises:
            AssertionError: if offset is not an int >= 0;
                            or if partition is not currently assigned.
        """
        assert isinstance(offset, int) and offset >= 0, 'Offset must be >= 0'
        assert partition in self._subscription.assigned_partitions(), \
            'Unassigned partition'
        log.debug("Seeking to offset %s for partition %s", offset, partition)
        self._subscription.assignment[partition].seek(offset)

    @asyncio.coroutine
    def seek_to_committed(self, *partitions):
        """Seek to the committed offset for partitions

        Arguments:
            partitions: optionally provide specific TopicPartitions,
                otherwise default to all assigned partitions

        Raises:
            AssertionError: if any partition is not currently assigned, or if
                no partitions are assigned
        """
        if not partitions:
            partitions = self._subscription.assigned_partitions()
            assert partitions, 'No partitions are currently assigned'
        else:
            for p in partitions:
                assert p in self._subscription.assigned_partitions(), \
                    'Unassigned partition'

        for tp in partitions:
            log.debug("Seeking to committed of partition %s", tp)
            offset = yield from self.committed(tp)
            if offset and offset > 0:
                self.seek(tp, offset)

    def subscribe(self, topics=(), pattern=None, listener=None):
        """Subscribe to a list of topics, or a topic regex pattern

        Partitions will be dynamically assigned via a group coordinator.
        Topic subscriptions are not incremental: this list will replace the
        current assignment (if there is one).

        This method is incompatible with assign()

        Arguments:
           topics (list): List of topics for subscription.
           pattern (str): Pattern to match available topics. You must provide
               either topics or pattern, but not both.
           listener (ConsumerRebalanceListener): Optionally include listener
               callback, which will be called before and after each rebalance
               operation.
               As part of group management, the consumer will keep track of
               the list of consumers that belong to a particular group and
               will trigger a rebalance operation if one of the following
               events trigger:

               * Number of partitions change for any of the subscribed topics
               * Topic is created or deleted
               * An existing member of the consumer group dies
               * A new member is added to the consumer group

               When any of these events are triggered, the provided listener
               will be invoked first to indicate that the consumer's
               assignment has been revoked, and then again when the new
               assignment has been received. Note that this listener will
               immediately override any listener set in a previous call
               to subscribe. It is guaranteed, however, that the partitions
               revoked/assigned
               through this interface are from topics subscribed in this call.
        Raises:
            IllegalStateError: if called after previously calling assign()
            AssertionError: if neither topics or pattern is provided
            TypeError: if listener is not a ConsumerRebalanceListener
        """
        # SubscriptionState handles error checking
        self._subscription.subscribe(topics=topics,
                                     pattern=pattern,
                                     listener=listener)

        # regex will need all topic metadata
        if pattern is not None:
            self._client.set_topics([])
            log.debug("Subscribed to topic pattern: %s", pattern)
        else:
            self._client.set_topics(self._subscription.group_subscription())
            log.debug("Subscribed to topic(s): %s", topics)

    def subscription(self):
        """Get the current topic subscription.

        Returns:
            set: {topic, ...}
        """
        return self._subscription.subscription

    def unsubscribe(self):
        """Unsubscribe from all topics and clear all assigned partitions."""
        self._subscription.unsubscribe()
        self._client.set_topics([])
        log.debug(
            "Unsubscribed all topics or patterns and assigned partitions")

    @asyncio.coroutine
    def _update_fetch_positions(self, partitions):
        """
        Set the fetch position to the committed position (if there is one)
        or reset it using the offset reset policy the user has configured.

        Arguments:
            partitions (List[TopicPartition]): The partitions whose fetch
                positions need updating

        Raises:
            NoOffsetForPartitionError: If no offset is stored for a given
                partition and no offset reset policy is defined
        """
        if self._group_id is not None:
            # refresh commits for all assigned partitions
            yield from self._coordinator.refresh_committed_offsets()

        # then do any offset lookups in case some positions are not known
        yield from self._fetcher.update_fetch_positions(partitions)

    def _on_change_subscription(self):
        """This is `group rebalanced` signal handler for update fetch positions
        of assigned partitions"""
        # fetch positions if we have partitions we're subscribed
        # to that we don't know the offset for
        if not self._subscription.has_all_fetch_positions():
            ensure_future(self._update_fetch_positions(
                self._subscription.missing_fetch_positions()),
                loop=self._loop)

    @asyncio.coroutine
    def getone(self, *partitions):
        """
        Get one message from Kafka.
        If no new messages are prefetched, this method will wait for one.

        Arguments:
            partitions (List[TopicPartition]): Optional list of partitions to
                return from. If no partitions are specified, the returned
                message may come from any partition the consumer is
                subscribed to.

        Returns:
            ConsumerRecord

        Will return an instance of

        .. code:: python

            collections.namedtuple(
                "ConsumerRecord",
                ["topic", "partition", "offset", "key", "value"])

        Example usage:


        .. code:: python

            while True:
                message = yield from consumer.getone()
                topic = message.topic
                partition = message.partition
                # Process message
                print(message.offset, message.key, message.value)

        """
        assert all(map(lambda k: isinstance(k, TopicPartition), partitions))

        msg = yield from self._fetcher.next_record(partitions)
        return msg

    @asyncio.coroutine
    def getmany(self, *partitions, timeout_ms=0):
        """Get messages from assigned topics / partitions.

        Prefetched messages are returned in batches by topic-partition.
        If messages are not available in the prefetched buffer, this method
        waits up to `timeout_ms` milliseconds.

        Arguments:
            partitions (List[TopicPartition]): The partitions to fetch from.
                If no partitions are specified, all subscribed partitions
                will be used.
            timeout_ms (int, optional): milliseconds spent waiting if
                data is not available in the buffer. If 0, returns immediately
                with any records that are currently available in the buffer,
                otherwise returns an empty dict. Must not be negative.
                Default: 0
        Returns:
            dict: map of topic-partition to list of records since the last
                fetch for the subscribed list of topics and partitions

        Example usage:


        .. code:: python

            data = yield from consumer.getmany()
            for tp, messages in data.items():
                topic = tp.topic
                partition = tp.partition
                for message in messages:
                    # Process message
                    print(message.offset, message.key, message.value)

        """
        assert all(map(lambda k: isinstance(k, TopicPartition), partitions))

        timeout = timeout_ms / 1000
        records = yield from self._fetcher.fetched_records(partitions, timeout)
        return records

    if PY_35:
        @asyncio.coroutine
        def __aiter__(self):
            return self

        @asyncio.coroutine
        def __anext__(self):
            return (yield from self.getone())
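
# A self-contained sketch of the consumer workflow above, assuming a broker
# on localhost:9092 (the topic and group names are hypothetical):

import asyncio


@asyncio.coroutine
def consume_example(loop):
    consumer = AIOKafkaConsumer(
        'my_topic', loop=loop,
        bootstrap_servers='localhost:9092', group_id='my_group')
    yield from consumer.start()
    try:
        while True:
            message = yield from consumer.getone()
            print(message.offset, message.key, message.value)
    finally:
        yield from consumer.stop()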
Example #45
0
class AIOKafkaProducer(object):
    """A Kafka client that publishes records to the Kafka cluster.

    The producer consists of a pool of buffer space that holds records that
    haven't yet been transmitted to the server as well as a background task
    that is responsible for turning these records into requests and
    transmitting them to the cluster.

    The send() method is asynchronous. When called it adds the record to a
    buffer of pending record sends and immediately returns. This allows the
    producer to batch together individual records for efficiency.

    The 'acks' config controls the criteria under which requests are considered
    complete. The "all" setting will result in waiting for all replicas to
    respond, the slowest but most durable setting.

    The key_serializer and value_serializer instruct how to turn the key and
    value objects the user provides into bytes.

    Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the producer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client.
            Default: 'aiokafka-producer-#' (appended with a unique number
            per instance)
        key_serializer (callable): used to convert user-supplied keys to bytes
            If not None, called as f(key), should return bytes. Default: None.
        value_serializer (callable): used to convert user-supplied message
            values to bytes. If not None, called as f(value), should return
            bytes. Default: None.
        acks (0, 1, 'all'): The number of acknowledgments the producer requires
            the leader to have received before considering a request complete.
            This controls the durability of records that are sent. The
            following settings are common:

            0: Producer will not wait for any acknowledgment from the server
                at all. The message will immediately be added to the socket
                buffer and considered sent. No guarantee can be made that the
                server has received the record in this case, and the retries
                configuration will not take effect (as the client won't
                generally know of any failures). The offset given back for each
                record will always be set to -1.
            1: The broker leader will write the record to its local log but
                will respond without awaiting full acknowledgement from all
                followers. In this case should the leader fail immediately
                after acknowledging the record but before the followers have
                replicated it then the record will be lost.
            all: The broker leader will wait for the full set of in-sync
                replicas to acknowledge the record. This guarantees that the
                record will not be lost as long as at least one in-sync replica
                remains alive. This is the strongest available guarantee.

            If unset, defaults to *acks=1*. If ``enable_idempotence`` is
            ``True`` defaults to *acks=all*
        compression_type (str): The compression type for all data generated by
            the producer. Valid values are 'gzip', 'snappy', 'lz4', or None.
            Compression is of full batches of data, so the efficacy of batching
            will also impact the compression ratio (more batching means better
            compression). Default: None.
        max_batch_size (int): Maximum size of buffered data per partition.
            After this amount `send` coroutine will block until batch is
            drained.
            Default: 16384
        linger_ms (int): The producer groups together any records that arrive
            in between request transmissions into a single batched request.
            Normally this occurs only under load when records arrive faster
            than they can be sent out. However in some circumstances the client
            may want to reduce the number of requests even under moderate load.
            This setting accomplishes this by adding a small amount of
            artificial delay; that is, if the first request is processed
            faster than `linger_ms`, the producer will wait out the remaining
            `linger_ms - process_time`. This setting defaults to 0 (no delay).
        partitioner (callable): Callable used to determine which partition
            each message is assigned to. Called (after key serialization):
            partitioner(key_bytes, all_partitions, available_partitions).
            The default partitioner implementation hashes each non-None key
            using the same murmur2 algorithm as the Java client so that
            messages with the same key are assigned to the same partition.
            When a key is None, the message is delivered to a random partition
            (filtered to partitions with available leaders only, if possible).
        max_request_size (int): The maximum size of a request. This is also
            effectively a cap on the maximum record size. Note that the server
            has its own cap on record size which may be different from this.
            This setting will limit the number of record batches the producer
            will send in a single request to avoid sending huge requests.
            Default: 1048576.
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        request_timeout_ms (int): Produce request timeout in milliseconds.
            As it's sent as part of ProduceRequest (it's a blocking call),
            maximum waiting time can be up to 2 * request_timeout_ms.
            Default: 40000.
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        api_version (str): specify which kafka API version to use.
            If set to 'auto', will attempt to infer the broker version by
            probing various APIs. Default: auto
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
            socket connections. Directly passed into asyncio's
            `create_connection`_. For more information see :ref:`ssl_auth`.
            Default: None.
        connections_max_idle_ms (int): Close idle connections after the number
            of milliseconds specified by this config. Specifying `None` will
            disable idle checks. Default: 540000 (9 minutes).
        enable_idempotence (bool): When set to ``True``, the producer will
            ensure that exactly one copy of each message is written in the
            stream. If ``False``, producer retries due to broker failures,
            etc., may write duplicates of the retried message in the stream.
            Note that enabling idempotence requires ``acks`` to be set to
            'all'. If it is not explicitly set by the user it will be chosen
            automatically. If incompatible values are set, a ``ValueError``
            will be thrown.
            New in version 0.5.0.
        sasl_mechanism (str): Authentication mechanism when security_protocol
            is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
            PLAIN, GSSAPI. Default: PLAIN
        sasl_plain_username (str): username for sasl PLAIN authentication.
            Default: None
        sasl_plain_password (str): password for sasl PLAIN authentication.
            Default: None

    Note:
        Many configuration parameters are taken from the Java client:
        https://kafka.apache.org/documentation.html#producerconfigs
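
    A minimal usage sketch (assumes a broker on localhost:9092; the topic
    name is hypothetical):

    .. code:: python

        producer = AIOKafkaProducer(loop=loop)
        yield from producer.start()
        try:
            yield from producer.send_and_wait('my_topic', b'Super message')
        finally:
            yield from producer.stop()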
    """
    _PRODUCER_CLIENT_ID_SEQUENCE = 0

    _COMPRESSORS = {
        'gzip': (has_gzip, LegacyRecordBatchBuilder.CODEC_GZIP),
        'snappy': (has_snappy, LegacyRecordBatchBuilder.CODEC_SNAPPY),
        'lz4': (has_lz4, LegacyRecordBatchBuilder.CODEC_LZ4),
    }

    _closed = None  # Serves as an uninitialized flag for __del__
    _source_traceback = None

    def __init__(self,
                 *,
                 loop,
                 bootstrap_servers='localhost',
                 client_id=None,
                 metadata_max_age_ms=300000,
                 request_timeout_ms=40000,
                 api_version='auto',
                 acks=_missing,
                 key_serializer=None,
                 value_serializer=None,
                 compression_type=None,
                 max_batch_size=16384,
                 partitioner=DefaultPartitioner(),
                 max_request_size=1048576,
                 linger_ms=0,
                 send_backoff_ms=100,
                 retry_backoff_ms=100,
                 security_protocol="PLAINTEXT",
                 ssl_context=None,
                 connections_max_idle_ms=540000,
                 enable_idempotence=False,
                 transactional_id=None,
                 transaction_timeout_ms=60000,
                 sasl_mechanism="PLAIN",
                 sasl_plain_password=None,
                 sasl_plain_username=None,
                 sasl_kerberos_service_name='kafka',
                 sasl_kerberos_domain_name=None):
        if acks not in (0, 1, -1, 'all', _missing):
            raise ValueError("Invalid ACKS parameter")
        if compression_type not in ('gzip', 'snappy', 'lz4', None):
            raise ValueError("Invalid compression type!")
        if compression_type:
            checker, compression_attrs = self._COMPRESSORS[compression_type]
            if not checker():
                raise RuntimeError(
                    "Compression library for {} not found".format(
                        compression_type))
        else:
            compression_attrs = 0

        if transactional_id is not None:
            enable_idempotence = True
        else:
            transaction_timeout_ms = INTEGER_MAX_VALUE

        if enable_idempotence:
            if acks is _missing:
                acks = -1
            elif acks not in ('all', -1):
                raise ValueError(
                    "acks={} not supported if enable_idempotence=True".format(
                        acks))
            self._txn_manager = TransactionManager(transactional_id,
                                                   transaction_timeout_ms,
                                                   loop=loop)
        else:
            self._txn_manager = None

        if acks is _missing:
            acks = 1
        elif acks == 'all':
            acks = -1

        AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE += 1
        if client_id is None:
            client_id = 'aiokafka-producer-%s' % \
                AIOKafkaProducer._PRODUCER_CLIENT_ID_SEQUENCE

        self._key_serializer = key_serializer
        self._value_serializer = value_serializer
        self._compression_type = compression_type
        self._partitioner = partitioner
        self._max_request_size = max_request_size
        self._request_timeout_ms = request_timeout_ms

        self.client = AIOKafkaClient(
            loop=loop,
            bootstrap_servers=bootstrap_servers,
            client_id=client_id,
            metadata_max_age_ms=metadata_max_age_ms,
            request_timeout_ms=request_timeout_ms,
            retry_backoff_ms=retry_backoff_ms,
            api_version=api_version,
            security_protocol=security_protocol,
            ssl_context=ssl_context,
            connections_max_idle_ms=connections_max_idle_ms,
            sasl_mechanism=sasl_mechanism,
            sasl_plain_username=sasl_plain_username,
            sasl_plain_password=sasl_plain_password,
            sasl_kerberos_service_name=sasl_kerberos_service_name,
            sasl_kerberos_domain_name=sasl_kerberos_domain_name)
        self._metadata = self.client.cluster
        self._message_accumulator = MessageAccumulator(
            self._metadata,
            max_batch_size,
            compression_attrs,
            self._request_timeout_ms / 1000,
            txn_manager=self._txn_manager,
            loop=loop)
        self._sender = Sender(self.client,
                              acks=acks,
                              txn_manager=self._txn_manager,
                              retry_backoff_ms=retry_backoff_ms,
                              linger_ms=linger_ms,
                              message_accumulator=self._message_accumulator,
                              request_timeout_ms=request_timeout_ms,
                              loop=loop)

        self._loop = loop
        if loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))
        self._closed = False

    if PY_341:
        # Warn if producer was not closed properly
        # We don't attempt to close the Consumer, as __del__ is synchronous
        def __del__(self, _warnings=warnings):
            if self._closed is False:
                if PY_36:
                    kwargs = {'source': self}
                else:
                    kwargs = {}
                _warnings.warn("Unclosed AIOKafkaProducer {!r}".format(self),
                               ResourceWarning, **kwargs)
                context = {
                    'producer': self,
                    'message': 'Unclosed AIOKafkaProducer'
                }
                if self._source_traceback is not None:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)

    @asyncio.coroutine
    def start(self):
        """Connect to Kafka cluster and check server version"""
        log.debug("Starting the Kafka producer")  # trace
        yield from self.client.bootstrap()

        if self._compression_type == 'lz4':
            assert self.client.api_version >= (0, 8, 2), \
                'LZ4 Requires >= Kafka 0.8.2 Brokers'

        if self._txn_manager is not None and self.client.api_version < (0, 11):
            raise UnsupportedVersionError(
                "Idempotent producer available only for Broker vesion 0.11"
                " and above")

        yield from self._sender.start()
        self._message_accumulator.set_api_version(self.client.api_version)
        self._producer_magic = 0 if self.client.api_version < (0, 10) else 1
        log.debug("Kafka producer started")

    @asyncio.coroutine
    def flush(self):
        """Wait untill all batches are Delivered and futures resolved"""
        yield from self._message_accumulator.flush()

    @asyncio.coroutine
    def stop(self):
        """Flush all pending data and close all connections to kafka cluster"""
        if self._closed:
            return
        self._closed = True

        # If the sender task is down there is no way for accumulator to flush
        if self._sender is not None and self._sender.sender_task is not None:
            yield from asyncio.wait(
                [self._message_accumulator.close(), self._sender.sender_task],
                return_when=asyncio.FIRST_COMPLETED,
                loop=self._loop)

            yield from self._sender.close()

        yield from self.client.close()
        log.debug("The Kafka producer has closed.")

    @asyncio.coroutine
    def partitions_for(self, topic):
        """Returns set of all known partitions for the topic."""
        return (yield from self.client._wait_on_metadata(topic))

    def _serialize(self, topic, key, value):
        if self._key_serializer:
            serialized_key = self._key_serializer(key)
        else:
            serialized_key = key
        if self._value_serializer:
            serialized_value = self._value_serializer(value)
        else:
            serialized_value = value

        message_size = LegacyRecordBatchBuilder.record_overhead(
            self._producer_magic)
        if serialized_key is not None:
            message_size += len(serialized_key)
        if serialized_value is not None:
            message_size += len(serialized_value)
        if message_size > self._max_request_size:
            raise MessageSizeTooLargeError(
                "The message is %d bytes when serialized which is larger than"
                " the maximum request size you have configured with the"
                " max_request_size configuration" % message_size)

        return serialized_key, serialized_value

    def _partition(self, topic, partition, key, value, serialized_key,
                   serialized_value):
        if partition is not None:
            assert partition >= 0
            assert partition in self._metadata.partitions_for_topic(topic), \
                'Unrecognized partition'
            return partition

        all_partitions = list(self._metadata.partitions_for_topic(topic))
        available = list(self._metadata.available_partitions_for_topic(topic))
        return self._partitioner(serialized_key, all_partitions, available)

    @asyncio.coroutine
    def send(self,
             topic,
             value=None,
             key=None,
             partition=None,
             timestamp_ms=None,
             headers=None):
        """Publish a message to a topic.

        Arguments:
            topic (str): topic where the message will be published
            value (optional): message value. Must be type bytes, or be
                serializable to bytes via configured value_serializer. If value
                is None, key is required and message acts as a 'delete'.
                See kafka compaction documentation for more details:
                http://kafka.apache.org/documentation.html#compaction
                (compaction requires kafka >= 0.8.1)
            partition (int, optional): optionally specify a partition. If not
                set, the partition will be selected using the configured
                'partitioner'.
            key (optional): a key to associate with the message. Can be used to
                determine which partition to send the message to. If partition
                is None (and producer's partitioner config is left as default),
                then messages with the same key will be delivered to the same
                partition (but if key is None, partition is chosen randomly).
                Must be type bytes, or be serializable to bytes via configured
                key_serializer.
            timestamp_ms (int, optional): epoch milliseconds (from Jan 1 1970
                UTC) to use as the message timestamp. Defaults to current time.

        Returns:
            asyncio.Future: object that will be set when message is
            processed

        Raises:
            kafka.KafkaTimeoutError: if we can't schedule this record (
                pending buffer is full) in up to `request_timeout_ms`
                milliseconds.

        Note:
            The returned future will wait based on `request_timeout_ms`
            setting. Cancelling the returned future **will not** stop event
            from being sent, but cancelling the ``send`` coroutine itself
            **will**.
        """
        assert value is not None or self.client.api_version >= (0, 8, 1), (
            'Null messages require kafka >= 0.8.1')
        assert not (value is None and key is None), \
            'Need at least one: key or value'

        # first make sure the metadata for the topic is available
        yield from self.client._wait_on_metadata(topic)

        # Ensure transaction is started and not committing
        if self._txn_manager is not None:
            txn_manager = self._txn_manager
            if txn_manager.transactional_id is not None and \
                    not self._txn_manager.is_in_transaction():
                raise IllegalOperation(
                    "Can't send messages while not in transaction")

        if headers is not None:
            if self.client.api_version < (0, 11):
                raise UnsupportedVersionError(
                    "Headers not supported before Kafka 0.11")
        else:
            # Record parser/builder support only list type, no explicit None
            headers = []

        key_bytes, value_bytes = self._serialize(topic, key, value)
        partition = self._partition(topic, partition, key, value, key_bytes,
                                    value_bytes)

        tp = TopicPartition(topic, partition)
        log.debug("Sending (key=%s value=%s) to %s", key, value, tp)

        fut = yield from self._message_accumulator.add_message(
            tp,
            key_bytes,
            value_bytes,
            self._request_timeout_ms / 1000,
            timestamp_ms=timestamp_ms,
            headers=headers)
        return fut

    @asyncio.coroutine
    def send_and_wait(self,
                      topic,
                      value=None,
                      key=None,
                      partition=None,
                      timestamp_ms=None):
        """Publish a message to a topic and wait the result"""
        future = yield from self.send(topic, value, key, partition,
                                      timestamp_ms)
        return (yield from future)

    def create_batch(self):
        """Create and return an empty BatchBuilder.

        The batch is not queued for send until submission to ``send_batch``.

        Returns:
            BatchBuilder: empty batch to be filled and submitted by the caller.
        """
        return self._message_accumulator.create_builder()

    @asyncio.coroutine
    def send_batch(self, batch, topic, *, partition):
        """Submit a BatchBuilder for publication.

        Arguments:
            batch (BatchBuilder): batch object to be published.
            topic (str): topic where the batch will be published.
            partition (int): partition where this batch will be published.

        Returns:
            asyncio.Future: object that will be set when the batch is
                delivered.
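
        Example (a sketch; assumes a started producer and that partition 0
        exists for ``my_topic``)::

            batch = producer.create_batch()
            batch.append(key=b'key', value=b'value', timestamp=None)
            fut = await producer.send_batch(batch, 'my_topic', partition=0)
            await fut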
        """
        # first make sure the metadata for the topic is available
        yield from self.client._wait_on_metadata(topic)
        # We only validate we have the partition in the metadata here
        partition = self._partition(topic, partition, None, None, None, None)

        # Ensure transaction is started and not committing
        if self._txn_manager is not None:
            txn_manager = self._txn_manager
            if txn_manager.transactional_id is not None and \
                    not self._txn_manager.is_in_transaction():
                raise IllegalOperation(
                    "Can't send messages while not in transaction")

        tp = TopicPartition(topic, partition)
        log.debug("Sending batch to %s", tp)
        future = yield from self._message_accumulator.add_batch(
            batch, tp, self._request_timeout_ms / 1000)
        return future

    def _ensure_transactional(self):
        if self._txn_manager is None or \
                self._txn_manager.transactional_id is None:
            raise IllegalOperation(
                "You need to configure transaction_id to use transactions")

    @asyncio.coroutine
    def begin_transaction(self):
        self._ensure_transactional()
        log.debug("Beginning a new transaction for id %s",
                  self._txn_manager.transactional_id)
        yield from asyncio.shield(self._txn_manager.wait_for_pid(),
                                  loop=self._loop)
        self._txn_manager.begin_transaction()

    @asyncio.coroutine
    def commit_transaction(self):
        self._ensure_transactional()
        log.debug("Committing transaction for id %s",
                  self._txn_manager.transactional_id)
        self._txn_manager.committing_transaction()
        yield from asyncio.shield(self._txn_manager.wait_for_transaction_end(),
                                  loop=self._loop)

    @asyncio.coroutine
    def abort_transaction(self):
        self._ensure_transactional()
        log.debug("Aborting transaction for id %s",
                  self._txn_manager.transactional_id)
        self._txn_manager.aborting_transaction()
        yield from asyncio.shield(self._txn_manager.wait_for_transaction_end(),
                                  loop=self._loop)

    def transaction(self):
        return TransactionContext(self)
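
    # Typical transactional usage (a sketch; assumes the producer was created
    # with ``transactional_id`` set and has been started):
    #
    #     async with producer.transaction():
    #         await producer.send('my_topic', b'payload')
    #
    # The context manager is equivalent to begin_transaction() followed by
    # commit_transaction() on success, or abort_transaction() on error.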

    @asyncio.coroutine
    def send_offsets_to_transaction(self, offsets, group_id):
        self._ensure_transactional()

        if not self._txn_manager.is_in_transaction():
            raise IllegalOperation("Not in the middle of a transaction")

        if not group_id or not isinstance(group_id, str):
            raise ValueError(group_id)

        # validate `offsets` structure
        formatted_offsets = commit_structure_validate(offsets)

        log.debug(
            "Begin adding offsets %s for consumer group %s to transaction",
            formatted_offsets, group_id)
        fut = self._txn_manager.add_offsets_to_txn(formatted_offsets, group_id)
        yield from asyncio.shield(fut, loop=self._loop)
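
The method above enables exactly-once consume-process-produce pipelines:
consumer offsets are committed atomically with the produced messages. A
minimal sketch (assumes a started transactional producer and a consumer
created with ``enable_auto_commit=False`` and ``group_id='my_group'``;
``process`` is an illustrative transform):

    async for msg in consumer:
        async with producer.transaction():
            await producer.send('out_topic', process(msg.value))
            tp = TopicPartition(msg.topic, msg.partition)
            await producer.send_offsets_to_transaction(
                {tp: msg.offset + 1}, 'my_group')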
Example #46
0
File: consumer.py  Project: mkolin/aiokafka
class AIOKafkaConsumer(object):
    """
    A client that consumes records from a Kafka cluster.

    The consumer will transparently handle the failure of servers in the Kafka
    cluster, and adapt as topic-partitions are created or migrate between
    brokers. It also interacts with the assigned kafka Group Coordinator node
    to allow multiple consumers to load balance consumption of topics (feature
    of kafka >= 0.9.0.0).

    .. _create_connection:
        https://docs.python.org/3/library/asyncio-eventloop.html\
        #creating-connections

    Arguments:
        *topics (str): optional list of topics to subscribe to. If not set,
            call subscribe() or assign() before consuming records. Passing
            topics directly is the same as calling the ``subscribe()`` API.
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the consumer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client. Also
            submitted to GroupCoordinator for logging with respect to
            consumer group administration. Default: 'aiokafka-{version}'
        group_id (str or None): name of the consumer group to join for dynamic
            partition assignment (if enabled), and to use for fetching and
            committing offsets. If None, auto-partition assignment (via
            group coordinator) and offset commits are disabled.
            Default: None
        key_deserializer (callable): Any callable that takes a
            raw message key and returns a deserialized key.
        value_deserializer (callable, optional): Any callable that takes a
            raw message value and returns a deserialized value.
        fetch_min_bytes (int): Minimum amount of data the server should
            return for a fetch request, otherwise wait up to
            fetch_max_wait_ms for more data to accumulate. Default: 1.
        fetch_max_wait_ms (int): The maximum amount of time in milliseconds
            the server will block before answering the fetch request if
            there isn't sufficient data to immediately satisfy the
            requirement given by fetch_min_bytes. Default: 500.
        max_partition_fetch_bytes (int): The maximum amount of data
            per-partition the server will return. The maximum total memory
            used for a request = #partitions * max_partition_fetch_bytes.
            This size must be at least as large as the maximum message size
            the server allows or else it is possible for the producer to
            send messages larger than the consumer can fetch. If that
            happens, the consumer can get stuck trying to fetch a large
            message on a certain partition. Default: 1048576.
        max_poll_records (int): The maximum number of records returned in a
            single call to ``getmany()``. Defaults to ``None`` (no limit).
        request_timeout_ms (int): Client request timeout in milliseconds.
            Default: 40000.
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        auto_offset_reset (str): A policy for resetting offsets on
            OffsetOutOfRange errors: 'earliest' will move to the oldest
            available message, 'latest' will move to the most recent. Any
            other value will raise an exception. Default: 'latest'.
        enable_auto_commit (bool): If true the consumer's offset will be
            periodically committed in the background. Default: True.
        auto_commit_interval_ms (int): milliseconds between automatic
            offset commits, if enable_auto_commit is True. Default: 5000.
        check_crcs (bool): Automatically check the CRC32 of the records
            consumed. This ensures no on-the-wire or on-disk corruption to
            the messages occurred. This check adds some overhead, so it may
            be disabled in cases seeking extreme performance. Default: True
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        partition_assignment_strategy (list): List of objects to use to
            distribute partition ownership amongst consumer instances when
            group management is used. This preference is implicit in the order
            of the strategies in the list. To support a change in assignment
            strategy, new versions must support both the old and the new
            strategies. The coordinator will choose the old assignment
            strategy until all members have been updated, then switch to the
            new one. Default: [RoundRobinPartitionAssignor]
        heartbeat_interval_ms (int): The expected time in milliseconds
            between heartbeats to the consumer coordinator when using
            Kafka's group management feature. Heartbeats are used to ensure
            that the consumer's session stays active and to facilitate
            rebalancing when new consumers join or leave the group. The
            value must be set lower than session_timeout_ms, but typically
            should be set no higher than 1/3 of that value. It can be
            adjusted even lower to control the expected time for normal
            rebalances. Default: 3000
        session_timeout_ms (int): The timeout used to detect failures when
            using Kafka's group management facilities. Default: 30000
        consumer_timeout_ms (int): maximum wait timeout for background fetching
            routine. Mostly defines how fast the system will see rebalance and
            request new data for new partitions. Default: 200
        api_version (str): specify which kafka API version to use.
            AIOKafkaConsumer supports Kafka API versions >=0.9 only.
            If set to 'auto', will attempt to infer the broker version by
            probing various APIs. Default: auto
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
            socket connections. Directly passed into asyncio's
            `create_connection`_. For more information see :ref:`ssl_auth`.
            Default: None.
        exclude_internal_topics (bool): Whether records from internal topics
            (such as offsets) should be exposed to the consumer. If set to True
            the only way to receive records from an internal topic is
            subscribing to it. Requires 0.10+ Default: True
        connections_max_idle_ms (int): Close idle connections after the number
            of milliseconds specified by this config. Default: 540000
            (9 hours).

    Note:
        Many configuration parameters are taken from Java Client:
        https://kafka.apache.org/documentation.html#newconsumerconfigs

    """
    def __init__(self, *topics, loop,
                 bootstrap_servers='localhost',
                 client_id='aiokafka-' + __version__,
                 group_id=None,
                 key_deserializer=None, value_deserializer=None,
                 fetch_max_wait_ms=500,
                 fetch_min_bytes=1,
                 max_partition_fetch_bytes=1 * 1024 * 1024,
                 request_timeout_ms=40 * 1000,
                 retry_backoff_ms=100,
                 auto_offset_reset='latest',
                 enable_auto_commit=True,
                 auto_commit_interval_ms=5000,
                 check_crcs=True,
                 metadata_max_age_ms=5 * 60 * 1000,
                 partition_assignment_strategy=(RoundRobinPartitionAssignor,),
                 heartbeat_interval_ms=3000,
                 session_timeout_ms=30000,
                 consumer_timeout_ms=200,
                 max_poll_records=None,
                 ssl_context=None,
                 security_protocol='PLAINTEXT',
                 api_version='auto',
                 exclude_internal_topics=True,
                 connections_max_idle_ms=540000):
        if api_version not in ('auto', '0.9', '0.10'):
            raise ValueError("Unsupported Kafka API version")
        self._client = AIOKafkaClient(
            loop=loop, bootstrap_servers=bootstrap_servers,
            client_id=client_id, metadata_max_age_ms=metadata_max_age_ms,
            request_timeout_ms=request_timeout_ms,
            retry_backoff_ms=retry_backoff_ms,
            api_version=api_version,
            ssl_context=ssl_context,
            security_protocol=security_protocol,
            connections_max_idle_ms=connections_max_idle_ms)

        self._group_id = group_id
        self._heartbeat_interval_ms = heartbeat_interval_ms
        self._retry_backoff_ms = retry_backoff_ms
        self._request_timeout_ms = request_timeout_ms
        self._enable_auto_commit = enable_auto_commit
        self._auto_commit_interval_ms = auto_commit_interval_ms
        self._partition_assignment_strategy = partition_assignment_strategy
        self._key_deserializer = key_deserializer
        self._value_deserializer = value_deserializer
        self._fetch_min_bytes = fetch_min_bytes
        self._fetch_max_wait_ms = fetch_max_wait_ms
        self._max_partition_fetch_bytes = max_partition_fetch_bytes
        self._exclude_internal_topics = exclude_internal_topics
        if max_poll_records is not None and (
                not isinstance(max_poll_records, int) or max_poll_records < 1):
            raise ValueError("`max_poll_records` should be positive Integer")
        self._max_poll_records = max_poll_records
        self._consumer_timeout = consumer_timeout_ms / 1000
        self._check_crcs = check_crcs
        self._subscription = SubscriptionState(auto_offset_reset)
        self._fetcher = None
        self._coordinator = None
        self._closed = False
        self._loop = loop

        # Set for background updates, so we'll finalize them properly. Only
        # active tasks are in this set, as done ones are discarded by callback.
        self._pending_position_fetches = set([])

        if topics:
            self._client.set_topics(topics)
            self._subscription.subscribe(topics=topics)

    @asyncio.coroutine
    def start(self):
        """ Connect to Kafka cluster. This will:

            * Load metadata for all cluster nodes and partition allocation
            * Wait for possible topic autocreation
            * Join group if ``group_id`` provided
        """
        yield from self._client.bootstrap()
        yield from self._wait_topics()

        if self._client.api_version < (0, 9):
            raise ValueError("Unsupported Kafka version: {}".format(
                self._client.api_version))

        self._fetcher = Fetcher(
            self._client, self._subscription, loop=self._loop,
            key_deserializer=self._key_deserializer,
            value_deserializer=self._value_deserializer,
            fetch_min_bytes=self._fetch_min_bytes,
            fetch_max_wait_ms=self._fetch_max_wait_ms,
            max_partition_fetch_bytes=self._max_partition_fetch_bytes,
            check_crcs=self._check_crcs,
            fetcher_timeout=self._consumer_timeout,
            retry_backoff_ms=self._retry_backoff_ms)

        if self._group_id is not None:
            # using group coordinator for automatic partitions assignment
            self._coordinator = GroupCoordinator(
                self._client, self._subscription, loop=self._loop,
                group_id=self._group_id,
                heartbeat_interval_ms=self._heartbeat_interval_ms,
                retry_backoff_ms=self._retry_backoff_ms,
                enable_auto_commit=self._enable_auto_commit,
                auto_commit_interval_ms=self._auto_commit_interval_ms,
                assignors=self._partition_assignment_strategy,
                exclude_internal_topics=self._exclude_internal_topics,
                assignment_changed_cb=self._on_change_subscription)

            yield from self._coordinator.ensure_active_group()
        else:
            # Using a simple assignment coordinator for reassignment on
            # metadata changes
            self._coordinator = NoGroupCoordinator(
                self._client, self._subscription, loop=self._loop,
                exclude_internal_topics=self._exclude_internal_topics,
                assignment_changed_cb=self._on_change_subscription)

            # If `topics` were passed to the constructor.
            if self._subscription.needs_partition_assignment:
                yield from self._client.force_metadata_update()
                self._coordinator.assign_all_partitions(check_unknown=True)

    @asyncio.coroutine
    def _wait_topics(self):
        if not self._subscription.subscription:
            return
        for topic in self._subscription.subscription:
            yield from self._client._wait_on_metadata(topic)

    def assign(self, partitions):
        """ Manually assign a list of TopicPartitions to this consumer.

        This interface does not support incremental assignment and will
        replace the previous assignment (if there was one).

        Arguments:
            partitions (list of TopicPartition): assignment for this instance.

        Raises:
            IllegalStateError: if consumer has already called subscribe()

        Warning:
            It is not possible to use both manual partition assignment with
            assign() and group assignment with subscribe().

        Note:
            Manual topic assignment through this method does not use the
            consumer's group management functionality. As such, there will be
            **no rebalance operation triggered** when group membership or
            cluster and topic metadata change.
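
        Example (a sketch; assumes ``TopicPartition`` is imported)::

            consumer.assign([TopicPartition('topic1', 0)])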
        """
        self._subscription.assign_from_user(partitions)
        self._client.set_topics([tp.topic for tp in partitions])
        self._on_change_subscription()

    def assignment(self):
        """ Get the set of partitions currently assigned to this consumer.

        If partitions were directly assigned using ``assign()``, then this will
        simply return the same partitions that were previously assigned.

        If topics were subscribed using ``subscribe()``, then this will give
        the set of topic partitions currently assigned to the consumer (which
        may be empty if the assignment hasn't happened yet or if the partitions
        are in the process of being reassigned).

        Returns:
            set: {TopicPartition, ...}
        """
        return self._subscription.assigned_partitions()

    @asyncio.coroutine
    def stop(self):
        """ Close the consumer, while waiting for finilizers:

            * Commit last consumed message if autocommit enabled
            * Leave group if used Consumer Groups
        """
        if self._closed:
            return
        log.debug("Closing the KafkaConsumer.")
        self._closed = True
        for task in list(self._pending_position_fetches):
            task.cancel()
            try:
                yield from task
            except asyncio.CancelledError:
                pass
        if self._coordinator:
            yield from self._coordinator.close()
        if self._fetcher:
            yield from self._fetcher.close()
        yield from self._client.close()
        log.debug("The KafkaConsumer has closed.")

    @asyncio.coroutine
    def commit(self, offsets=None):
        """ Commit offsets to Kafka.

        This commits offsets only to Kafka. The offsets committed using this
        API will be used on the first fetch after every rebalance and also on
        startup. As such, if you need to store offsets in anything other than
        Kafka, this API should not be used.

        Currently only supports kafka-topic offset storage (not zookeeper)

        When explicitly passing ``offsets`` use either offset of next record,
        or tuple of offset and metadata::

            tp = TopicPartition(msg.topic, msg.partition)
            metadata = "Some utf-8 metadata"
            # Either
            await consumer.commit({tp: msg.offset + 1})
            # Or position directly
            await consumer.commit({tp: (msg.offset + 1, metadata)})

        .. note:: If you want `fire and forget` commit, like ``commit_async()``
            in *kafka-python*, just run it in a task. Something like::

                fut = loop.create_task(consumer.commit())
                fut.add_done_callback(on_commit_done)

        Arguments:
            offsets (dict, optional): {TopicPartition: (offset, metadata)} dict
                to commit with the configured ``group_id``. Defaults to current
                consumed offsets for all subscribed partitions.
        Raises:
            IllegalOperation: If used with ``group_id == None``
            ValueError: If offsets is of wrong format
            KafkaError: If commit failed on broker side. This could be due to
                invalid offset, too long metadata, authorization failure, etc.
        """
        if self._group_id is None:
            raise IllegalOperation("Requires group_id")

        if offsets is None:
            offsets = self._subscription.all_consumed_offsets()
        else:
            # validate `offsets` structure
            if not offsets or not isinstance(offsets, dict):
                raise ValueError(offsets)

            formatted_offsets = {}
            for tp, offset_and_metadata in offsets.items():
                if not isinstance(tp, TopicPartition):
                    raise ValueError("Key should be TopicPartition instance")

                if isinstance(offset_and_metadata, int):
                    offset, metadata = offset_and_metadata, ""
                else:
                    try:
                        offset, metadata = offset_and_metadata
                    except Exception:
                        raise ValueError(offsets)

                    if not isinstance(metadata, str):
                        raise ValueError("Metadata should be a string")

                formatted_offsets[tp] = OffsetAndMetadata(offset, metadata)

            offsets = formatted_offsets

        yield from self._coordinator.commit_offsets(offsets)

    @asyncio.coroutine
    def committed(self, partition):
        """ Get the last committed offset for the given partition. (whether the
        commit happened by this process or another).

        This offset will be used as the position for the consumer
        in the event of a failure.

        This call may block to do a remote call if the partition in question
        isn't assigned to this consumer or if the consumer hasn't yet
        initialized its cache of committed offsets.

        Arguments:
            partition (TopicPartition): the partition to check

        Returns:
            The last committed offset, or None if there was no prior commit.

        Raises:
            IllegalOperation: If used with ``group_id == None``
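
        Example (a sketch)::

            offset = await consumer.committed(TopicPartition('topic1', 0))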
        """
        if self._group_id is None:
            raise IllegalOperation("Requires group_id")

        if self._subscription.is_assigned(partition):
            committed = self._subscription.assignment[partition].committed
            if committed is None:
                yield from self._coordinator.refresh_committed_offsets()
                committed = self._subscription.assignment[partition].committed
        else:
            commit_map = yield from self._coordinator.fetch_committed_offsets(
                [partition])
            if partition in commit_map:
                committed = commit_map[partition].offset
            else:
                committed = None
        return committed

    @asyncio.coroutine
    def topics(self):
        """ Get all topics the user is authorized to view.

        Returns:
            set: topics
        """
        cluster = yield from self._client.fetch_all_metadata()
        return cluster.topics()

    def partitions_for_topic(self, topic):
        """ Get metadata about the partitions for a given topic.

        This method will return `None` if the consumer does not already have
        metadata for this topic.

        Arguments:
            topic (str): topic to check

        Returns:
            set: partition ids
        """
        return self._client.cluster.partitions_for_topic(topic)

    @asyncio.coroutine
    def position(self, partition):
        """ Get the offset of the *next record* that will be fetched (if a
        record with that offset exists on broker).

        Arguments:
            partition (TopicPartition): partition to check

        Returns:
            int: offset
        """
        assert self._subscription.is_assigned(partition), \
            'Partition is not assigned'
        offset = self._subscription.assignment[partition].position
        if offset is None:
            yield from self._update_fetch_positions([partition])
            offset = self._subscription.assignment[partition].position
        return offset

    def highwater(self, partition):
        """ Last known highwater offset for a partition.

        A highwater offset is the offset that will be assigned to the next
        message that is produced. It may be useful for calculating lag, by
        comparing with the reported position. Note that both position and
        highwater refer to the *next* offset, i.e. the highwater offset is one
        greater than the offset of the newest available message.

        Highwater offsets are returned as part of ``FetchResponse``, so will
        not be available if messages for this partition were not requested yet.

        Arguments:
            partition (TopicPartition): partition to check

        Returns:
            int or None: offset if available
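
        Example of estimating consumer lag (a sketch; assumes ``tp`` is an
        assigned TopicPartition)::

            lag = consumer.highwater(tp) - (await consumer.position(tp))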
        """
        assert self._subscription.is_assigned(partition), \
            'Partition is not assigned'
        return self._subscription.assignment[partition].highwater

    def seek(self, partition, offset):
        """ Manually specify the fetch offset for a TopicPartition.

        Overrides the fetch offsets that the consumer will use on the next
        ``getmany()``/``getone()`` call. If this API is invoked for the same
        partition more than once, the latest offset will be used on the next
        fetch.

        Note:
            You may lose data if this API is arbitrarily used in the middle
            of consumption to reset the fetch offsets. Use it either on
            rebalance listeners or after all pending messages are processed.

        Arguments:
            partition (TopicPartition): partition for seek operation
            offset (int): message offset in partition

        Raises:
            AssertionError: if offset is not an int >= 0;
                            or if partition is not currently assigned.
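
        Example (a sketch; assumes ``tp`` is an assigned partition)::

            consumer.seek(tp, 10)  # next fetch for tp starts at offset 10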
        """
        assert isinstance(offset, int) and offset >= 0, 'Offset must be >= 0'
        assert partition in self._subscription.assigned_partitions(), \
            'Unassigned partition'
        log.debug("Seeking to offset %s for partition %s", offset, partition)
        self._subscription.assignment[partition].seek(offset)

    @asyncio.coroutine
    def seek_to_beginning(self, *partitions):
        """ Seek to the oldest available offset for partitions.

        Arguments:
            *partitions: Optionally provide specific TopicPartitions, otherwise
                default to all assigned partitions.

        Raises:
            IllegalStateError: If any partition is not currently assigned
            TypeError: If partitions are not instances of TopicPartition

        .. versionadded:: 0.3.0

        """
        if not all([isinstance(p, TopicPartition) for p in partitions]):
            raise TypeError('partitions must be TopicPartition instances')

        yield from self._coordinator.ensure_partitions_assigned()

        if not partitions:
            partitions = self._subscription.assigned_partitions()
            assert partitions, 'No partitions are currently assigned'
        else:
            not_assigned = (
                set(partitions) - self._subscription.assigned_partitions()
            )
            if not_assigned:
                raise IllegalStateError(
                    "Partitions {} are not assigned".format(not_assigned))

        for tp in partitions:
            log.debug("Seeking to beginning of partition %s", tp)
            self._subscription.need_offset_reset(
                tp, OffsetResetStrategy.EARLIEST)
        yield from self._fetcher.update_fetch_positions(partitions)

    @asyncio.coroutine
    def seek_to_end(self, *partitions):
        """Seek to the most recent available offset for partitions.

        Arguments:
            *partitions: Optionally provide specific TopicPartitions, otherwise
                default to all assigned partitions.

        Raises:
            IllegalStateError: If any partition is not currently assigned
            TypeError: If partitions are not instances of TopicPartition

        .. versionadded:: 0.3.0

        """
        if not all([isinstance(p, TopicPartition) for p in partitions]):
            raise TypeError('partitions must be TopicPartition instances')

        yield from self._coordinator.ensure_partitions_assigned()

        if not partitions:
            partitions = self._subscription.assigned_partitions()
            assert partitions, 'No partitions are currently assigned'
        else:
            not_assigned = (
                set(partitions) - self._subscription.assigned_partitions()
            )
            if not_assigned:
                raise IllegalStateError(
                    "Partitions {} are not assigned".format(not_assigned))

        for tp in partitions:
            log.debug("Seeking to end of partition %s", tp)
            self._subscription.need_offset_reset(
                tp, OffsetResetStrategy.LATEST)
        yield from self._fetcher.update_fetch_positions(partitions)

    @asyncio.coroutine
    def seek_to_committed(self, *partitions):
        """ Seek to the committed offset for partitions.

        Arguments:
            *partitions: Optionally provide specific TopicPartitions, otherwise
                default to all assigned partitions.

        Raises:
            IllegalStateError: If any partition is not currently assigned
            IllegalOperation: If used with ``group_id == None``

        .. versionchanged:: 0.3.0

            Changed ``AssertionError`` to ``IllegalStateError`` in case of
            unassigned partition
        """
        if not all([isinstance(p, TopicPartition) for p in partitions]):
            raise TypeError('partitions must be TopicPartition instances')

        yield from self._coordinator.ensure_partitions_assigned()

        if not partitions:
            partitions = self._subscription.assigned_partitions()
            assert partitions, 'No partitions are currently assigned'
        else:
            not_assigned = (
                set(partitions) - self._subscription.assigned_partitions()
            )
            if not_assigned:
                raise IllegalStateError(
                    "Partitions {} are not assigned".format(not_assigned))

        for tp in partitions:
            log.debug("Seeking to committed of partition %s", tp)
            offset = yield from self.committed(tp)
            if offset and offset > 0:
                self.seek(tp, offset)

    @asyncio.coroutine
    def offsets_for_times(self, timestamps):
        """
        Look up the offsets for the given partitions by timestamp. The returned
        offset for each partition is the earliest offset whose timestamp is
        greater than or equal to the given timestamp in the corresponding
        partition.

        The consumer does not have to be assigned the partitions.

        If the message format version in a partition is before 0.10.0, i.e.
        the messages do not have timestamps, ``None`` will be returned for that
        partition.

        Note:
            This method may block indefinitely if the partition does not exist.

        Arguments:
            timestamps (dict): ``{TopicPartition: int}`` mapping from partition
                to the timestamp to look up. Unit should be milliseconds since
                beginning of the epoch (midnight Jan 1, 1970 (UTC))

        Returns:
            dict: ``{TopicPartition: OffsetAndTimestamp}`` mapping from
            partition to the timestamp and offset of the first message with
            timestamp greater than or equal to the target timestamp.

        Raises:
            ValueError: If the target timestamp is negative
            UnsupportedVersionError: If the broker does not support looking
                up the offsets by timestamp.
            KafkaTimeoutError: If fetch failed in request_timeout_ms

        .. versionadded:: 0.3.0
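
        Example (a sketch; assumes a started consumer and a TopicPartition
        ``tp`` that is assigned to it)::

            import time
            # earliest offset whose timestamp is at most one hour old
            ts = int(time.time() * 1000) - 3600 * 1000
            offsets = await consumer.offsets_for_times({tp: ts})
            if offsets[tp] is not None:
                consumer.seek(tp, offsets[tp].offset)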

        """
        if self._client.api_version <= (0, 10, 0):
            raise UnsupportedVersionError(
                "offsets_for_times API not supported for cluster version {}"
                .format(self._client.api_version))
        for tp, ts in timestamps.items():
            timestamps[tp] = int(ts)
            if ts < 0:
                raise ValueError(
                    "The target time for partition {} is {}. The target time "
                    "cannot be negative.".format(tp, ts))
        offsets = yield from self._fetcher.get_offsets_by_times(
            timestamps, self._request_timeout_ms)
        return offsets

    @asyncio.coroutine
    def beginning_offsets(self, partitions):
        """ Get the first offset for the given partitions.

        This method does not change the current consumer position of the
        partitions.

        Note:
            This method may block indefinitely if the partition does not exist.

        Arguments:
            partitions (list): List of TopicPartition instances to fetch
                offsets for.

        Returns:
            dict: ``{TopicPartition: int}`` mapping of partition to the
            earliest
            available offset.

        Raises:
            UnsupportedVersionError: If the broker does not support looking
                up the offsets by timestamp.
            KafkaTimeoutError: If fetch failed in request_timeout_ms.

        .. versionadded:: 0.3.0

        """
        if self._client.api_version <= (0, 10, 0):
            raise UnsupportedVersionError(
                "beginning_offsets API not supported for cluster version {}"
                .format(self._client.api_version))
        offsets = yield from self._fetcher.beginning_offsets(
            partitions, self._request_timeout_ms)
        return offsets

    @asyncio.coroutine
    def end_offsets(self, partitions):
        """ Get the last offset for the given partitions. The last offset of a
        partition is the offset of the upcoming message, i.e. the offset of the
        last available message + 1.

        This method does not change the current consumer position of the
        partitions.

        Note:
            This method may block indefinitely if the partition does not exist.

        Arguments:
            partitions (list): List of TopicPartition instances to fetch
                offsets for.

        Returns:
            dict: ``{TopicPartition: int}`` mapping of partition to last
            available offset + 1.

        Raises:
            UnsupportedVersionError: If the broker does not support looking
                up the offsets by timestamp.
            KafkaTimeoutError: If fetch failed in request_timeout_ms

        .. versionadded:: 0.3.0

        """
        if self._client.api_version <= (0, 10, 0):
            raise UnsupportedVersionError(
                "end_offsets API not supported for cluster version {}"
                .format(self._client.api_version))
        offsets = yield from self._fetcher.end_offsets(
            partitions, self._request_timeout_ms)
        return offsets

    def subscribe(self, topics=(), pattern=None, listener=None):
        """ Subscribe to a list of topics, or a topic regex pattern.

        Partitions will be dynamically assigned via a group coordinator.
        Topic subscriptions are not incremental: this list will replace the
        current assignment (if there is one).

        This method is incompatible with ``assign()``.

        Arguments:
           topics (list): List of topics for subscription.
           pattern (str): Pattern to match available topics. You must provide
               either topics or pattern, but not both.
           listener (ConsumerRebalanceListener): Optionally include listener
               callback, which will be called before and after each rebalance
               operation.
               As part of group management, the consumer will keep track of
               the list of consumers that belong to a particular group and
               will trigger a rebalance operation if one of the following
               events trigger:

               * Number of partitions change for any of the subscribed topics
               * Topic is created or deleted
               * An existing member of the consumer group dies
               * A new member is added to the consumer group

               When any of these events are triggered, the provided listener
               will be invoked first to indicate that the consumer's
               assignment has been revoked, and then again when the new
               assignment has been received. Note that this listener will
               immediately override any listener set in a previous call
               to subscribe. It is guaranteed, however, that the partitions
               revoked/assigned through this interface are from topics
               subscribed in this call.
        Raises:
            IllegalStateError: if called after previously calling assign()
            ValueError: if neither topics nor pattern is provided, or if both
               are provided
            TypeError: if listener is not a ConsumerRebalanceListener
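
        Example (a sketch)::

            consumer.subscribe(topics=['topic1', 'topic2'])
            # or, by pattern (mutually exclusive with topics):
            # consumer.subscribe(pattern='^topic.+')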
        """
        if not (topics or pattern):
            raise ValueError(
                "You should provide either `topics` or `pattern`")
        if topics and pattern:
            raise ValueError(
                "You can't provide both `topics` and `pattern`")
        if pattern:
            try:
                re.compile(pattern)
            except re.error as err:
                raise ValueError(
                    "{!r} is not a valid pattern: {}".format(pattern, err))

        # SubscriptionState handles error checking
        self._subscription.subscribe(topics=topics,
                                     pattern=pattern,
                                     listener=listener)
        # There's a bug in SubscriptionState: the pattern is not unset if we
        # change from a pattern to a plain topic subscription
        if not pattern:
            self._subscription.subscribed_pattern = None

        # regex will need all topic metadata
        if pattern is not None:
            self._client.set_topics([])
            log.debug("Subscribed to topic pattern: %s", pattern)
        else:
            self._client.set_topics(self._subscription.group_subscription())
            log.debug("Subscribed to topic(s): %s", topics)

    def subscription(self):
        """ Get the current topic subscription.

        Returns:
            set: {topic, ...}
        """
        return frozenset(self._subscription.subscription or [])

    def unsubscribe(self):
        """ Unsubscribe from all topics and clear all assigned partitions. """
        self._subscription.unsubscribe()
        self._client.set_topics([])
        log.debug(
            "Unsubscribed all topics or patterns and assigned partitions")

    @asyncio.coroutine
    def _update_fetch_positions(self, partitions):
        """
        Set the fetch position to the committed position (if there is one)
        or reset it using the offset reset policy the user has configured.

        Arguments:
            partitions (List[TopicPartition]): The partitions that need
                updating fetch positions

        Raises:
            NoOffsetForPartitionError: If no offset is stored for a given
                partition and no offset reset policy is defined
        """
        if self._group_id is not None:
            # refresh commits for all assigned partitions
            yield from self._coordinator.refresh_committed_offsets()

        # then do any offset lookups in case some positions are not known
        yield from self._fetcher.update_fetch_positions(partitions)

    def _on_change_subscription(self):
        """ This is `group rebalanced` signal handler used to update fetch
            positions of assigned partitions
        """
        if self._closed:  # pragma: no cover
            return
        # fetch positions if we have partitions we're subscribed
        # to that we don't know the offset for
        if not self._subscription.has_all_fetch_positions():
            task = ensure_future(
                self._update_fetch_positions(
                    self._subscription.missing_fetch_positions()),
                loop=self._loop
            )
            self._pending_position_fetches.add(task)

            def on_done(fut, tasks=self._pending_position_fetches):
                tasks.discard(fut)
                try:
                    fut.result()
                except Exception as err:  # pragma: no cover
                    log.error("Failed to update fetch positions: %r", err)
            task.add_done_callback(on_done)

    @asyncio.coroutine
    def getone(self, *partitions):
        """
        Get one message from Kafka.
        If no new messages are prefetched, this method will wait for one.

        Arguments:
            partitions (List[TopicPartition]): Optional list of partitions to
                return from. If no partitions are specified, the returned
                message may come from any partition the consumer is
                subscribed to.

        Returns:
            ConsumerRecord

        Will return instance of

        .. code:: python

            collections.namedtuple(
                "ConsumerRecord",
                ["topic", "partition", "offset", "key", "value"])

        Example usage:


        .. code:: python

            while True:
                message = await consumer.getone()
                topic = message.topic
                partition = message.partition
                # Process message
                print(message.offset, message.key, message.value)

        """
        assert all(map(lambda k: isinstance(k, TopicPartition), partitions))
        if self._closed:
            raise ConsumerStoppedError()

        msg = yield from self._fetcher.next_record(partitions)
        return msg

    @asyncio.coroutine
    def getmany(self, *partitions, timeout_ms=0, max_records=None):
        """Get messages from assigned topics / partitions.

        Prefetched messages are returned in batches by topic-partition.
        If messages are not available in the prefetched buffer, this method
        waits up to `timeout_ms` milliseconds.

        Arguments:
            partitions (List[TopicPartition]): The partitions to fetch
                messages from. If no partitions are specified, all subscribed
                partitions will be used.
            timeout_ms (int, optional): milliseconds spent waiting if
                data is not available in the buffer. If 0, returns immediately
                with any records currently available in the buffer, otherwise
                returns an empty dict. Must not be negative. Default: 0
            max_records (int, optional): maximum number of records to return
                in this call. Defaults to the ``max_poll_records`` setting
                (no limit if that is ``None`` too).
        Returns:
            dict: topic to list of records since the last fetch for the
                subscribed list of topics and partitions

        Example usage:


        .. code:: python

            data = await consumer.getmany()
            for tp, messages in data.items():
                topic = tp.topic
                partition = tp.partition
                for message in messages:
                    # Process message
                    print(message.offset, message.key, message.value)

        """
        assert all(map(lambda k: isinstance(k, TopicPartition), partitions))
        if self._closed:
            raise ConsumerStoppedError()

        if max_records is not None and (
                not isinstance(max_records, int) or max_records < 1):
            raise ValueError("`max_records` must be a positive Integer")

        timeout = timeout_ms / 1000
        records = yield from self._fetcher.fetched_records(
            partitions, timeout,
            max_records=max_records or self._max_poll_records)
        return records

    if PY_35:
        @asyncio.coroutine
        def __aiter__(self):
            if self._closed:
                raise ConsumerStoppedError()
            return self

        @asyncio.coroutine
        def __anext__(self):
            """Asyncio iterator interface for consumer

            Note:
                TopicAuthorizationFailedError and OffsetOutOfRangeError
                exceptions can be raised in iterator.
                All other KafkaError exceptions will be logged and not raised
            """
            while True:
                try:
                    return (yield from self.getone())
                except ConsumerStoppedError:
                    raise StopAsyncIteration  # noqa: F821
                except (TopicAuthorizationFailedError,
                        OffsetOutOfRangeError) as err:
                    raise err
                except KafkaError as err:
                    log.error("error in consumer iterator: %s", err)