Example #1
    def _maybe_do_transactional_request(self):
        txn_manager = self._txn_manager

        # If we have any new partitions not yet added to the transaction,
        # we need to add them before committing
        tps = txn_manager.partitions_to_add()
        if tps:
            return create_task(
                self._do_add_partitions_to_txn(tps))

        # We need to add the consumer group to the transaction before
        # we can commit the offsets
        group_id = txn_manager.consumer_group_to_add()
        if group_id is not None:
            return create_task(
                self._do_add_offsets_to_txn(group_id))

        # Now commit the added group's offset
        commit_data = txn_manager.offsets_to_commit()
        if commit_data is not None:
            offsets, group_id = commit_data
            return create_task(
                self._do_txn_offset_commit(offsets, group_id))

        commit_result = txn_manager.needs_transaction_commit()
        if commit_result is not None:
            return create_task(
                self._do_txn_commit(commit_result))
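
The method above either returns a freshly created task for the next piece of transactional work or None when nothing is pending; the caller (see the sender routine in Example #8) simply invokes it again once the previous task has finished. Below is a minimal standalone sketch of that "return a Task or None" dispatch pattern; the Dispatcher class and its step names are invented for illustration and are not part of aiokafka.

import asyncio
from asyncio import create_task

# Minimal sketch of the "return a Task or None" dispatch pattern used above.
# The Dispatcher class and its steps are hypothetical, not aiokafka code.
class Dispatcher:
    def __init__(self):
        self._pending = ["step-1", "step-2"]

    def _maybe_do_next_step(self):
        # Return a task for the next pending step, or None when idle.
        if self._pending:
            step = self._pending.pop(0)
            return create_task(self._do_step(step))
        return None

    async def _do_step(self, step):
        await asyncio.sleep(0.01)
        print("finished", step)

async def main():
    d = Dispatcher()
    while True:
        task = d._maybe_do_next_step()
        if task is None:
            break
        await task

asyncio.run(main())
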
Example #2
    async def test_no_concurrent_send_on_connection(self):
        client = AIOKafkaClient(bootstrap_servers=self.hosts,
                                metadata_max_age_ms=10000)
        await client.bootstrap()
        self.add_cleanup(client.close)

        await self.wait_topic(client, self.topic)

        node_id = client.get_random_node()
        wait_request = FetchRequest_v0(
            -1,  # replica_id
            500,  # max_wait_ms
            1024 * 1024,  # min_bytes
            [(self.topic, [(0, 0, 1024)])])
        vanilla_request = MetadataRequest([])

        loop = get_running_loop()
        send_time = loop.time()
        long_task = create_task(client.send(node_id, wait_request))
        await asyncio.sleep(0.0001)
        self.assertFalse(long_task.done())

        await client.send(node_id, vanilla_request)
        resp_time = loop.time()
        fetch_resp = await long_task
        # Check error code like resp->topics[0]->partitions[0]->error_code
        self.assertEqual(fetch_resp.topics[0][1][0][1], 0)

        # Check that the vanilla request actually ran after the wait request
        self.assertGreaterEqual(resp_time - send_time, 0.5)
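
The test above relies on requests sent over the same connection being executed strictly one after another. Here is a minimal sketch of that property, assuming a made-up FakeConnection that serializes sends with an asyncio.Lock; aiokafka's real connection object is more involved.

import asyncio
from asyncio import create_task, get_running_loop

# FakeConnection is invented for illustration: one in-flight request at a time.
class FakeConnection:
    def __init__(self):
        self._lock = asyncio.Lock()

    async def send(self, delay):
        async with self._lock:              # serialize requests per connection
            await asyncio.sleep(delay)
            return delay

async def main():
    conn = FakeConnection()
    loop = get_running_loop()
    start = loop.time()
    long_task = create_task(conn.send(0.5))  # a "fetch" that waits server side
    await asyncio.sleep(0)                   # let it grab the lock first
    await conn.send(0.0)                     # a cheap "metadata" request
    assert loop.time() - start >= 0.5        # it had to wait for the fetch
    await long_task

asyncio.run(main())
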
Example #3
    async def test_consumer_stops_iter(self):
        consumer = AIOKafkaConsumer(self.topic,
                                    bootstrap_servers=self.hosts,
                                    auto_offset_reset="earliest")
        await consumer.start()
        self.add_cleanup(consumer.stop)

        async def iterator():
            async for msg in consumer:  # pragma: no cover
                assert False, "No items should be here, got {}".format(msg)

        task = create_task(iterator())
        await asyncio.sleep(0.1)
        # As we didn't produce any data to Kafka
        self.assertFalse(task.done())

        await consumer.stop()
        # Should just stop iterator, no errors
        await task
        # But creating another iterator should result in an error; otherwise
        # we could end up with infinite loops like:
        #
        #   while True:
        #     async for msg in consumer:
        #       print(msg)
        with self.assertRaises(ConsumerStoppedError):
            await iterator()
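
The test above expects iteration to end quietly when the consumer is stopped, while starting a new iteration afterwards must raise. A minimal sketch of that behaviour follows, using an invented FakeConsumer and ConsumerStopped exception rather than aiokafka's real classes.

import asyncio
from asyncio import create_task

# FakeConsumer and ConsumerStopped are invented for illustration.
class ConsumerStopped(Exception):
    pass

class FakeConsumer:
    def __init__(self):
        self._stopped = asyncio.Event()

    async def stop(self):
        self._stopped.set()

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self._stopped.is_set():
            raise ConsumerStopped()
        await self._stopped.wait()       # no data ever arrives in this sketch
        raise StopAsyncIteration         # stop() ends the iteration quietly

async def consume_all(consumer):
    async for _ in consumer:
        raise AssertionError("no messages expected")

async def main():
    consumer = FakeConsumer()
    task = create_task(consume_all(consumer))
    await asyncio.sleep(0.1)
    assert not task.done()               # still waiting for data
    await consumer.stop()
    await task                           # iteration ended without an error
    try:
        await consume_all(consumer)      # but a second run must raise
    except ConsumerStopped:
        pass

asyncio.run(main())
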
Example #4
            def start_pending_task(coro, node_id, self=self):
                task = create_task(coro)
                self._pending_tasks.add(task)
                self._in_flight.add(node_id)

                def on_done(fut, self=self):
                    self._in_flight.discard(node_id)
                task.add_done_callback(on_done)
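
The helper above keeps every spawned task in a set and uses a done callback to drop the node from the in-flight set once the request finishes. A minimal standalone sketch of the same bookkeeping, with an invented Sender class:

import asyncio
from asyncio import create_task

# Sender here is hypothetical; it only demonstrates the task bookkeeping.
class Sender:
    def __init__(self):
        self._pending_tasks = set()
        self._in_flight = set()

    def start_pending_task(self, coro, node_id):
        task = create_task(coro)
        self._pending_tasks.add(task)
        self._in_flight.add(node_id)

        def on_done(fut):
            # Runs as soon as the task completes, successfully or not.
            self._in_flight.discard(node_id)
        task.add_done_callback(on_done)

async def main():
    sender = Sender()
    sender.start_pending_task(asyncio.sleep(0.01), node_id=0)
    assert sender._in_flight == {0}
    await asyncio.gather(*sender._pending_tasks)
    assert sender._in_flight == set()    # cleared by the done callback

asyncio.run(main())
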
Example #5
    async def test_consumer_transactional_commit(self):
        producer = AIOKafkaProducer(bootstrap_servers=self.hosts,
                                    transactional_id="sobaka_producer")
        await producer.start()
        self.add_cleanup(producer.stop)

        producer2 = AIOKafkaProducer(bootstrap_servers=self.hosts)
        await producer2.start()
        self.add_cleanup(producer2.stop)

        consumer = AIOKafkaConsumer(self.topic,
                                    bootstrap_servers=self.hosts,
                                    auto_offset_reset="earliest",
                                    isolation_level="read_committed")
        await consumer.start()
        self.add_cleanup(consumer.stop)

        # We will produce from a transactional producer and then from a
        # non-transactional one. This should block consumption on that
        # partition until the transaction is committed.
        await producer.begin_transaction()
        meta = await producer.send_and_wait(self.topic,
                                            b'Hello from transaction',
                                            partition=0)

        meta2 = await producer2.send_and_wait(self.topic,
                                              b'Hello from non-transaction',
                                              partition=0)

        # The transaction blocked consumption
        task = create_task(consumer.getone())
        await asyncio.sleep(1)
        self.assertFalse(task.done())

        tp = TopicPartition(self.topic, 0)
        self.assertEqual(consumer.last_stable_offset(tp), 0)
        self.assertEqual(consumer.highwater(tp), 2)

        await producer.commit_transaction()

        # Order should be preserved: the first message is yielded first, even
        # though its transaction was committed after the second message was
        # produced.
        msg = await task
        self.assertEqual(msg.offset, meta.offset)
        self.assertEqual(msg.timestamp, meta.timestamp)
        self.assertEqual(msg.value, b"Hello from transaction")
        self.assertEqual(msg.key, None)

        msg = await consumer.getone()
        self.assertEqual(msg.offset, meta2.offset)
        self.assertEqual(msg.timestamp, meta2.timestamp)
        self.assertEqual(msg.value, b"Hello from non-transaction")
        self.assertEqual(msg.key, None)

        # 3, because the commit marker also occupies an offset
        tp = TopicPartition(self.topic, 0)
        self.assertEqual(consumer.last_stable_offset(tp), 3)
        self.assertEqual(consumer.highwater(tp), 3)
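
For reference, the flow exercised by this test can be condensed into the following hedged usage sketch. It uses only the calls that appear above; "localhost:9092" and "my_topic" are placeholders, and a real broker must be running for it to work.

import asyncio
from aiokafka import AIOKafkaProducer, AIOKafkaConsumer

# Condensed sketch of the transactional produce/consume flow shown above.
async def transactional_round_trip():
    producer = AIOKafkaProducer(bootstrap_servers="localhost:9092",
                                transactional_id="demo_producer")
    await producer.start()
    consumer = AIOKafkaConsumer("my_topic",
                                bootstrap_servers="localhost:9092",
                                auto_offset_reset="earliest",
                                isolation_level="read_committed")
    await consumer.start()
    try:
        await producer.begin_transaction()
        await producer.send_and_wait("my_topic", b"inside transaction")
        # A read_committed consumer will not see the message yet...
        await producer.commit_transaction()
        # ...but it becomes visible once the transaction commits.
        msg = await consumer.getone()
        print(msg.offset, msg.value)
    finally:
        await producer.stop()
        await consumer.stop()

asyncio.run(transactional_round_trip())
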
Example #6
    async def stop(self):
        """Flush all pending data and close all connections to kafka cluster"""
        if self._closed:
            return
        self._closed = True

        # If the sender task is down the accumulator has no way to flush
        if self._sender is not None and self._sender.sender_task is not None:
            await asyncio.wait([
                create_task(self._message_accumulator.close()),
                self._sender.sender_task],
                return_when=asyncio.FIRST_COMPLETED)

            await self._sender.close()

        await self.client.close()
        log.debug("The Kafka producer has closed.")
Example #7
    async def test_concurrent_send_on_different_connection_groups(self):
        client = AIOKafkaClient(bootstrap_servers=self.hosts,
                                metadata_max_age_ms=10000)
        await client.bootstrap()
        self.add_cleanup(client.close)

        await self.wait_topic(client, self.topic)

        node_id = client.get_random_node()
        broker = client.cluster.broker_metadata(node_id)
        client.cluster.add_coordinator(node_id,
                                       broker.host,
                                       broker.port,
                                       rack=None,
                                       purpose=(CoordinationType.GROUP, ""))

        wait_request = FetchRequest_v0(
            -1,  # replica_id
            500,  # max_wait_ms
            1024 * 1024,  # min_bytes
            [(self.topic, [(0, 0, 1024)])])
        vanilla_request = MetadataRequest([])

        loop = get_running_loop()
        send_time = loop.time()
        long_task = create_task(client.send(node_id, wait_request))
        await asyncio.sleep(0.0001)
        self.assertFalse(long_task.done())

        await client.send(node_id,
                          vanilla_request,
                          group=ConnectionGroup.COORDINATION)
        resp_time = loop.time()
        self.assertFalse(long_task.done())

        fetch_resp = await long_task
        # Check error code like resp->topics[0]->partitions[0]->error_code
        self.assertEqual(fetch_resp.topics[0][1][0][1], 0)

        # Check that the vanilla request was not blocked by the wait request,
        # since it went over a different connection group
        self.assertLess(resp_time - send_time, 0.5)
Example #8
    async def _sender_routine(self):
        """ Background task, that sends pending batches to leader nodes for
        batch's partition. This incapsulates same logic as Java's `Sender`
        background thread. Because we use asyncio this is more event based
        loop, rather than counting timeout till next possible even like in
        Java.
        """

        tasks = set()
        txn_task = None  # Track a single task for transaction interactions
        try:
            while True:
                # If idempotence or transactions are turned on we need to
                # have a valid PID to send any request below
                await self._maybe_wait_for_pid()

                waiters = set()
                # As transaction coordination is done via a single, separate
                # socket we do not need to pump it to several nodes, as we do
                # with produce requests.
                # We will only have 1 task at a time and will try to spawn
                # another once that is done.
                txn_manager = self._txn_manager
                muted_partitions = self._muted_partitions
                if txn_manager is not None and \
                        txn_manager.transactional_id is not None:
                    if txn_task is None or txn_task.done():
                        txn_task = self._maybe_do_transactional_request()
                        if txn_task is not None:
                            tasks.add(txn_task)
                        else:
                            # Waiters will not be awaited on exit, tasks will
                            waiters.add(txn_manager.make_task_waiter())
                    # We can't have a race condition between
                    # AddPartitionsToTxnRequest and a ProduceRequest, so we
                    # mute the partition until added.
                    muted_partitions = (muted_partitions
                                        | txn_manager.partitions_to_add())
                batches, unknown_leaders_exist = \
                    self._message_accumulator.drain_by_nodes(
                        ignore_nodes=self._in_flight,
                        muted_partitions=muted_partitions)

                # create a produce task for every batch
                for node_id, node_batches in batches.items():
                    task = create_task(self._send_produce_req(
                        node_id, node_batches))
                    self._in_flight.add(node_id)
                    for tp in node_batches:
                        self._muted_partitions.add(tp)
                    tasks.add(task)

                if unknown_leaders_exist:
                    # we have at least one unknown partition's leader,
                    # try to update cluster metadata and wait backoff time
                    fut = self.client.force_metadata_update()
                    waiters |= tasks.union([fut])
                else:
                    fut = self._message_accumulator.data_waiter()
                    waiters |= tasks.union([fut])

                # wait until:
                # * at least one produce task has finished
                # * data for a new partition has arrived
                # * metadata is updated, if a partition leader is unknown
                done, _ = await asyncio.wait(
                    waiters, return_when=asyncio.FIRST_COMPLETED)

                # Done tasks should never produce errors; if they do, it's a
                # bug
                for task in done:
                    task.result()

                tasks -= done

        except asyncio.CancelledError:
            # Done tasks should never produce errors; if they do, it's a bug
            for task in tasks:
                await task
        except (ProducerFenced, OutOfOrderSequenceNumber,
                TransactionalIdAuthorizationFailed):
            raise
        except Exception:  # pragma: no cover
            log.error("Unexpected error in sender routine", exc_info=True)
            raise KafkaError("Unexpected error during batch delivery")
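
The routine above follows a common supervisor-loop shape: spawn a task per unit of drained work, wait until at least one task finishes or new data arrives, surface errors by calling task.result(), then go around again. A minimal sketch of that loop, using an asyncio.Queue as a stand-in for the message accumulator (all names invented):

import asyncio
from asyncio import create_task

# Hypothetical handle()/sender_routine() pair showing the supervisor loop.
async def handle(item):
    await asyncio.sleep(0.01)
    print("sent", item)

async def sender_routine(queue):
    tasks = set()
    while True:
        # spawn a task for every piece of drained work
        while not queue.empty():
            tasks.add(create_task(handle(queue.get_nowait())))

        data_waiter = create_task(queue.get())       # wakes us on new data
        done, _ = await asyncio.wait(
            tasks | {data_waiter}, return_when=asyncio.FIRST_COMPLETED)

        if data_waiter in done:
            queue.put_nowait(data_waiter.result())   # put the item back
        else:
            data_waiter.cancel()
        for task in done - {data_waiter}:
            task.result()                            # re-raise any bug here
        tasks -= done

async def main():
    queue = asyncio.Queue()
    routine = create_task(sender_routine(queue))
    for i in range(3):
        await queue.put(i)
    await asyncio.sleep(0.1)
    routine.cancel()

asyncio.run(main())
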
Example #9
    async def start(self):
        # If the producer is idempotent we need to ensure we have a PID assigned
        await self._maybe_wait_for_pid()
        self._sender_task = create_task(self._sender_routine())
        self._sender_task.add_done_callback(self._fail_all)
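
start() wires a done callback onto the background sender task so that, if the task ever dies, pending operations are failed instead of hanging forever. A minimal sketch of that fail-fast wiring, with invented names:

import asyncio
from asyncio import create_task

# Hypothetical example: a crashed background task fails the waiting future.
async def main():
    pending_delivery = asyncio.get_running_loop().create_future()

    def fail_all(task):
        # Push the background task's exception into whatever callers await.
        if task.exception() is not None and not pending_delivery.done():
            pending_delivery.set_exception(task.exception())

    async def broken_sender():
        raise RuntimeError("sender crashed")

    sender_task = create_task(broken_sender())
    sender_task.add_done_callback(fail_all)

    try:
        await pending_delivery
    except RuntimeError as exc:
        print("delivery failed because:", exc)

asyncio.run(main())
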
Example #10
    async def test_load_metadata(self):
        brokers = [(0, 'broker_1', 4567), (1, 'broker_2', 5678)]

        topics = [
            (NO_ERROR, 'topic_1', [(NO_ERROR, 0, 1, [1, 2], [1, 2])]),
            (NO_ERROR, 'topic_2', [
                (NO_LEADER, 0, -1, [], []),
                (NO_LEADER, 1, 1, [], []),
            ]),
            (NO_LEADER, 'topic_no_partitions', []),
            (UNKNOWN_TOPIC_OR_PARTITION, 'topic_unknown', []),
            (NO_ERROR, 'topic_3', [(NO_ERROR, 0, 0, [0, 1], [0, 1]),
                                   (NO_ERROR, 1, 1, [1, 0], [1, 0]),
                                   (NO_ERROR, 2, 0, [0, 1], [0, 1])]),
            (NO_ERROR, 'topic_4', [
                (NO_ERROR, 0, 0, [0, 1], [0, 1]),
                (REPLICA_NOT_AVAILABLE, 1, 1, [1, 0], [1, 0]),
            ]),
            (INVALID_TOPIC, 'topic_5', []),  # Just ignored
            (UNKNOWN_ERROR, 'topic_6', []),  # Just ignored
            (TOPIC_AUTHORIZATION_FAILED, 'topic_auth_error', []),
        ]

        async def send(request_id):
            return MetadataResponse(brokers, topics)

        mocked_conns = {(0, 0): mock.MagicMock()}
        mocked_conns[(0, 0)].send.side_effect = send
        client = AIOKafkaClient(bootstrap_servers=['broker_1:4567'])
        task = create_task(client._md_synchronizer())
        client._conns = mocked_conns
        client.cluster.update_metadata(MetadataResponse(brokers[:1], []))

        await client.force_metadata_update()
        task.cancel()

        md = client.cluster
        c_brokers = md.brokers()
        self.assertEqual(len(c_brokers), 2)
        expected_brokers = [(0, 'broker_1', 4567, None),
                            (1, 'broker_2', 5678, None)]
        self.assertEqual(sorted(expected_brokers), sorted(list(c_brokers)))
        c_topics = md.topics()
        self.assertEqual(len(c_topics), 4)
        self.assertEqual(md.partitions_for_topic('topic_1'), set([0]))
        self.assertEqual(md.partitions_for_topic('topic_2'), set([0, 1]))
        self.assertEqual(md.partitions_for_topic('topic_3'), set([0, 1, 2]))
        self.assertEqual(md.partitions_for_topic('topic_4'), set([0, 1]))
        self.assertEqual(md.available_partitions_for_topic('topic_2'),
                         set([1]))

        mocked_conns[(0, 0)].connected.return_value = False
        is_ready = await client.ready(0)
        self.assertEqual(is_ready, False)
        is_ready = await client.ready(1)
        self.assertEqual(is_ready, False)
        self.assertEqual(mocked_conns, {})

        with self.assertRaises(NodeNotReadyError):
            await client.send(0, None)

        self.assertEqual(md.unauthorized_topics, {'topic_auth_error'})
Example #11
    async def test_fetcher__update_fetch_positions(self):
        client = AIOKafkaClient(bootstrap_servers=[])
        subscriptions = SubscriptionState()
        fetcher = Fetcher(client, subscriptions)
        self.add_cleanup(fetcher.close)
        # Disable background task
        fetcher._fetch_task.cancel()
        try:
            await fetcher._fetch_task
        except asyncio.CancelledError:
            pass
        fetcher._fetch_task = create_task(asyncio.sleep(1000000))

        partition = TopicPartition('test', 0)
        offsets = {partition: OffsetAndTimestamp(12, -1)}

        async def _proc_offset_request(node_id, topic_data):
            return offsets

        fetcher._proc_offset_request = mock.Mock()
        fetcher._proc_offset_request.side_effect = _proc_offset_request

        def reset_assignment():
            subscriptions.assign_from_user({partition})
            assignment = subscriptions.subscription.assignment
            tp_state = assignment.state_value(partition)
            return assignment, tp_state

        assignment, tp_state = reset_assignment()

        self.assertIsNone(tp_state._position)

        # CASE: reset from committed
        # In basic case we will need to wait for committed
        update_task = create_task(
            fetcher._update_fetch_positions(assignment, 0, [partition]))
        await asyncio.sleep(0.1)
        self.assertFalse(update_task.done())
        # Will continue only after committed is resolved
        tp_state.update_committed(OffsetAndMetadata(4, ""))
        needs_wakeup = await update_task
        self.assertFalse(needs_wakeup)
        self.assertEqual(tp_state._position, 4)
        self.assertEqual(fetcher._proc_offset_request.call_count, 0)

        # CASE: will not query committed if position already present
        await fetcher._update_fetch_positions(assignment, 0, [partition])
        self.assertEqual(tp_state._position, 4)
        self.assertEqual(fetcher._proc_offset_request.call_count, 0)

        # CASE: awaiting_reset for the partition
        tp_state.await_reset(OffsetResetStrategy.LATEST)
        self.assertIsNone(tp_state._position)
        await fetcher._update_fetch_positions(assignment, 0, [partition])
        self.assertEqual(tp_state._position, 12)
        self.assertEqual(fetcher._proc_offset_request.call_count, 1)

        # CASE: seeked while waiting for committed to be resolved
        assignment, tp_state = reset_assignment()
        update_task = create_task(
            fetcher._update_fetch_positions(assignment, 0, [partition]))
        await asyncio.sleep(0.1)
        self.assertFalse(update_task.done())

        tp_state.seek(8)
        tp_state.update_committed(OffsetAndMetadata(4, ""))
        await update_task
        self.assertEqual(tp_state._position, 8)
        self.assertEqual(fetcher._proc_offset_request.call_count, 1)

        # CASE: awaiting_reset during waiting for committed
        assignment, tp_state = reset_assignment()
        update_task = create_task(
            fetcher._update_fetch_positions(assignment, 0, [partition]))
        await asyncio.sleep(0.1)
        self.assertFalse(update_task.done())

        tp_state.await_reset(OffsetResetStrategy.LATEST)
        tp_state.update_committed(OffsetAndMetadata(4, ""))
        await update_task
        self.assertEqual(tp_state._position, 12)
        self.assertEqual(fetcher._proc_offset_request.call_count, 2)

        # CASE: reset using default strategy if committed offset undefined
        assignment, tp_state = reset_assignment()
        loop = get_running_loop()
        loop.call_later(0.01, tp_state.update_committed,
                        OffsetAndMetadata(-1, ""))
        await fetcher._update_fetch_positions(assignment, 0, [partition])
        self.assertEqual(tp_state._position, 12)
        self.assertEqual(fetcher._records, {})

        # CASE: set error if _default_reset_strategy = OffsetResetStrategy.NONE
        assignment, tp_state = reset_assignment()
        loop.call_later(0.01, tp_state.update_committed,
                        OffsetAndMetadata(-1, ""))
        fetcher._default_reset_strategy = OffsetResetStrategy.NONE
        needs_wakeup = await fetcher._update_fetch_positions(
            assignment, 0, [partition])
        self.assertTrue(needs_wakeup)
        self.assertIsNone(tp_state._position)
        self.assertIsInstance(fetcher._records[partition], FetchError)
        fetcher._records.clear()

        # CASE: if _proc_offset_request errored, we will retry on another spin
        fetcher._proc_offset_request.side_effect = UnknownError()
        assignment, tp_state = reset_assignment()
        tp_state.await_reset(OffsetResetStrategy.LATEST)
        await fetcher._update_fetch_positions(assignment, 0, [partition])
        self.assertIsNone(tp_state._position)
        self.assertTrue(tp_state.awaiting_reset)

        # CASE: reset 2 partitions separately, 1 will raise, 1 will get
        #       committed
        fetcher._proc_offset_request.side_effect = _proc_offset_request
        partition2 = TopicPartition('test', 1)
        subscriptions.assign_from_user({partition, partition2})
        assignment = subscriptions.subscription.assignment
        tp_state = assignment.state_value(partition)
        tp_state2 = assignment.state_value(partition2)
        tp_state.await_reset(OffsetResetStrategy.LATEST)
        loop.call_later(0.01, tp_state2.update_committed,
                        OffsetAndMetadata(5, ""))
        await fetcher._update_fetch_positions(assignment, 0,
                                              [partition, partition2])
        self.assertEqual(tp_state.position, 12)
        self.assertEqual(tp_state2.position, 5)
Example #12
    def _create_reader_task(self):
        self_ref = weakref.ref(self)
        read_task = create_task(self._read(self_ref))
        read_task.add_done_callback(
            functools.partial(self._on_read_task_error, self_ref))
        return read_task
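
The reader task above is given only a weak reference to its owner, so a connection object that nobody references any more can still be garbage collected while the task runs. A minimal sketch of the pattern with an invented Connection class:

import asyncio
import gc
import weakref
from asyncio import create_task

# Hypothetical Connection: the reader holds only a weak reference to it.
class Connection:
    def __init__(self):
        self._read_task = create_task(self._read(weakref.ref(self)))

    @staticmethod
    async def _read(self_ref):
        while True:
            await asyncio.sleep(0.01)     # pretend to wait for bytes
            conn = self_ref()             # upgrade only for a short time
            if conn is None:
                return                    # owner was garbage collected
            conn.handle_data(b"...")
            del conn                      # drop the strong reference again

    def handle_data(self, data):
        pass

async def main():
    conn = Connection()
    task = conn._read_task
    del conn                              # no strong refs to the owner left...
    gc.collect()
    await task                            # ...so the reader exits on its own

asyncio.run(main())
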
Example #13
    def __init__(
            self, client, subscriptions, *,
            key_deserializer=None,
            value_deserializer=None,
            fetch_min_bytes=1,
            fetch_max_bytes=52428800,
            fetch_max_wait_ms=500,
            max_partition_fetch_bytes=1048576,
            check_crcs=True,
            fetcher_timeout=0.2,
            prefetch_backoff=0.1,
            retry_backoff_ms=100,
            auto_offset_reset='latest',
            isolation_level="read_uncommitted"):
        self._client = client
        self._loop = client._loop
        self._key_deserializer = key_deserializer
        self._value_deserializer = value_deserializer
        self._fetch_min_bytes = fetch_min_bytes
        self._fetch_max_bytes = fetch_max_bytes
        self._fetch_max_wait_ms = fetch_max_wait_ms
        self._max_partition_fetch_bytes = max_partition_fetch_bytes
        self._check_crcs = check_crcs
        self._fetcher_timeout = fetcher_timeout
        self._prefetch_backoff = prefetch_backoff
        self._retry_backoff = retry_backoff_ms / 1000
        self._subscriptions = subscriptions
        self._default_reset_strategy = OffsetResetStrategy.from_str(
            auto_offset_reset)

        if isolation_level == "read_uncommitted":
            self._isolation_level = READ_UNCOMMITTED
        elif isolation_level == "read_committed":
            self._isolation_level = READ_COMMITTED
        else:
            raise ValueError(
                "Incorrect isolation level {}".format(isolation_level))

        self._records = collections.OrderedDict()
        self._in_flight = set()
        self._pending_tasks = set()

        self._wait_consume_future = None
        self._fetch_waiters = set()

        # SubscriptionState will pass Coordination critical errors to those
        # waiters directly
        self._subscriptions.register_fetch_waiters(self._fetch_waiters)

        if client.api_version >= (0, 11):
            req_version = 4
        elif client.api_version >= (0, 10, 1):
            req_version = 3
        elif client.api_version >= (0, 10):
            req_version = 2
        else:
            req_version = 1
        self._fetch_request_class = FetchRequest[req_version]

        self._fetch_task = create_task(self._fetch_requests_routine())

        self._closed = False
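
The constructor above spawns a long-lived background task, which means a Fetcher can only be created inside a running event loop and must be closed explicitly so the task does not outlive its owner. A minimal sketch of that lifecycle with an invented BackgroundWorker:

import asyncio
from asyncio import create_task

# Hypothetical BackgroundWorker: spawn a routine in __init__, cancel it in close().
class BackgroundWorker:
    def __init__(self):
        self._closed = False
        self._task = create_task(self._routine())  # requires a running loop

    async def _routine(self):
        while True:
            await asyncio.sleep(0.01)               # periodic background work

    async def close(self):
        if self._closed:
            return
        self._closed = True
        self._task.cancel()
        try:
            await self._task
        except asyncio.CancelledError:
            pass

async def main():
    worker = BackgroundWorker()
    await asyncio.sleep(0.05)
    await worker.close()

asyncio.run(main())
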
Example #14
    async def check_version(self, node_id=None):
        """Attempt to guess the broker version"""
        if node_id is None:
            default_group_conns = [
                n_id for (n_id, group) in self._conns.keys()
                if group == ConnectionGroup.DEFAULT
            ]
            if default_group_conns:
                node_id = default_group_conns[0]
            else:
                assert self.cluster.brokers(), 'no brokers in metadata'
                node_id = list(self.cluster.brokers())[0].nodeId

        from kafka.protocol.admin import (ListGroupsRequest_v0,
                                          ApiVersionRequest_v0)
        from kafka.protocol.commit import (OffsetFetchRequest_v0,
                                           GroupCoordinatorRequest_v0)
        from kafka.protocol.metadata import MetadataRequest_v0
        test_cases = [
            ((0, 10), ApiVersionRequest_v0()),
            ((0, 9), ListGroupsRequest_v0()),
            ((0, 8, 2), GroupCoordinatorRequest_v0('aiokafka-default-group')),
            ((0, 8, 1), OffsetFetchRequest_v0('aiokafka-default-group', [])),
            ((0, 8, 0), MetadataRequest_v0([])),
        ]

        # Kafka kills the connection when it does not recognize an API
        # request, so we can send a test request and then follow immediately
        # with a vanilla MetadataRequest. If the server did not recognize the
        # first request, both will fail with a ConnectionError that wraps
        # socket.error (32, 54, or 104)
        conn = await self._get_conn(node_id, no_hint=True)
        if conn is None:
            raise KafkaConnectionError(
                "No connection to node with id {}".format(node_id))
        for version, request in test_cases:
            try:
                if not conn.connected():
                    await conn.connect()
                assert conn, 'no connection to node with id {}'.format(node_id)
                # The request can be ignored by the Kafka broker, so we send
                # a metadata request and wait for its response
                task = create_task(conn.send(request))
                await asyncio.wait([task], timeout=0.1)
                try:
                    await conn.send(MetadataRequest_v0([]))
                except KafkaError:
                    # metadata request can be cancelled in case
                    # of invalid correlationIds order
                    pass
                response = await task
            except KafkaError:
                continue
            else:
                # To avoid leaving the connection in an undefined state
                if node_id != "bootstrap" and conn.connected():
                    conn.close()
                if isinstance(request, ApiVersionRequest_v0):
                    # Starting with the 0.10 broker we determine the version
                    # by looking at the ApiVersionResponse
                    version = self._check_api_version_response(response)
                return version

        raise UnrecognizedBrokerVersion()
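
check_version() probes the broker from the newest protocol version to the oldest and reports the first request the peer understands. A minimal sketch of that fallback strategy, using a fake in-process "broker" and made-up request names; note the real code above bounds each probe with asyncio.wait rather than wait_for.

import asyncio

# fake_send and the request names are invented for illustration.
async def fake_send(request):
    supported = {"ApiVersionRequest", "ListGroupsRequest"}
    if request not in supported:
        raise ConnectionError("peer closed the connection")
    return "ok"

async def check_version():
    test_cases = [
        ((0, 10), "ApiVersionRequest"),
        ((0, 9), "ListGroupsRequest"),
        ((0, 8, 2), "GroupCoordinatorRequest"),
    ]
    for version, request in test_cases:
        try:
            await asyncio.wait_for(fake_send(request), timeout=0.1)
        except (ConnectionError, asyncio.TimeoutError):
            continue                      # peer did not recognize the request
        return version
    raise RuntimeError("unrecognized broker version")

print(asyncio.run(check_version()))       # (0, 10)
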
Example #15
    async def bootstrap(self):
        """Try to to bootstrap initial cluster metadata"""
        assert self._loop is get_running_loop(), (
            "Please create objects with the same loop as running with")
        # Use request v0 for bootstrap if we are not sure v1 is available
        if self._api_version == "auto" or self._api_version < (0, 10):
            metadata_request = MetadataRequest[0]([])
        else:
            metadata_request = MetadataRequest[1]([])

        version_hint = None
        if self._api_version != "auto":
            version_hint = self._api_version

        for host, port, _ in self.hosts:
            log.debug("Attempting to bootstrap via node at %s:%s", host, port)

            try:
                bootstrap_conn = await create_conn(
                    host,
                    port,
                    client_id=self._client_id,
                    request_timeout_ms=self._request_timeout_ms,
                    ssl_context=self._ssl_context,
                    security_protocol=self._security_protocol,
                    max_idle_ms=self._connections_max_idle_ms,
                    sasl_mechanism=self._sasl_mechanism,
                    sasl_plain_username=self._sasl_plain_username,
                    sasl_plain_password=self._sasl_plain_password,
                    sasl_kerberos_service_name=self._sasl_kerberos_service_name,  # noqa: E501
                    sasl_kerberos_domain_name=self._sasl_kerberos_domain_name,
                    sasl_oauth_token_provider=self._sasl_oauth_token_provider,
                    version_hint=version_hint)
            except (OSError, asyncio.TimeoutError) as err:
                log.error('Unable to connect to "%s:%s": %s', host, port, err)
                continue

            try:
                metadata = await bootstrap_conn.send(metadata_request)
            except (KafkaError, asyncio.TimeoutError) as err:
                log.warning('Unable to request metadata from "%s:%s": %s',
                            host, port, err)
                bootstrap_conn.close()
                continue

            self.cluster.update_metadata(metadata)

            # A cluster with no topics can return no broker metadata...
            # In that case, we should keep the bootstrap connection till
            # we get a normal cluster layout.
            if not len(self.cluster.brokers()):
                bootstrap_id = ('bootstrap', ConnectionGroup.DEFAULT)
                self._conns[bootstrap_id] = bootstrap_conn
            else:
                bootstrap_conn.close()

            log.debug('Received cluster metadata: %s', self.cluster)
            break
        else:
            raise KafkaConnectionError('Unable to bootstrap from {}'.format(
                self.hosts))

        # detect api version if needed
        if self._api_version == 'auto':
            self._api_version = await self.check_version()

        if self._sync_task is None:
            # starting metadata synchronizer task
            self._sync_task = create_task(self._md_synchronizer())
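
bootstrap() walks the seed hosts with a for/else loop: the first host that answers wins, and only if every host fails does the else branch raise. A minimal sketch of that structure with an invented connect_and_fetch helper and placeholder host names:

import asyncio

# connect_and_fetch and the host list below are invented placeholders.
async def connect_and_fetch(host, port):
    if port != 9092:
        raise OSError("connection refused")
    return {"brokers": ["broker-1"]}

async def bootstrap(hosts):
    for host, port in hosts:
        try:
            metadata = await connect_and_fetch(host, port)
        except (OSError, asyncio.TimeoutError) as err:
            print("unable to connect to %s:%s: %s" % (host, port, err))
            continue
        print("bootstrapped from %s:%s" % (host, port))
        break
    else:
        # the loop fell through without a break: nobody answered
        raise ConnectionError("unable to bootstrap from %r" % (hosts,))
    return metadata

asyncio.run(bootstrap([("kafka-1", 9091), ("kafka-2", 9092)]))
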
Example #16
    async def test_basic(self):
        cluster = ClusterMetadata(metadata_max_age_ms=10000)
        ma = MessageAccumulator(cluster, 1000, 0, 30)
        data_waiter = ma.data_waiter()
        done, _ = await asyncio.wait([data_waiter], timeout=0.2)
        self.assertFalse(bool(done))  # no data in accumulator yet...

        tp0 = TopicPartition("test-topic", 0)
        tp1 = TopicPartition("test-topic", 1)
        await ma.add_message(tp0, b'key', b'value', timeout=2)
        await ma.add_message(tp1, None, b'value without key', timeout=2)

        done, _ = await asyncio.wait([data_waiter], timeout=0.2)
        self.assertTrue(bool(done))

        batches, unknown_leaders_exist = ma.drain_by_nodes(ignore_nodes=[])
        self.assertEqual(batches, {})
        self.assertEqual(unknown_leaders_exist, True)

        def mocked_leader_for_partition(tp):
            if tp == tp0:
                return 0
            if tp == tp1:
                return 1
            return -1

        cluster.leader_for_partition = mock.MagicMock()
        cluster.leader_for_partition.side_effect = mocked_leader_for_partition
        batches, unknown_leaders_exist = ma.drain_by_nodes(ignore_nodes=[])
        self.assertEqual(len(batches), 2)
        self.assertEqual(unknown_leaders_exist, False)
        m_set0 = batches[0].get(tp0)
        self.assertEqual(type(m_set0), MessageBatch)
        m_set1 = batches[1].get(tp1)
        self.assertEqual(type(m_set1), MessageBatch)
        self.assertEqual(m_set0.expired(), False)

        data_waiter = asyncio.ensure_future(ma.data_waiter())
        done, _ = await asyncio.wait([data_waiter], timeout=0.2)
        self.assertFalse(bool(done))  # no data in accumulator again...

        # testing batch overflow
        tp2 = TopicPartition("test-topic", 2)
        await ma.add_message(tp0, None, b'some short message', timeout=2)
        await ma.add_message(tp0, None, b'some other short message', timeout=2)
        await ma.add_message(tp1, None, b'0123456789' * 70, timeout=2)
        await ma.add_message(tp2,
                             None,
                             b'message to unknown leader',
                             timeout=2)
        # Next we try to add a message with len=500; as we have
        # buffer_size=1000, the coroutine will block until the data is
        # drained
        add_task = create_task(
            ma.add_message(tp1, None, b'0123456789' * 50, timeout=2))
        done, _ = await asyncio.wait([add_task], timeout=0.2)
        self.assertFalse(bool(done))

        batches, unknown_leaders_exist = ma.drain_by_nodes(ignore_nodes=[1, 2])
        self.assertEqual(unknown_leaders_exist, True)
        m_set0 = batches[0].get(tp0)
        self.assertEqual(m_set0._builder._relative_offset, 2)
        m_set1 = batches[1].get(tp1)
        self.assertEqual(m_set1, None)

        done, _ = await asyncio.wait([add_task], timeout=0.1)
        self.assertFalse(bool(done))  # we have not drained data for tp1 yet

        batches, unknown_leaders_exist = ma.drain_by_nodes(ignore_nodes=[])
        self.assertEqual(unknown_leaders_exist, True)
        m_set0 = batches[0].get(tp0)
        self.assertEqual(m_set0, None)
        m_set1 = batches[1].get(tp1)
        self.assertEqual(m_set1._builder._relative_offset, 1)

        done, _ = await asyncio.wait([add_task], timeout=0.2)
        self.assertTrue(bool(done))
        batches, unknown_leaders_exist = ma.drain_by_nodes(ignore_nodes=[])
        self.assertEqual(unknown_leaders_exist, True)
        m_set1 = batches[1].get(tp1)
        self.assertEqual(m_set1._builder._relative_offset, 1)
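
The last part of the test exercises backpressure: add_message() blocks once the accumulator's buffer is full and only resumes after drain_by_nodes() frees space. A minimal sketch of that behaviour with an invented BoundedBuffer built on asyncio.Condition (not aiokafka's MessageAccumulator):

import asyncio
from asyncio import create_task

# BoundedBuffer is hypothetical; it only mimics the blocking add / drain cycle.
class BoundedBuffer:
    def __init__(self, limit):
        self._limit = limit
        self._size = 0
        self._not_full = asyncio.Condition()

    async def add(self, size):
        async with self._not_full:
            while self._size + size > self._limit:
                await self._not_full.wait()   # blocks like add_message()
            self._size += size

    async def drain(self):
        async with self._not_full:
            self._size = 0                    # like drain_by_nodes()
            self._not_full.notify_all()

async def main():
    buf = BoundedBuffer(limit=1000)
    await buf.add(900)
    add_task = create_task(buf.add(500))      # does not fit yet
    await asyncio.sleep(0.1)
    assert not add_task.done()
    await buf.drain()                         # make room
    await add_task                            # now it completes

asyncio.run(main())
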