def ensure_coordinator_known(self):
    """Block until the coordinator for this group is known (and
    we have an active connection -- Java client uses unsent queue).

    Loops until ``coordinator_unknown()`` reports a known coordinator,
    issuing a GroupCoordinatorRequest to a random ready broker on each
    iteration.

    Raises:
        Errors.NoBrokersAvailable: if no broker can be selected or the
            selected broker is not ready for requests.
    """
    while (yield from self.coordinator_unknown()):
        node_id = self._client.get_random_node()
        # Fail fast if there is no usable broker to query at all.
        if node_id is None or not (yield from self._client.ready(node_id)):
            raise Errors.NoBrokersAvailable()
        log.debug(
            "Sending group coordinator request for group %s to broker %s",
            self.group_id, node_id)
        request = GroupCoordinatorRequest(self.group_id)
        try:
            resp = yield from self._send_req(
                node_id, request, group=ConnectionGroup.DEFAULT)
        except Errors.KafkaError as err:
            log.error("Group Coordinator Request failed: %s", err)
            # Back off before the next attempt; the while loop retries.
            yield from asyncio.sleep(
                self._retry_backoff_ms / 1000, loop=self.loop)
            if err.retriable is True:
                # Retriable errors may indicate stale metadata; refresh it
                # before retrying.
                yield from self._client.force_metadata_update()
        else:
            log.debug("Received group coordinator response %s", resp)
            if not (yield from self.coordinator_unknown()):
                # We already found the coordinator, so ignore the response
                log.debug("Coordinator already known, ignoring response")
                break
            self.coordinator_id = resp.coordinator_id
            log.info("Discovered coordinator %s for group %s",
                     self.coordinator_id, self.group_id)
def test_correlation_id_on_group_coordinator_req(self):
    """Verify that a GroupCoordinatorRequest round-trips through the
    connection and the response is matched by correlation id.

    Bug fix: the read task was started with ``asyncio.async(...)``, which
    is a SyntaxError on Python 3.7+ (``async`` became a reserved keyword).
    ``asyncio.ensure_future`` is the drop-in replacement, available since
    Python 3.4.4.
    """
    host, port = self.kafka_host, self.kafka_port
    request = GroupCoordinatorRequest(consumer_group='test')
    # setup connection with mocked reader and writer
    conn = AIOKafkaConnection(host=host, port=port, loop=self.loop)
    # setup reader
    reader = mock.MagicMock()
    int32 = struct.Struct('>i')
    resp = GroupCoordinatorResponse(
        error_code=0, coordinator_id=22, host='127.0.0.1', port=3333)
    resp = resp.encode()
    resp = int32.pack(0) + resp  # set correlation id to 0
    # Two sequential reads: first the 4-byte size prefix, then the payload.
    reader.readexactly.side_effect = [
        asyncio.coroutine(lambda *a, **kw: int32.pack(len(resp)))(),
        asyncio.coroutine(lambda *a, **kw: resp)()]
    writer = mock.MagicMock()
    conn._reader = reader
    conn._writer = writer
    # invoke reader task (asyncio.ensure_future replaces the removed
    # asyncio.async)
    conn._read_task = asyncio.ensure_future(conn._read(), loop=self.loop)

    response = yield from conn.send(request)
    self.assertIsInstance(response, GroupCoordinatorResponse)
    self.assertEqual(response.error_code, 0)
    self.assertEqual(response.coordinator_id, 22)
    self.assertEqual(response.host, '127.0.0.1')
    self.assertEqual(response.port, 3333)
def _send_group_metadata_request(self):
    """Discover the current coordinator for the group.

    Returns:
        Future: resolves to the node id of the coordinator
    """
    # Pick the least busy broker; bail out early when none is usable.
    broker = self._client.least_loaded_node()
    if broker is None or not self._client.ready(broker):
        return Future().failure(Errors.NoBrokersAvailable())

    log.debug("Issuing group metadata request to broker %s", broker)
    coordinator_req = GroupCoordinatorRequest(self.group_id)
    result = Future()
    # Chain the in-flight send onto the returned future via callbacks.
    in_flight = self._client.send(broker, coordinator_req)
    in_flight.add_callback(self._handle_group_coordinator_response, result)
    in_flight.add_errback(self._failed_request, broker, coordinator_req, result)
    return result
def check_version(self, node_id=None):
    """Attempt to guess the broker version

    Probes the broker with version-specific requests, newest first, and
    returns the first version string whose request succeeds.

    Args:
        node_id: broker to probe; when None, an arbitrary known
            connection (or broker from cluster metadata) is used.

    Returns:
        str: version label, one of '0.9', '0.8.2', '0.8.1', '0.8.0'

    Raises:
        ConnectionError: if no connection to the chosen node exists.
        UnrecognizedBrokerVersion: if every probe request fails.
    """
    if node_id is None:
        if self._conns:
            # Prefer a node we already hold a connection object for.
            node_id = list(self._conns.keys())[0]
        else:
            assert self.cluster.brokers(), 'no brokers in metadata'
            node_id = list(self.cluster.brokers())[0].nodeId

    from kafka.protocol.admin import ListGroupsRequest
    from kafka.protocol.commit import (
        OffsetFetchRequest_v0, GroupCoordinatorRequest)
    from kafka.protocol.metadata import MetadataRequest
    # Ordered newest-to-oldest: the first request the broker accepts
    # determines the reported version.
    test_cases = [
        ('0.9', ListGroupsRequest()),
        ('0.8.2', GroupCoordinatorRequest('kafka-python-default-group')),
        ('0.8.1', OffsetFetchRequest_v0('kafka-python-default-group', [])),
        ('0.8.0', MetadataRequest([])),
    ]

    # kafka kills the connection when it doesnt recognize an API request
    # so we can send a test request and then follow immediately with a
    # vanilla MetadataRequest. If the server did not recognize the first
    # request, both will be failed with a ConnectionError that wraps
    # socket.error (32, 54, or 104)
    conn = yield from self._get_conn(node_id)
    if conn is None:
        raise ConnectionError(
            "No connection to node with id {}".format(node_id))
    for version, request in test_cases:
        try:
            # The broker may have dropped the connection after a failed
            # probe; reconnect before each attempt.
            if not conn.connected():
                yield from conn.connect()
            assert conn, 'no connection to node with id {}'.format(node_id)
            yield from conn.send(request)
        except KafkaError:
            # Probe rejected (or connection killed) -- try an older API.
            continue
        else:
            return version

    raise UnrecognizedBrokerVersion()
async def test_correlation_id_on_group_coordinator_req(self):
    """GroupCoordinatorRequest round-trips through a connection with a
    mocked transport, and the response decodes with the expected fields."""
    host, port = self.kafka_host, self.kafka_port
    request = GroupCoordinatorRequest(consumer_group='test')
    # Connection under test, with its reader/writer replaced by mocks.
    conn = AIOKafkaConnection(host=host, port=port)

    mocked_reader = mock.MagicMock()
    size_struct = struct.Struct('>i')
    payload = GroupCoordinatorResponse(
        error_code=0, coordinator_id=22,
        host='127.0.0.1', port=3333).encode()
    payload = size_struct.pack(0) + payload  # set correlation id to 0

    async def read_size_prefix(*args: Any, **kw: Any):
        return size_struct.pack(len(payload))

    async def read_payload(*args: Any, **kw: Any):
        return payload

    # First readexactly yields the 4-byte length, the second the body.
    mocked_reader.readexactly.side_effect = [
        read_size_prefix(), read_payload()]
    conn._reader = mocked_reader
    conn._writer = mock.MagicMock()
    # Start the background reader so the response gets dispatched.
    conn._read_task = conn._create_reader_task()

    response = await conn.send(request)
    self.assertIsInstance(response, GroupCoordinatorResponse)
    self.assertEqual(response.error_code, 0)
    self.assertEqual(response.coordinator_id, 22)
    self.assertEqual(response.host, '127.0.0.1')
    self.assertEqual(response.port, 3333)