def test_send_request(self):
    """Bootstrap a client, send a MetadataRequest to a random broker
    and verify a MetadataResponse comes back."""
    kafka_client = AIOKafkaClient(
        loop=self.loop, bootstrap_servers=self.hosts)
    yield from kafka_client.bootstrap()
    broker_id = kafka_client.get_random_node()
    response = yield from kafka_client.send(broker_id, MetadataRequest([]))
    self.assertIsInstance(response, MetadataResponse)
    yield from kafka_client.close()
def test_client_receive_zero_brokers(self):
    """A metadata update that returns an empty broker list must not
    purge the previously known brokers from cluster metadata."""
    brokers = [
        (0, 'broker_1', 4567),
        (1, 'broker_2', 5678)
    ]
    correct_meta = MetadataResponse(brokers, [])
    # Degenerate response: no brokers at all.
    bad_response = MetadataResponse([], [])

    @asyncio.coroutine
    def send(*args, **kwargs):
        return bad_response

    client = AIOKafkaClient(loop=self.loop,
                            bootstrap_servers=['broker_1:4567'],
                            api_version="0.10")
    conn = mock.Mock()
    client._conns = [mock.Mock()]
    client._get_conn = mock.Mock()
    client._get_conn.side_effect = asyncio.coroutine(lambda x: conn)
    conn.send = mock.Mock()
    conn.send.side_effect = send
    # Seed the cluster with the known-good broker list first.
    client.cluster.update_metadata(correct_meta)
    brokers_before = client.cluster.brokers()

    yield from client._metadata_update(client.cluster, [])

    # The broker list should not be purged
    self.assertNotEqual(client.cluster.brokers(), set([]))
    self.assertEqual(client.cluster.brokers(), brokers_before)
def test_send_broker_unaware_request_fail(self):
    "Tests that call fails when all hosts are unavailable"
    mocked_conns = {("kafka01", 9092): mock.MagicMock(),
                    ("kafka02", 9092): mock.MagicMock()}
    # inject KafkaConnection side effects: both brokers' send() futures
    # fail, so no broker can serve the request.
    fut1 = asyncio.Future(loop=self.loop)
    fut1.set_exception(RuntimeError("kafka01 went away (unittest)"))
    mocked_conns[("kafka01", 9092)].send.return_value = fut1
    fut2 = asyncio.Future(loop=self.loop)
    fut2.set_exception(RuntimeError("kafka02 went away (unittest)"))
    mocked_conns[("kafka02", 9092)].send.return_value = fut2

    client = AIOKafkaClient(["kafka01:9092", "kafka02:9092"],
                            loop=self.loop)
    client._conns = mocked_conns

    @asyncio.coroutine
    def go():
        # With every broker failing the client must give up with
        # KafkaUnavailableError rather than hang or succeed.
        with self.assertRaises(KafkaUnavailableError):
            yield from client._send_broker_unaware_request(
                payloads=["fake request"],
                encoder_fn=mock.MagicMock(
                    return_value=b"fake encoded message"),
                decoder_fn=lambda x: x,
            )
        # Every broker must have been tried with the encoded payload.
        for key, conn in mocked_conns.items():
            conn.send.assert_called_with(b"fake encoded message")

    self.loop.run_until_complete(go())
def test_send_produce_request_raises_when_noleader(self, protocol):
    """Send producer request raises LeaderNotAvailableError if leader
    is not available."""

    @asyncio.coroutine
    def recv(request_id):
        return b"response"

    mocked_conns = {("broker_1", 4567): mock.MagicMock()}
    mocked_conns[("broker_1", 4567)].recv.side_effect = recv
    client = AIOKafkaClient(["broker_1:4567"], loop=self.loop)
    client._conns = mocked_conns

    # Metadata reports two brokers, but the topic's partitions have no
    # leader (leader == -1, error NO_LEADER).
    brokers = [BrokerMetadata(0, "broker_1", 4567),
               BrokerMetadata(1, "broker_2", 5678)]
    topics = [
        TopicMetadata(
            "topic_noleader", NO_ERROR,
            [
                PartitionMetadata("topic_noleader", 0, -1, [], [],
                                  NO_LEADER),
                PartitionMetadata("topic_noleader", 1, -1, [], [],
                                  NO_LEADER),
            ],
        )
    ]
    protocol.decode_metadata_response.return_value = MetadataResponse(
        brokers, topics)

    self.loop.run_until_complete(client.load_metadata_for_topics())

    requests = [ProduceRequest(
        "topic_noleader", 0,
        [create_message("a"), create_message("b")])]
    with self.assertRaises(LeaderNotAvailableError):
        self.loop.run_until_complete(client.send_produce_request(requests))
def test_send_broker_unaware_request(self):
    'Tests that call works when at least one of the host is available'
    mocked_conns = {
        ('kafka01', 9092): mock.MagicMock(),
        ('kafka02', 9092): mock.MagicMock(),
        ('kafka03', 9092): mock.MagicMock()
    }
    # inject KafkaConnection side effects:
    # kafka01 and kafka03 fail, kafka02 answers successfully.
    fut = asyncio.Future(loop=self.loop)
    fut.set_exception(RuntimeError("kafka01 went away (unittest)"))
    mocked_conns[('kafka01', 9092)].send.return_value = fut
    fut2 = asyncio.Future(loop=self.loop)
    fut2.set_result(b'valid response')
    mocked_conns[('kafka02', 9092)].send.return_value = fut2
    fut3 = asyncio.Future(loop=self.loop)
    fut3.set_exception(RuntimeError("kafka03 went away (unittest)"))
    mocked_conns[('kafka03', 9092)].send.return_value = fut3

    client = AIOKafkaClient('kafka01:9092,kafka02:9092', loop=self.loop)
    client._conns = mocked_conns

    resp = self.loop.run_until_complete(
        client._send_broker_unaware_request(
            payloads=[b'fake request'],
            encoder_fn=mock.MagicMock(),
            decoder_fn=lambda x: x))

    # The healthy broker's reply must be returned.
    self.assertEqual(b'valid response', resp)
def test_compacted_topic_consumption(self):
    """Fetcher must cope with the offset gaps that log compaction
    leaves inside a record batch."""
    # Compacted topics can have offsets skipped
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=[])
    client.ready = mock.MagicMock()
    client.ready.side_effect = asyncio.coroutine(lambda a: True)
    client.force_metadata_update = mock.MagicMock()
    client.force_metadata_update.side_effect = asyncio.coroutine(
        lambda: False)
    client.send = mock.MagicMock()

    subscriptions = SubscriptionState(loop=self.loop)
    fetcher = Fetcher(client, subscriptions, loop=self.loop)

    tp = TopicPartition('test', 0)
    req = FetchRequest(
        -1,  # replica_id
        100, 100, [(tp.topic, [(tp.partition, 155, 100000)])])

    # Batch contains offsets 160, 162, 167 — gaps are intentional.
    builder = LegacyRecordBatchBuilder(
        magic=1, compression_type=0, batch_size=99999999)
    builder.append(160, value=b"12345", key=b"1", timestamp=None)
    builder.append(162, value=b"23456", key=b"2", timestamp=None)
    builder.append(167, value=b"34567", key=b"3", timestamp=None)
    batch = bytes(builder.build())

    resp = FetchResponse(
        [('test', [(
            0, 0, 3000,  # partition, error_code, highwater_offset
            batch  # Batch raw bytes
        )])])

    subscriptions.assign_from_user({tp})
    assignment = subscriptions.subscription.assignment
    tp_state = assignment.state_value(tp)
    client.send.side_effect = asyncio.coroutine(lambda n, r: resp)

    # Fetch position 155 sits before the first surviving record (160).
    tp_state.seek(155)
    fetcher._in_flight.add(0)
    needs_wake_up = yield from fetcher._proc_fetch_request(
        assignment, 0, req)
    self.assertEqual(needs_wake_up, True)
    buf = fetcher._records[tp]

    # Test successful getone, the closest in batch offset=160
    first = buf.getone()
    self.assertEqual(tp_state.position, 161)
    self.assertEqual(
        (first.value, first.key, first.offset),
        (b"12345", b"1", 160))

    # Test successful getmany
    second, third = buf.getall()
    self.assertEqual(tp_state.position, 168)
    self.assertEqual(
        (second.value, second.key, second.offset),
        (b"23456", b"2", 162))
    self.assertEqual(
        (third.value, third.key, third.offset),
        (b"34567", b"3", 167))
def test_subscribe_pattern(self):
    """Pattern subscription must match existing topics and drive a
    rebalance that notifies the listener with revoked/assigned sets."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()

    test_listener = RebalanceListenerForTest()
    subscription = SubscriptionState('latest')
    subscription.subscribe(pattern='st-topic*', listener=test_listener)
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='subs-pattern-group')

    yield from self.wait_topic(client, 'st-topic1')
    yield from self.wait_topic(client, 'st-topic2')

    yield from coordinator.ensure_active_group()
    self.assertNotEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, False)

    tp_list = subscription.assigned_partitions()
    assigned = set([('st-topic1', 0), ('st-topic1', 1),
                    ('st-topic2', 0), ('st-topic2', 1)])
    self.assertEqual(tp_list, assigned)

    # Listener first sees an empty revoke, then the new assignment.
    self.assertEqual(test_listener.revoked, [set([])])
    self.assertEqual(test_listener.assigned, [assigned])
    yield from coordinator.close()
    yield from client.close()
def test_subscribe_pattern(self):
    """Duplicate of the pattern-subscription test: pattern must match
    existing topics and the listener must observe the rebalance."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()

    test_listener = RebalanceListenerForTest()
    subscription = SubscriptionState('latest')
    subscription.subscribe(pattern='st-topic*', listener=test_listener)
    coordinator = GroupCoordinator(client, subscription, loop=self.loop,
                                   group_id='subs-pattern-group')

    yield from self.wait_topic(client, 'st-topic1')
    yield from self.wait_topic(client, 'st-topic2')

    yield from coordinator.ensure_active_group()
    self.assertNotEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, False)

    tp_list = subscription.assigned_partitions()
    assigned = set([('st-topic1', 0), ('st-topic1', 1),
                    ('st-topic2', 0), ('st-topic2', 1)])
    self.assertEqual(tp_list, assigned)

    # Listener first sees an empty revoke, then the new assignment.
    self.assertEqual(test_listener.revoked, [set([])])
    self.assertEqual(test_listener.assigned, [assigned])
    yield from coordinator.close()
    yield from client.close()
async def test_compacted_topic_consumption(self):
    """async variant: Fetcher must cope with the offset gaps that log
    compaction leaves inside a record batch."""
    # Compacted topics can have offsets skipped
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=[])
    client.ready = mock.MagicMock()
    client.ready.side_effect = asyncio.coroutine(lambda a: True)
    client.force_metadata_update = mock.MagicMock()
    client.force_metadata_update.side_effect = asyncio.coroutine(
        lambda: False)
    client.send = mock.MagicMock()

    subscriptions = SubscriptionState(loop=self.loop)
    fetcher = Fetcher(client, subscriptions, loop=self.loop)

    tp = TopicPartition('test', 0)
    req = FetchRequest(
        -1,  # replica_id
        100, 100, [(tp.topic, [(tp.partition, 155, 100000)])])

    # Batch contains offsets 160, 162, 167 — gaps are intentional.
    builder = LegacyRecordBatchBuilder(
        magic=1, compression_type=0, batch_size=99999999)
    builder.append(160, value=b"12345", key=b"1", timestamp=None)
    builder.append(162, value=b"23456", key=b"2", timestamp=None)
    builder.append(167, value=b"34567", key=b"3", timestamp=None)
    batch = bytes(builder.build())

    resp = FetchResponse(
        [('test', [(
            0, 0, 3000,  # partition, error_code, highwater_offset
            batch  # Batch raw bytes
        )])])

    subscriptions.assign_from_user({tp})
    assignment = subscriptions.subscription.assignment
    tp_state = assignment.state_value(tp)
    client.send.side_effect = asyncio.coroutine(lambda n, r: resp)

    # Fetch position 155 sits before the first surviving record (160).
    tp_state.seek(155)
    fetcher._in_flight.add(0)
    needs_wake_up = await fetcher._proc_fetch_request(
        assignment, 0, req)
    self.assertEqual(needs_wake_up, True)
    buf = fetcher._records[tp]

    # Test successful getone, the closest in batch offset=160
    first = buf.getone()
    self.assertEqual(tp_state.position, 161)
    self.assertEqual(
        (first.value, first.key, first.offset),
        (b"12345", b"1", 160))

    # Test successful getmany
    second, third = buf.getall()
    self.assertEqual(tp_state.position, 168)
    self.assertEqual(
        (second.value, second.key, second.offset),
        (b"23456", b"2", 162))
    self.assertEqual(
        (third.value, third.key, third.offset),
        (b"34567", b"3", 167))
def test_failed_group_join(self):
    """_perform_group_join() must map each coordinator error to the
    right recovery action instead of propagating it."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    subscription = SubscriptionState("latest")
    subscription.subscribe(topics=("topic1",))
    coordinator = GroupCoordinator(client, subscription, loop=self.loop,
                                   retry_backoff_ms=10)
    yield from client.bootstrap()
    yield from self.wait_topic(client, "topic1")

    mocked = mock.MagicMock()
    coordinator._client = mocked

    # no exception expected, just wait
    mocked.send.side_effect = Errors.GroupLoadInProgressError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)

    mocked.send.side_effect = Errors.InvalidGroupIdError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)

    # no exception expected, member_id should be reset
    coordinator.member_id = "some_invalid_member_id"
    mocked.send.side_effect = Errors.UnknownMemberIdError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)
    self.assertEqual(coordinator.member_id,
                     JoinGroupRequest.UNKNOWN_MEMBER_ID)

    # no exception expected, coordinator_id should be reset
    coordinator.coordinator_id = "some_id"
    mocked.send.side_effect = Errors.GroupCoordinatorNotAvailableError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)
    self.assertEqual(coordinator.coordinator_id, None)

    yield from client.close()
def test_no_concurrent_send_on_connection(self):
    """Requests to one node must be serialized on the connection: a
    quick request issued after a long fetch completes only after it."""
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=10000)
    yield from client.bootstrap()
    self.add_cleanup(client.close)

    yield from self.wait_topic(client, self.topic)

    node_id = client.get_random_node()
    # Fetch with max_wait_ms=500 keeps the connection busy for ~0.5s.
    wait_request = FetchRequest_v0(
        -1,  # replica_id
        500,  # max_wait_ms
        1024 * 1024,  # min_bytes
        [(self.topic, [(0, 0, 1024)])])
    vanila_request = MetadataRequest([])

    send_time = self.loop.time()
    long_task = self.loop.create_task(
        client.send(node_id, wait_request)
    )
    yield from asyncio.sleep(0.0001, loop=self.loop)
    self.assertFalse(long_task.done())

    yield from client.send(node_id, vanila_request)
    resp_time = self.loop.time()
    fetch_resp = yield from long_task
    # Check error code like resp->topics[0]->partitions[0]->error_code
    self.assertEqual(fetch_resp.topics[0][1][0][1], 0)
    # Check that vanila request actually executed after wait request
    self.assertGreaterEqual(resp_time - send_time, 0.5)
def test_coordinator_subscription_append_on_rebalance(self):
    """Adding a topic to the subscription mid-rebalance must still end
    with both topics assigned."""
    # same as above, but with adding topics instead of replacing them
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    yield from self.wait_topic(client, 'topic2')

    subscription = SubscriptionState('earliest')
    subscription.subscribe(topics=('topic1',))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='race-rebalance-subscribe-append',
        heartbeat_interval_ms=20000000)

    _perform_assignment = coordinator._perform_assignment
    with mock.patch.object(coordinator, '_perform_assignment') as mocked:

        def _new(*args, **kw):
            # Change the subscription to different topic before we finish
            # rebalance
            res = _perform_assignment(*args, **kw)
            subscription.subscribe(topics=('topic1', 'topic2', ))
            client.set_topics(('topic1', 'topic2', ))
            return res
        mocked.side_effect = _new

        yield from coordinator.ensure_active_group()
        self.assertEqual(subscription.needs_partition_assignment, False)

        topics = set([tp.topic for tp in subscription.assignment])
        self.assertEqual(topics, {'topic1', 'topic2'})

    yield from coordinator.close()
    yield from client.close()
def test_coordinator_ensure_active_group_on_expired_membership(self):
    # Do not fail ensure_active_group() if group membership has expired
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')

    subscription = SubscriptionState('earliest')
    subscription.subscribe(topics=('topic1', ))
    coordinator = GroupCoordinator(client, subscription, loop=self.loop,
                                   group_id='test-offsets-group')
    yield from coordinator.ensure_active_group()

    # during OffsetCommit, UnknownMemberIdError is raised
    offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
    with mock.patch('aiokafka.errors.for_code') as mocked:
        mocked.return_value = Errors.UnknownMemberIdError
        with self.assertRaises(Errors.UnknownMemberIdError):
            yield from coordinator.commit_offsets(offsets)
        self.assertEqual(subscription.needs_partition_assignment, True)

    # same exception is raised during ensure_active_group()'s call to
    # commit_offsets() via _on_join_prepare() but doesn't break this method
    with mock.patch.object(coordinator, "commit_offsets") as mocked:

        @asyncio.coroutine
        def mock_commit_offsets(*args, **kwargs):
            raise Errors.UnknownMemberIdError()
        mocked.side_effect = mock_commit_offsets

        yield from coordinator.ensure_active_group()

    yield from coordinator.close()
    yield from client.close()
def test_send_broker_unaware_request(self):
    "Tests that call works when at least one of the host is available"
    mocked_conns = {
        ("kafka01", 9092): mock.MagicMock(),
        ("kafka02", 9092): mock.MagicMock(),
        ("kafka03", 9092): mock.MagicMock(),
    }
    # inject KafkaConnection side effects:
    # kafka01 and kafka03 fail, kafka02 answers successfully.
    fut = asyncio.Future(loop=self.loop)
    fut.set_exception(RuntimeError("kafka01 went away (unittest)"))
    mocked_conns[("kafka01", 9092)].send.return_value = fut
    fut2 = asyncio.Future(loop=self.loop)
    fut2.set_result(b"valid response")
    mocked_conns[("kafka02", 9092)].send.return_value = fut2
    fut3 = asyncio.Future(loop=self.loop)
    fut3.set_exception(RuntimeError("kafka03 went away (unittest)"))
    mocked_conns[("kafka03", 9092)].send.return_value = fut3

    client = AIOKafkaClient("kafka01:9092,kafka02:9092", loop=self.loop)
    client._conns = mocked_conns

    resp = self.loop.run_until_complete(
        client._send_broker_unaware_request(
            payloads=[b"fake request"],
            encoder_fn=mock.MagicMock(),
            decoder_fn=lambda x: x
        )
    )

    # The healthy broker's reply must be returned.
    self.assertEqual(b"valid response", resp)
def test_client_receive_zero_brokers(self):
    """Variant with tuple api_version: an empty-broker metadata reply
    must not purge the previously known brokers."""
    brokers = [
        (0, 'broker_1', 4567),
        (1, 'broker_2', 5678)
    ]
    correct_meta = MetadataResponse(brokers, [])
    # Degenerate response: no brokers at all.
    bad_response = MetadataResponse([], [])

    @asyncio.coroutine
    def send(*args, **kwargs):
        return bad_response

    client = AIOKafkaClient(loop=self.loop,
                            bootstrap_servers=['broker_1:4567'],
                            api_version=(0, 10))
    conn = mock.Mock()
    client._conns = [mock.Mock()]
    client._get_conn = mock.Mock()
    client._get_conn.side_effect = asyncio.coroutine(lambda x: conn)
    conn.send = mock.Mock()
    conn.send.side_effect = send
    # Seed the cluster with the known-good broker list first.
    client.cluster.update_metadata(correct_meta)
    brokers_before = client.cluster.brokers()

    yield from client._metadata_update(client.cluster, [])

    # The broker list should not be purged
    self.assertNotEqual(client.cluster.brokers(), set([]))
    self.assertEqual(client.cluster.brokers(), brokers_before)
async def test_check_version(self):
    """check_version() must detect the broker's version and raise on
    connection problems."""
    kafka_version = tuple(int(x) for x in self.kafka_version.split("."))

    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    await client.bootstrap()
    ver = await client.check_version()

    expected_version = kafka_version[:2]
    # No significant protocol changes, no way to differentiate
    if expected_version == (2, 2):
        expected_version = (2, 1)
    elif expected_version == (2, 4):
        expected_version = (2, 3)
    self.assertEqual(expected_version, ver[:2])
    await self.wait_topic(client, 'some_test_topic')
    ver2 = await client.check_version()
    self.assertEqual(ver, ver2)
    ver2 = await client.check_version(client.get_random_node())
    self.assertEqual(ver, ver2)

    # A broker that errors on every request cannot be recognised.
    with mock.patch.object(AIOKafkaConnection, 'send') as mocked:
        mocked.side_effect = KafkaError('mocked exception')
        with self.assertRaises(UnrecognizedBrokerVersion):
            await client.check_version(client.get_random_node())

    # No connection at all -> KafkaConnectionError.
    client._get_conn = asyncio.coroutine(lambda _, **kw: None)
    with self.assertRaises(KafkaConnectionError):
        await client.check_version()
    await client.close()
async def test_check_version(self):
    """check_version() must report the broker's version and surface
    connection failures as exceptions."""
    kafka_version = tuple(int(x) for x in self.kafka_version.split("."))

    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    ver = await client.check_version()

    expected_version = kafka_version[:2]
    self.assertEqual(expected_version, ver[:2])
    await self.wait_topic(client, 'some_test_topic')
    ver2 = await client.check_version()
    self.assertEqual(ver, ver2)
    ver2 = await client.check_version(client.get_random_node())
    self.assertEqual(ver, ver2)

    # A broker that errors on every request cannot be recognised.
    with mock.patch.object(AIOKafkaConnection, 'send') as mocked:
        mocked.side_effect = KafkaError('mocked exception')
        with self.assertRaises(UnrecognizedBrokerVersion):
            await client.check_version(client.get_random_node())

    # No connection at all -> KafkaConnectionError.
    async def _get_conn(*args: Any, **kw: Any):
        return None
    client._get_conn = _get_conn
    with self.assertRaises(KafkaConnectionError):
        await client.check_version()
    await client.close()
async def test_no_concurrent_send_on_connection(self):
    """async variant: a second request on the same node must wait for
    the in-flight fetch — the connection serializes sends."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts,
                            metadata_max_age_ms=10000)
    await client.bootstrap()
    self.add_cleanup(client.close)

    await self.wait_topic(client, self.topic)

    node_id = client.get_random_node()
    # Fetch with max_wait_ms=500 keeps the connection busy for ~0.5s.
    wait_request = FetchRequest_v0(
        -1,  # replica_id
        500,  # max_wait_ms
        1024 * 1024,  # min_bytes
        [(self.topic, [(0, 0, 1024)])])
    vanila_request = MetadataRequest([])

    loop = get_running_loop()
    send_time = loop.time()
    long_task = create_task(client.send(node_id, wait_request))
    await asyncio.sleep(0.0001)
    self.assertFalse(long_task.done())

    await client.send(node_id, vanila_request)
    resp_time = loop.time()
    fetch_resp = await long_task
    # Check error code like resp->topics[0]->partitions[0]->error_code
    self.assertEqual(fetch_resp.topics[0][1][0][1], 0)
    # Check that vanila request actually executed after wait request
    self.assertGreaterEqual(resp_time - send_time, 0.5)
async def test_client_receive_zero_brokers_timeout_on_send(self):
    """A metadata request that times out must not wipe the cluster's
    known broker list."""
    brokers = [(0, 'broker_1', 4567), (1, 'broker_2', 5678)]
    correct_meta = MetadataResponse(brokers, [])

    async def send(*args, **kwargs):
        raise asyncio.TimeoutError()

    client = AIOKafkaClient(bootstrap_servers=['broker_1:4567'],
                            api_version="0.10")
    conn = mock.Mock()
    client._conns = [mock.Mock()]

    async def _get_conn(*args: Any, **kwargs: Any):
        return conn
    client._get_conn = mock.Mock()
    client._get_conn.side_effect = _get_conn
    conn.send = mock.Mock()
    conn.send.side_effect = send
    # Seed the cluster with the known-good broker list first.
    client.cluster.update_metadata(correct_meta)
    brokers_before = client.cluster.brokers()

    await client._metadata_update(client.cluster, [])

    # The broker list should not be purged
    self.assertNotEqual(client.cluster.brokers(), set())
    self.assertEqual(client.cluster.brokers(), brokers_before)
async def test_send_request(self):
    """Bootstrap, send a MetadataRequest to a random broker and verify
    the reply is a MetadataResponse."""
    kafka_client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await kafka_client.bootstrap()
    broker_id = kafka_client.get_random_node()
    response = await kafka_client.send(broker_id, MetadataRequest([]))
    self.assertIsInstance(response, MetadataResponse)
    await kafka_client.close()
def test_send_produce_request_raises_when_topic_unknown(self, protocol):
    """Producing to a topic that metadata reports as unknown must raise
    UnknownTopicOrPartitionError."""

    @asyncio.coroutine
    def recv(request_id):
        return b'response'

    mocked_conns = {('broker_1', 4567): mock.MagicMock()}
    mocked_conns[('broker_1', 4567)].recv.side_effect = recv
    client = AIOKafkaClient(['broker_1:4567'], loop=self.loop)
    client._conns = mocked_conns

    brokers = [
        BrokerMetadata(0, 'broker_1', 4567),
        BrokerMetadata(1, 'broker_2', 5678)
    ]
    # Metadata marks the topic with UNKNOWN_TOPIC_OR_PARTITION.
    topics = [
        TopicMetadata('topic_doesnt_exist',
                      UNKNOWN_TOPIC_OR_PARTITION, []),
    ]
    protocol.decode_metadata_response.return_value = MetadataResponse(
        brokers, topics)

    self.loop.run_until_complete(client.load_metadata_for_topics())

    requests = [
        ProduceRequest(
            "topic_doesnt_exist", 0,
            [create_message("a"), create_message("b")])
    ]

    with self.assertRaises(UnknownTopicOrPartitionError):
        self.loop.run_until_complete(client.send_produce_request(requests))
def test_get_offsets(self):
    """Offsets written with commit_offsets() must be readable back via
    refresh_committed_offsets()."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()

    subscription = SubscriptionState('earliest')
    subscription.subscribe(topics=('topic1',))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='getoffsets-group')
    yield from self.wait_topic(client, 'topic1')
    # Produce a few messages so the committed offsets are meaningful.
    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts)
    yield from producer.start()
    yield from producer.send('topic1', b'first msg', partition=0)
    yield from producer.send('topic1', b'second msg', partition=1)
    yield from producer.send('topic1', b'third msg', partition=1)
    yield from producer.stop()

    yield from coordinator.ensure_active_group()

    offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, ''),
               TopicPartition('topic1', 1): OffsetAndMetadata(2, '')}
    yield from coordinator.commit_offsets(offsets)

    # Nothing consumed locally yet.
    self.assertEqual(subscription.all_consumed_offsets(), {})
    subscription.seek(('topic1', 0), 0)
    subscription.seek(('topic1', 1), 0)

    yield from coordinator.refresh_committed_offsets()
    self.assertEqual(subscription.assignment[('topic1', 0)].committed, 1)
    self.assertEqual(subscription.assignment[('topic1', 1)].committed, 2)

    yield from coordinator.close()
    yield from client.close()
def test_send_broker_unaware_request_fail(self):
    'Tests that call fails when all hosts are unavailable'
    mocked_conns = {
        ('kafka01', 9092): mock.MagicMock(),
        ('kafka02', 9092): mock.MagicMock()
    }
    # inject KafkaConnection side effects: both brokers' send() futures
    # fail, so no broker can serve the request.
    fut1 = asyncio.Future(loop=self.loop)
    fut1.set_exception(RuntimeError("kafka01 went away (unittest)"))
    mocked_conns[('kafka01', 9092)].send.return_value = fut1
    fut2 = asyncio.Future(loop=self.loop)
    fut2.set_exception(RuntimeError("kafka02 went away (unittest)"))
    mocked_conns[('kafka02', 9092)].send.return_value = fut2

    client = AIOKafkaClient(['kafka01:9092', 'kafka02:9092'],
                            loop=self.loop)
    client._conns = mocked_conns

    @asyncio.coroutine
    def go():
        # With every broker failing the client must give up with
        # KafkaUnavailableError rather than hang or succeed.
        with self.assertRaises(KafkaUnavailableError):
            yield from client._send_broker_unaware_request(
                payloads=['fake request'],
                encoder_fn=mock.MagicMock(
                    return_value=b'fake encoded message'),
                decoder_fn=lambda x: x)
        # Every broker must have been tried with the encoded payload.
        for key, conn in mocked_conns.items():
            conn.send.assert_called_with(b'fake encoded message')

    self.loop.run_until_complete(go())
def test_get_offsets(self):
    """Duplicate of the commit/refresh round-trip test: committed
    offsets must be readable back via refresh_committed_offsets()."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()

    subscription = SubscriptionState("earliest")
    subscription.subscribe(topics=("topic1",))
    coordinator = GroupCoordinator(client, subscription, loop=self.loop,
                                   group_id="getoffsets-group")
    yield from self.wait_topic(client, "topic1")
    # Produce a few messages so the committed offsets are meaningful.
    producer = AIOKafkaProducer(loop=self.loop,
                                bootstrap_servers=self.hosts)
    yield from producer.start()
    yield from producer.send("topic1", b"first msg", partition=0)
    yield from producer.send("topic1", b"second msg", partition=1)
    yield from producer.send("topic1", b"third msg", partition=1)
    yield from producer.stop()

    yield from coordinator.ensure_active_group()

    offsets = {
        TopicPartition("topic1", 0): OffsetAndMetadata(1, ""),
        TopicPartition("topic1", 1): OffsetAndMetadata(2, ""),
    }
    yield from coordinator.commit_offsets(offsets)

    # Nothing consumed locally yet.
    self.assertEqual(subscription.all_consumed_offsets(), {})
    subscription.seek(("topic1", 0), 0)
    subscription.seek(("topic1", 1), 0)

    yield from coordinator.refresh_committed_offsets()
    self.assertEqual(subscription.assignment[("topic1", 0)].committed, 1)
    self.assertEqual(subscription.assignment[("topic1", 1)].committed, 2)

    yield from coordinator.close()
    yield from client.close()
def test_get_leader_for_unassigned_partitions(self, protocol):
    """Leaderless and unknown topics must raise on leader lookup and
    leave the partition->broker mapping empty."""

    @asyncio.coroutine
    def recv(request_id):
        return b"response"

    mocked_conns = {("broker_1", 4567): mock.MagicMock()}
    mocked_conns[("broker_1", 4567)].recv.side_effect = recv
    client = AIOKafkaClient(["broker_1:4567"], loop=self.loop)
    client._conns = mocked_conns

    brokers = [BrokerMetadata(0, "broker_1", 4567),
               BrokerMetadata(1, "broker_2", 5678)]
    topics = [
        TopicMetadata("topic_no_partitions", NO_LEADER, []),
        TopicMetadata("topic_unknown", UNKNOWN_TOPIC_OR_PARTITION, []),
    ]
    protocol.decode_metadata_response.return_value = MetadataResponse(
        brokers, topics)

    self.loop.run_until_complete(client.load_metadata_for_topics())

    # Neither topic yields any usable partition mapping.
    self.assertDictEqual({}, client._topics_to_brokers)

    with self.assertRaises(LeaderNotAvailableError):
        self.loop.run_until_complete(
            client._get_leader_for_partition("topic_no_partitions", 0))

    with self.assertRaises(UnknownTopicOrPartitionError):
        self.loop.run_until_complete(
            client._get_leader_for_partition("topic_unknown", 0))
def test_close(self):
    """close() must empty the connection cache and close each cached
    connection.

    Bug fix: the original called ``assert_raises_with()``, which is not
    a Mock assertion method -- on a plain ``mock.Mock`` it is either
    auto-created (silently asserting nothing) or rejected as an
    unknown ``assert_``-prefixed attribute.  The intended check is
    ``assert_called_with()``.
    """
    client = AIOKafkaClient(['broker_1:4567'], loop=self.loop)
    m1 = mock.Mock()
    m2 = mock.Mock()
    client._conns = {('host1', 4567): m1, ('host2', 5678): m2}

    client.close()
    # The connection cache must be emptied ...
    self.assertEqual({}, client._conns)
    # ... and every cached connection must have been closed.
    m1.close.assert_called_with()
    m2.close.assert_called_with()
def test_close(self):
    """close() must empty the connection cache and close each cached
    connection.

    Bug fix: ``assert_raises_with()`` is not a Mock assertion method --
    on a plain ``mock.Mock`` it either silently asserts nothing or is
    rejected as an unknown ``assert_``-prefixed attribute.  Use
    ``assert_called_with()`` as intended.
    """
    client = AIOKafkaClient(["broker_1:4567"], loop=self.loop)
    m1 = mock.Mock()
    m2 = mock.Mock()
    client._conns = {("host1", 4567): m1, ("host2", 5678): m2}

    client.close()
    # The connection cache must be emptied ...
    self.assertEqual({}, client._conns)
    # ... and every cached connection must have been closed.
    m1.close.assert_called_with()
    m2.close.assert_called_with()
def test_offsets_failed_scenarios(self):
    """commit_offsets() must translate each coordinator error code into
    the matching exception and adjust coordinator/subscription state."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    subscription = SubscriptionState('earliest')
    subscription.subscribe(topics=('topic1', ))
    coordinator = GroupCoordinator(client, subscription, loop=self.loop,
                                   group_id='test-offsets-group')
    yield from coordinator.ensure_active_group()

    offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
    yield from coordinator.commit_offsets(offsets)
    # Patch the error-code lookup so each commit "fails" with the code
    # under test.
    with mock.patch('aiokafka.errors.for_code') as mocked:
        mocked.return_value = Errors.GroupAuthorizationFailedError
        with self.assertRaises(Errors.GroupAuthorizationFailedError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.TopicAuthorizationFailedError
        with self.assertRaises(Errors.TopicAuthorizationFailedError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.InvalidCommitOffsetSizeError
        with self.assertRaises(Errors.InvalidCommitOffsetSizeError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.GroupLoadInProgressError
        with self.assertRaises(Errors.GroupLoadInProgressError):
            yield from coordinator.commit_offsets(offsets)

        # A rebalance in progress also forces partition re-assignment.
        mocked.return_value = Errors.RebalanceInProgressError
        with self.assertRaises(Errors.RebalanceInProgressError):
            yield from coordinator.commit_offsets(offsets)
        self.assertEqual(subscription.needs_partition_assignment, True)

        subscription.needs_partition_assignment = False
        mocked.return_value = Errors.UnknownMemberIdError
        with self.assertRaises(Errors.UnknownMemberIdError):
            yield from coordinator.commit_offsets(offsets)
        self.assertEqual(subscription.needs_partition_assignment, True)

        mocked.return_value = KafkaError
        with self.assertRaises(KafkaError):
            yield from coordinator.commit_offsets(offsets)

        # Losing the coordinator resets coordinator_id, so the next
        # commit fails with GroupCoordinatorNotAvailableError.
        mocked.return_value = Errors.NotCoordinatorForGroupError
        with self.assertRaises(Errors.NotCoordinatorForGroupError):
            yield from coordinator.commit_offsets(offsets)
        self.assertEqual(coordinator.coordinator_id, None)

        with self.assertRaises(Errors.GroupCoordinatorNotAvailableError):
            yield from coordinator.commit_offsets(offsets)

    yield from coordinator.close()
    yield from client.close()
def __init__(self, *, loop, bootstrap_servers='localhost',
             client_id=None,
             metadata_max_age_ms=300000, request_timeout_ms=40000,
             api_version='auto', acks=1,
             key_serializer=None, value_serializer=None,
             compression_type=None, max_batch_size=16384,
             partitioner=DefaultPartitioner(), max_request_size=1048576,
             linger_ms=0, send_backoff_ms=100,
             retry_backoff_ms=100):
    """Validate configuration, build the underlying AIOKafkaClient and
    the message accumulator used for batching.

    Raises ValueError for an invalid ``acks``, ``compression_type``
    or ``api_version``.
    NOTE(review): ``partitioner`` has a mutable-looking default
    (``DefaultPartitioner()``) evaluated once at definition time — it
    is shared between producer instances; confirm it is stateless.
    NOTE(review): ``send_backoff_ms`` is accepted but not stored in
    this visible body — presumably consumed elsewhere; verify.
    """
    if acks not in (0, 1, -1, 'all'):
        raise ValueError("Invalid ACKS parameter")
    if compression_type not in ('gzip', 'snappy', 'lz4', None):
        raise ValueError("Invalid compression type!")
    if api_version not in ('auto', '0.10', '0.9', '0.8.2', '0.8.1',
                           '0.8.0'):
        raise ValueError("Unsupported Kafka version")

    # Class-level counter gives each producer a unique client id.
    self._PRODUCER_CLIENT_ID_SEQUENCE += 1
    if client_id is None:
        client_id = 'aiokafka-producer-%s' % \
            self._PRODUCER_CLIENT_ID_SEQUENCE

    # 'all' is the protocol's -1 sentinel.
    if acks == 'all':
        acks = -1
    self._acks = acks
    self._key_serializer = key_serializer
    self._value_serializer = value_serializer
    self._compression_type = compression_type
    self._partitioner = partitioner
    self._max_request_size = max_request_size
    self._request_timeout_ms = request_timeout_ms

    self.client = AIOKafkaClient(
        loop=loop, bootstrap_servers=bootstrap_servers,
        client_id=client_id,
        metadata_max_age_ms=metadata_max_age_ms,
        request_timeout_ms=request_timeout_ms,
        api_version=api_version)
    self._metadata = self.client.cluster
    # Accumulator batches messages per partition before sending.
    self._message_accumulator = MessageAccumulator(
        self._metadata, max_batch_size, self._compression_type,
        self._request_timeout_ms / 1000, loop)
    self._sender_task = None
    self._in_flight = set()
    self._closed = False
    self._loop = loop
    # Timeouts are kept in seconds internally.
    self._retry_backoff = retry_backoff_ms / 1000
    self._linger_time = linger_ms / 1000
def __init__(self, *topics, loop, bootstrap_servers='localhost',
             client_id='aiokafka-' + __version__, group_id=None,
             key_deserializer=None, value_deserializer=None,
             fetch_max_wait_ms=500, fetch_min_bytes=1,
             max_partition_fetch_bytes=1 * 1024 * 1024,
             request_timeout_ms=40 * 1000, retry_backoff_ms=100,
             reconnect_backoff_ms=50, auto_offset_reset='latest',
             enable_auto_commit=True, auto_commit_interval_ms=5000,
             check_crcs=True, metadata_max_age_ms=5 * 60 * 1000,
             partition_assignment_strategy=(RoundRobinPartitionAssignor, ),
             heartbeat_interval_ms=3000, session_timeout_ms=30000,
             consumer_timeout_ms=100, api_version='auto'):
    """Store consumer configuration, build the underlying client and,
    if topics were given, subscribe to them immediately.

    Raises ValueError for an unsupported ``api_version``.
    NOTE(review): ``reconnect_backoff_ms`` is accepted but not stored
    in this visible body — presumably consumed elsewhere; verify.
    """
    if api_version not in ('auto', '0.9'):
        raise ValueError("Unsupported Kafka API version")
    self._client = AIOKafkaClient(
        loop=loop, bootstrap_servers=bootstrap_servers,
        client_id=client_id, metadata_max_age_ms=metadata_max_age_ms,
        request_timeout_ms=request_timeout_ms)

    self._api_version = api_version
    self._group_id = group_id
    self._heartbeat_interval_ms = heartbeat_interval_ms
    self._retry_backoff_ms = retry_backoff_ms
    self._enable_auto_commit = enable_auto_commit
    self._auto_commit_interval_ms = auto_commit_interval_ms
    self._partition_assignment_strategy = partition_assignment_strategy
    self._key_deserializer = key_deserializer
    self._value_deserializer = value_deserializer
    self._fetch_min_bytes = fetch_min_bytes
    self._fetch_max_wait_ms = fetch_max_wait_ms
    self._max_partition_fetch_bytes = max_partition_fetch_bytes
    # Kept in seconds internally.
    self._consumer_timeout = consumer_timeout_ms / 1000
    self._check_crcs = check_crcs
    self._subscription = SubscriptionState(auto_offset_reset)
    self._fetcher = None
    self._coordinator = None
    self._closed = False
    self._loop = loop

    self._topics = topics
    # Positional topics trigger an immediate subscription.
    if topics:
        self._client.set_topics(topics)
        self._subscription.subscribe(topics=topics)
def test_init_with_list(self):
    """Client accepts a plain list of host:port strings (legacy API)."""
    client = AIOKafkaClient(
        ['kafka01:9092', 'kafka02:9092', 'kafka03:9092'], loop=self.loop)
    self.assertTrue('KafkaClient' in client.__repr__())
    # Bootstrap strings should be parsed into (host, port) pairs.
    self.assertEqual(
        sorted({
            'kafka01': 9092,
            'kafka02': 9092,
            'kafka03': 9092
        }.items()),
        sorted(client.hosts))
def test_force_metadata_update_multiple_times(self):
    """Concurrent force_metadata_update calls share one in-flight update."""
    client = AIOKafkaClient(loop=self.loop,
                            bootstrap_servers=self.hosts,
                            metadata_max_age_ms=10000)
    yield from client.bootstrap()
    self.add_cleanup(client.close)

    orig = client._metadata_update
    with mock.patch.object(client, '_metadata_update') as mocked:
        @asyncio.coroutine
        def new(*args, **kw):
            # Slow the update down so subsequent calls land while the
            # first one is still running.
            yield from asyncio.sleep(0.01, loop=self.loop)
            return (yield from orig(*args, **kw))
        mocked.side_effect = new

        client.force_metadata_update()
        yield from asyncio.sleep(0.001, loop=self.loop)
        self.assertEqual(len(client._metadata_update.mock_calls), 1)
        # A second request while one is in flight must not start another.
        client.force_metadata_update()
        yield from asyncio.sleep(0.001, loop=self.loop)
        self.assertEqual(len(client._metadata_update.mock_calls), 1)
        # Even after the update had time to finish only one call was made.
        client.force_metadata_update()
        yield from asyncio.sleep(0.05, loop=self.loop)
        self.assertEqual(len(client._metadata_update.mock_calls), 1)
def test_failed_group_join(self):
    """Exercise JoinGroup error paths: retriable errors keep the rejoin
    flag set, fatal ones propagate, and member/coordinator ids are reset
    where the protocol requires it."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    subscription = SubscriptionState('latest')
    subscription.subscribe(topics=('topic1', ))
    coordinator = GroupCoordinator(client, subscription, loop=self.loop,
                                   retry_backoff_ms=10)

    @asyncio.coroutine
    def do_rebalance():
        # Drive a single group-join attempt directly.
        rebalance = CoordinatorGroupRebalance(
            coordinator, coordinator.group_id, coordinator.coordinator_id,
            subscription.subscription, coordinator._assignors,
            coordinator._session_timeout_ms,
            coordinator._retry_backoff_ms, loop=self.loop)
        yield from rebalance.perform_group_join()

    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')

    mocked = mock.MagicMock()
    coordinator._client = mocked

    # no exception expected, just wait
    mocked.send.side_effect = Errors.GroupLoadInProgressError()
    yield from do_rebalance()
    self.assertEqual(coordinator.need_rejoin(), True)

    mocked.send.side_effect = Errors.InvalidGroupIdError()
    with self.assertRaises(Errors.InvalidGroupIdError):
        yield from do_rebalance()
    self.assertEqual(coordinator.need_rejoin(), True)

    # no exception expected, member_id should be reset
    coordinator.member_id = 'some_invalid_member_id'
    mocked.send.side_effect = Errors.UnknownMemberIdError()
    yield from do_rebalance()
    self.assertEqual(coordinator.need_rejoin(), True)
    self.assertEqual(
        coordinator.member_id, JoinGroupRequest.UNKNOWN_MEMBER_ID)

    # no exception expected, coordinator_id should be reset
    coordinator.coordinator_id = 'some_id'
    mocked.send.side_effect = Errors.GroupCoordinatorNotAvailableError()
    yield from do_rebalance()
    self.assertEqual(coordinator.need_rejoin(), True)
    self.assertEqual(coordinator.coordinator_id, None)

    yield from client.close()
def test_init_with_list(self):
    """Client accepts bootstrap_servers as a list of host:port strings."""
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=['kafka01:9092', 'kafka02:9092', 'kafka03:9092'])
    self.assertEqual(
        '<AIOKafkaClient client_id=aiokafka-0.0.1>', client.__repr__())
    self.assertEqual(sorted({'kafka01': 9092,
                             'kafka02': 9092,
                             'kafka03': 9092}.items()),
                     sorted(client.hosts))

    node = client.get_random_node()
    self.assertEqual(node, None)  # unknown cluster metadata
def test_metadata_update_fail(self):
    """A failed metadata refresh returns False; fetch_all_metadata raises."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()

    with mock.patch.object(AIOKafkaConnection, 'send') as mocked:
        mocked.side_effect = KafkaError('mocked exception')

        # force_metadata_update swallows the error and reports failure
        updated = yield from client.force_metadata_update()
        self.assertEqual(updated, False)

        # fetch_all_metadata propagates it instead
        with self.assertRaises(KafkaError):
            yield from client.fetch_all_metadata()
def test_offsets_failed_scenarios(self):
    """Exercise every commit_offsets error branch by patching the
    protocol error-code lookup."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    subscription = SubscriptionState('earliest')
    subscription.subscribe(topics=('topic1',))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='test-offsets-group')

    yield from coordinator.ensure_active_group()

    offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
    yield from coordinator.commit_offsets(offsets)
    with mock.patch('kafka.common.for_code') as mocked:
        # Authorization and size errors are fatal for the commit.
        mocked.return_value = Errors.GroupAuthorizationFailedError
        with self.assertRaises(Errors.GroupAuthorizationFailedError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.TopicAuthorizationFailedError
        with self.assertRaises(Errors.TopicAuthorizationFailedError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.InvalidCommitOffsetSizeError
        with self.assertRaises(Errors.InvalidCommitOffsetSizeError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.GroupLoadInProgressError
        with self.assertRaises(Errors.GroupLoadInProgressError):
            yield from coordinator.commit_offsets(offsets)

        # RebalanceInProgress additionally forces re-assignment.
        mocked.return_value = Errors.RebalanceInProgressError
        with self.assertRaises(Errors.RebalanceInProgressError):
            yield from coordinator.commit_offsets(offsets)
        self.assertEqual(subscription.needs_partition_assignment, True)

        mocked.return_value = KafkaError
        with self.assertRaises(KafkaError):
            yield from coordinator.commit_offsets(offsets)

        # NotCoordinatorForGroup resets the cached coordinator id, so the
        # next commit fails with coordinator-not-available.
        mocked.return_value = Errors.NotCoordinatorForGroupError
        with self.assertRaises(Errors.NotCoordinatorForGroupError):
            yield from coordinator.commit_offsets(offsets)
        self.assertEqual(coordinator.coordinator_id, None)

        with self.assertRaises(
                Errors.GroupCoordinatorNotAvailableError):
            yield from coordinator.commit_offsets(offsets)

    yield from coordinator.close()
    yield from client.close()
def test_force_metadata_update_multiple_times(self):
    """Concurrent force_metadata_update calls share one in-flight update
    (variant with larger sleeps)."""
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=10000)
    yield from client.bootstrap()
    self.add_cleanup(client.close)

    orig = client._metadata_update
    with mock.patch.object(client, '_metadata_update') as mocked:
        @asyncio.coroutine
        def new(*args, **kw):
            # Delay so further calls arrive while the update is running.
            yield from asyncio.sleep(0.2, loop=self.loop)
            return (yield from orig(*args, **kw))
        mocked.side_effect = new

        client.force_metadata_update()
        yield from asyncio.sleep(0.01, loop=self.loop)
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)
        # Must coalesce with the in-flight update.
        client.force_metadata_update()
        yield from asyncio.sleep(0.01, loop=self.loop)
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)
        # Still only one underlying call after the update finished.
        client.force_metadata_update()
        yield from asyncio.sleep(0.5, loop=self.loop)
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)
def test_metadata_update_fail(self):
    """A failed metadata refresh returns False; fetch_all_metadata raises."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()

    with mock.patch.object(
            AIOKafkaConnection, 'send') as mocked:
        mocked.side_effect = KafkaError('mocked exception')

        # Refresh reports failure rather than raising.
        updated = yield from client.force_metadata_update()
        self.assertEqual(updated, False)

        # Explicit fetch propagates the error.
        with self.assertRaises(KafkaError):
            yield from client.fetch_all_metadata()
async def test_init_with_list(self):
    """Client parses a bootstrap list into (host, port, family) tuples."""
    client = AIOKafkaClient(bootstrap_servers=[
        '127.0.0.1:9092', '127.0.0.2:9092', '127.0.0.3:9092'
    ])
    self.assertEqual('<AIOKafkaClient client_id=aiokafka-0.6.1.dev0>',
                     client.__repr__())
    self.assertEqual(
        sorted([('127.0.0.1', 9092, socket.AF_INET),
                ('127.0.0.2', 9092, socket.AF_INET),
                ('127.0.0.3', 9092, socket.AF_INET)]),
        sorted(client.hosts))

    node = client.get_random_node()
    self.assertEqual(node, None)  # unknown cluster metadata
def wait_kafka(cls):
    """Block until the Kafka broker (in docker) accepts a bootstrap
    connection, retrying for up to ~50 seconds.

    Raises:
        AssertionError: if the broker never becomes available. The
            original version fell through silently after exhausting the
            retries, which let later tests fail with confusing errors.
    """
    cls.hosts = ['{}:{}'.format(cls.kafka_host, cls.kafka_port)]

    # Reconnecting until Kafka in docker becomes available
    client = AIOKafkaClient(loop=cls.loop, bootstrap_servers=cls.hosts)
    for i in range(500):
        try:
            cls.loop.run_until_complete(client.bootstrap())
        except ConnectionError:
            time.sleep(0.1)
        else:
            cls.loop.run_until_complete(client.close())
            break
    else:
        # for/else: executed only when the loop was never broken out of,
        # i.e. bootstrap never succeeded. Fail fast and loudly.
        assert False, "Kafka server never started"
def test_init_with_list(self):
    """Client parses a bootstrap list into (host, port, family) tuples."""
    client = AIOKafkaClient(
        loop=self.loop, bootstrap_servers=[
            '127.0.0.1:9092', '127.0.0.2:9092', '127.0.0.3:9092'])
    self.assertEqual(
        '<AIOKafkaClient client_id=aiokafka-0.1.2>', client.__repr__())
    self.assertEqual(
        sorted([('127.0.0.1', 9092, socket.AF_INET),
                ('127.0.0.2', 9092, socket.AF_INET),
                ('127.0.0.3', 9092, socket.AF_INET)]),
        sorted(client.hosts))

    node = client.get_random_node()
    self.assertEqual(node, None)  # unknown cluster metadata
def test_send_timeout_deletes_connection(self):
    """A request timeout closes the connection; the next send must get a
    fresh connection and succeed."""
    correct_response = MetadataResponse([], [])

    @asyncio.coroutine
    def send_exception(*args, **kwargs):
        raise asyncio.TimeoutError()

    @asyncio.coroutine
    def send(*args, **kwargs):
        return correct_response

    @asyncio.coroutine
    def get_conn(self, node_id, *, group=0):
        # Simplified stand-in for AIOKafkaClient._get_conn: reuse a live
        # cached connection, drop dead ones, otherwise create a mock.
        conn_id = (node_id, group)
        if conn_id in self._conns:
            conn = self._conns[conn_id]
            if not conn.connected():
                del self._conns[conn_id]
            else:
                return conn

        conn = mock.MagicMock()
        conn.send.side_effect = send
        self._conns[conn_id] = conn
        return conn

    node_id = 0
    conn = mock.MagicMock()
    conn.send.side_effect = send_exception
    conn.connected.return_value = True
    mocked_conns = {(node_id, 0): conn}
    client = AIOKafkaClient(loop=self.loop,
                            bootstrap_servers=['broker_1:4567'])
    client._conns = mocked_conns
    client._get_conn = types.MethodType(get_conn, client)

    # first send timeouts
    with self.assertRaises(RequestTimedOutError):
        yield from client.send(0, MetadataRequest([]))
    conn.close.assert_called_once_with(
        reason=CloseReason.CONNECTION_TIMEOUT)

    # this happens because conn was closed
    conn.connected.return_value = False

    # second send gets new connection and obtains result
    response = yield from client.send(0, MetadataRequest([]))
    self.assertEqual(response, correct_response)
    self.assertNotEqual(conn, client._conns[(node_id, 0)])
def test_coordinator_workflow(self):
    """Full coordinator lifecycle: join, rebalance when a second member
    joins, and reclaim all partitions when the first member leaves."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    yield from self.wait_topic(client, 'topic2')

    subscription = SubscriptionState('latest')
    subscription.subscribe(topics=('topic1', 'topic2'))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        session_timeout_ms=10000,
        heartbeat_interval_ms=500,
        retry_backoff_ms=100)

    self.assertEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, True)

    yield from coordinator.ensure_coordinator_known()
    self.assertNotEqual(coordinator.coordinator_id, None)

    yield from coordinator.ensure_active_group()
    self.assertNotEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, False)

    # Sole member owns every partition of both topics.
    tp_list = subscription.assigned_partitions()
    self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                   ('topic2', 0), ('topic2', 1)]))

    # start second coordinator
    client2 = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client2.bootstrap()
    subscription2 = SubscriptionState('latest')
    subscription2.subscribe(topics=('topic1', 'topic2'))
    coordinator2 = GroupCoordinator(
        client2, subscription2, loop=self.loop,
        session_timeout_ms=10000,
        heartbeat_interval_ms=500,
        retry_backoff_ms=100)
    yield from coordinator2.ensure_active_group()
    yield from coordinator.ensure_active_group()

    # After rebalance each member holds half; together they cover all.
    tp_list = subscription.assigned_partitions()
    self.assertEqual(len(tp_list), 2)
    tp_list2 = subscription2.assigned_partitions()
    self.assertEqual(len(tp_list2), 2)
    tp_list |= tp_list2
    self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                   ('topic2', 0), ('topic2', 1)]))

    yield from coordinator.close()
    yield from client.close()

    yield from asyncio.sleep(0.6, loop=self.loop)  # wait heartbeat
    yield from coordinator2.ensure_active_group()
    # Remaining member takes over all partitions.
    tp_list = subscription2.assigned_partitions()
    self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                   ('topic2', 0), ('topic2', 1)]))

    yield from coordinator2.close()
    yield from client2.close()
async def test_different_connections_in_conn_groups(self):
    """Different connection groups get distinct sockets to the same node."""
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=10000)
    await client.bootstrap()
    self.add_cleanup(client.close)

    node_id = client.get_random_node()
    conn1 = await client._get_conn(node_id)
    conn2 = await client._get_conn(
        node_id, group=ConnectionGroup.COORDINATION)

    # Separate connection objects, but pointing at the same broker.
    self.assertTrue(conn1 is not conn2)
    self.assertEqual((conn1.host, conn1.port), (conn2.host, conn2.port))
def _setup_error_after_data(self):
    """Build a Fetcher holding data for one partition and a pending
    OffsetOutOfRange error for the other.

    Returns:
        tuple: (fetcher, tp1, tp2, messages) where tp1 carries the error
        and tp2 carries ``messages``.
    """
    subscriptions = SubscriptionState(loop=self.loop)
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=[])
    fetcher = Fetcher(client, subscriptions, loop=self.loop)
    tp1 = TopicPartition('some_topic', 0)
    tp2 = TopicPartition('some_topic', 1)

    subscriptions.subscribe(set(["some_topic"]))
    subscriptions.assign_from_subscribed({tp1, tp2})
    assignment = subscriptions.subscription.assignment
    subscriptions.seek(tp1, 0)
    subscriptions.seek(tp2, 0)

    # Add some data
    messages = [ConsumerRecord(
        topic="some_topic", partition=1, offset=0, timestamp=0,
        timestamp_type=0, key=None, value=b"some", checksum=None,
        serialized_key_size=0, serialized_value_size=4)]
    fetcher._records[tp2] = FetchResult(
        tp2, assignment=assignment, loop=self.loop,
        message_iterator=iter(messages), backoff=0, fetch_offset=0)
    # Add some error
    fetcher._records[tp1] = FetchError(
        loop=self.loop, error=OffsetOutOfRangeError({}), backoff=0)
    return fetcher, tp1, tp2, messages
def _setup_error_after_data(self):
    """Build a Fetcher holding data for one partition and a pending
    OffsetOutOfRange error for the other (older SubscriptionState API).

    Returns:
        tuple: (fetcher, tp1, tp2, messages) where tp1 carries the error
        and tp2 carries ``messages``.
    """
    subscriptions = SubscriptionState('latest')
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=[])
    fetcher = Fetcher(client, subscriptions, loop=self.loop)
    tp1 = TopicPartition('some_topic', 0)
    tp2 = TopicPartition('some_topic', 1)

    # Assign both partitions manually with position 0.
    state = TopicPartitionState()
    state.seek(0)
    subscriptions.assignment[tp1] = state
    state = TopicPartitionState()
    state.seek(0)
    subscriptions.assignment[tp2] = state
    subscriptions.needs_partition_assignment = False

    # Add some data
    messages = [ConsumerRecord(
        topic="some_topic", partition=1, offset=0, timestamp=0,
        timestamp_type=0, key=None, value=b"some", checksum=None,
        serialized_key_size=0, serialized_value_size=4)]
    fetcher._records[tp2] = FetchResult(
        tp2, subscriptions=subscriptions, loop=self.loop,
        records=iter(messages), backoff=0)
    # Add some error
    fetcher._records[tp1] = FetchError(
        loop=self.loop, error=OffsetOutOfRangeError({}), backoff=0)
    return fetcher, tp1, tp2, messages
def test_get_leader_exceptions_when_noleader(self, protocol):
    """Leaderless partitions raise LeaderNotAvailableError, unknown ones
    raise UnknownTopicOrPartitionError; leaders resolve after refresh."""
    @asyncio.coroutine
    def recv(request_id):
        return b"response"

    mocked_conns = {("broker_1", 4567): mock.MagicMock()}
    mocked_conns[("broker_1", 4567)].recv.side_effect = recv
    client = AIOKafkaClient(["broker_1:4567"], loop=self.loop)
    client._conns = mocked_conns

    brokers = [BrokerMetadata(0, "broker_1", 4567),
               BrokerMetadata(1, "broker_2", 5678)]
    topics = [
        TopicMetadata(
            "topic_noleader", NO_ERROR, [
                PartitionMetadata("topic_noleader", 0, -1, [], [],
                                  NO_LEADER),
                PartitionMetadata("topic_noleader", 1, -1, [], [],
                                  NO_LEADER),
            ],
        )
    ]
    protocol.decode_metadata_response.return_value = MetadataResponse(
        brokers, topics)

    self.loop.run_until_complete(client.load_metadata_for_topics())
    # Leaderless partitions map to None in the broker cache.
    self.assertDictEqual(
        {TopicAndPartition("topic_noleader", 0): None,
         TopicAndPartition("topic_noleader", 1): None},
        client._topics_to_brokers,
    )

    # No leader partitions -- raise LeaderNotAvailableError
    with self.assertRaises(LeaderNotAvailableError):
        self.assertIsNone(self.loop.run_until_complete(
            client._get_leader_for_partition("topic_noleader", 0)))
    with self.assertRaises(LeaderNotAvailableError):
        self.assertIsNone(self.loop.run_until_complete(
            client._get_leader_for_partition("topic_noleader", 1)))

    # Unknown partitions -- raise UnknownTopicOrPartitionError
    with self.assertRaises(UnknownTopicOrPartitionError):
        self.assertIsNone(self.loop.run_until_complete(
            client._get_leader_for_partition("topic_noleader", 2)))

    # Once leaders are elected the lookups succeed.
    topics = [
        TopicMetadata(
            "topic_noleader", NO_ERROR, [
                PartitionMetadata("topic_noleader", 0, 0, [0, 1], [0, 1],
                                  NO_ERROR),
                PartitionMetadata("topic_noleader", 1, 1, [1, 0], [1, 0],
                                  NO_ERROR),
            ],
        )
    ]
    protocol.decode_metadata_response.return_value = MetadataResponse(
        brokers, topics)
    self.assertEqual(
        brokers[0],
        self.loop.run_until_complete(
            client._get_leader_for_partition("topic_noleader", 0))
    )
    self.assertEqual(
        brokers[1],
        self.loop.run_until_complete(
            client._get_leader_for_partition("topic_noleader", 1))
    )
def test_load_metadata(self):
    """Metadata load parses brokers/topics (including error codes), and
    ready() drops dead connections and reports unreachable nodes."""
    brokers = [(0, 'broker_1', 4567),
               (1, 'broker_2', 5678)]
    topics = [(NO_ERROR, 'topic_1', [
                  (NO_ERROR, 0, 1, [1, 2], [1, 2])]),
              (NO_ERROR, 'topic_2', [
                  (NO_LEADER, 0, -1, [], []),
                  (NO_LEADER, 1, 1, [], []),
              ]),
              (NO_LEADER, 'topic_no_partitions', []),
              (UNKNOWN_TOPIC_OR_PARTITION, 'topic_unknown', []),
              (NO_ERROR, 'topic_3', [
                  (NO_ERROR, 0, 0, [0, 1], [0, 1]),
                  (NO_ERROR, 1, 1, [1, 0], [1, 0]),
                  (NO_ERROR, 2, 0, [0, 1], [0, 1])]),
              (NO_ERROR, 'topic_4', [
                  (NO_ERROR, 0, 0, [0, 1], [0, 1]),
                  (REPLICA_NOT_AVAILABLE, 1, 1, [1, 0], [1, 0]),
              ])]

    @asyncio.coroutine
    def send(request_id):
        return MetadataResponse(brokers, topics)

    mocked_conns = {0: mock.MagicMock()}
    mocked_conns[0].send.side_effect = send
    client = AIOKafkaClient(loop=self.loop,
                            bootstrap_servers=['broker_1:4567'])
    # BUGFIX: ``asyncio.async`` was deprecated since 3.4.4 and became a
    # SyntaxError once ``async`` turned into a keyword (Python 3.7+);
    # ``asyncio.ensure_future`` is the drop-in replacement.
    task = asyncio.ensure_future(client._md_synchronizer(), loop=self.loop)
    client._conns = mocked_conns
    client.cluster.update_metadata(MetadataResponse(brokers[:1], []))

    self.loop.run_until_complete(client.force_metadata_update())
    task.cancel()

    md = client.cluster
    c_brokers = md.brokers()
    self.assertEqual(len(c_brokers), 2)
    # Brokers gain a trailing ``rack`` slot (None) in cluster metadata.
    expected_brokers = [(0, 'broker_1', 4567, None),
                        (1, 'broker_2', 5678, None)]
    self.assertEqual(sorted(expected_brokers), sorted(list(c_brokers)))
    c_topics = md.topics()
    # Errored topics (no_partitions / unknown) are excluded.
    self.assertEqual(len(c_topics), 4)
    self.assertEqual(md.partitions_for_topic('topic_1'), set([0]))
    self.assertEqual(md.partitions_for_topic('topic_2'), set([0, 1]))
    self.assertEqual(md.partitions_for_topic('topic_3'), set([0, 1, 2]))
    self.assertEqual(md.partitions_for_topic('topic_4'), set([0, 1]))
    # Only the partition with a live leader is "available".
    self.assertEqual(
        md.available_partitions_for_topic('topic_2'), set([1]))

    mocked_conns[0].connected.return_value = False
    is_ready = self.loop.run_until_complete(client.ready(0))
    self.assertEqual(is_ready, False)
    is_ready = self.loop.run_until_complete(client.ready(1))
    self.assertEqual(is_ready, False)
    # Dead connection must have been purged from the cache.
    self.assertEqual(mocked_conns, {})

    with self.assertRaises(NodeNotReadyError):
        self.loop.run_until_complete(client.send(0, None))
def test_set_topics_trigger_metadata_update(self):
    """set_topics refreshes metadata only when the topic set grows (or is
    reset to "all topics" via [])."""
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=10000)
    yield from client.bootstrap()
    self.add_cleanup(client.close)

    orig = client._metadata_update
    with mock.patch.object(client, '_metadata_update') as mocked:
        @asyncio.coroutine
        def new(*args, **kw):
            # Small delay so we can observe in-flight refreshes.
            yield from asyncio.sleep(0.01, loop=self.loop)
            return (yield from orig(*args, **kw))
        mocked.side_effect = new

        yield from client.set_topics(["topic1"])
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)
        # Same topics list should not trigger update
        yield from client.set_topics(["topic1"])
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)
        yield from client.set_topics(["topic1", "topic2"])
        self.assertEqual(
            len(client._metadata_update.mock_calls), 2)
        # Less topics should not update too
        yield from client.set_topics(["topic2"])
        self.assertEqual(
            len(client._metadata_update.mock_calls), 2)

        # Setting [] should force update as it means all topics
        yield from client.set_topics([])
        self.assertEqual(
            len(client._metadata_update.mock_calls), 3)

        # Changing topics during refresh should trigger 2 refreshes
        client.set_topics(["topic3"])
        yield from asyncio.sleep(0.001, loop=self.loop)
        self.assertEqual(
            len(client._metadata_update.mock_calls), 4)
        yield from client.set_topics(["topic3", "topics4"])
        self.assertEqual(
            len(client._metadata_update.mock_calls), 5)
def wait_kafka(cls): cls.hosts = ['{}:{}'.format(cls.kafka_host, cls.kafka_port)] # Reconnecting until Kafka in docker becomes available for i in range(500): client = AIOKafkaClient(loop=cls.loop, bootstrap_servers=cls.hosts) try: cls.loop.run_until_complete(client.bootstrap()) # Broker can still be loading cluster layout, so we can get 0 # brokers. That counts as still not available if client.cluster.brokers(): return except ConnectionError: pass finally: cls.loop.run_until_complete(client.close()) time.sleep(0.1) assert False, "Kafka server never started"
def test_update_fetch_positions(self):
    """update_fetch_positions: ignores unassigned partitions, resolves a
    reset via OffsetRequest, and raises on leader/offset errors."""
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=[])
    subscriptions = SubscriptionState('latest')
    fetcher = Fetcher(client, subscriptions, loop=self.loop)
    partition = TopicPartition('test', 0)

    # partition is not assigned, should be ignored
    yield from fetcher.update_fetch_positions([partition])

    state = TopicPartitionState()
    state.seek(0)
    subscriptions.assignment[partition] = state
    # partition is fetchable, no need to update position
    yield from fetcher.update_fetch_positions([partition])

    client.ready = mock.MagicMock()
    client.ready.side_effect = asyncio.coroutine(lambda a: True)
    client.force_metadata_update = mock.MagicMock()
    client.force_metadata_update.side_effect = asyncio.coroutine(
        lambda: False)
    client.send = mock.MagicMock()
    # OffsetResponse with offset 4 for partition 0, no error.
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: OffsetResponse([('test', [(0, 0, [4])])]))

    state.await_reset(OffsetResetStrategy.LATEST)
    client.cluster.leader_for_partition = mock.MagicMock()
    # First two lookups fail (no leader, then leaderless -1), third works.
    client.cluster.leader_for_partition.side_effect = [None, -1, 0]
    yield from fetcher.update_fetch_positions([partition])
    self.assertEqual(state.position, 4)

    client.cluster.leader_for_partition = mock.MagicMock()
    client.cluster.leader_for_partition.return_value = 1
    client.send = mock.MagicMock()
    # Error code 3: UnknownTopicOrPartition.
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: OffsetResponse([('test', [(0, 3, [])])]))

    state.await_reset(OffsetResetStrategy.LATEST)
    with self.assertRaises(UnknownTopicOrPartitionError):
        yield from fetcher.update_fetch_positions([partition])

    # Unexpected error code -1 maps to UnknownError.
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: OffsetResponse([('test', [(0, -1, [])])]))
    with self.assertRaises(UnknownError):
        yield from fetcher.update_fetch_positions([partition])
    yield from fetcher.close()
def test_metadata_synchronizer(self):
    """Background synchronizer keeps retrying metadata updates while
    each attempt is reported as failed."""
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=100)

    with mock.patch.object(
            AIOKafkaClient, '_metadata_update') as mocked:
        @asyncio.coroutine
        def dummy(*d, **kw):
            # Mark every refresh as failed so the synchronizer retries.
            client.cluster.failed_update(None)
        mocked.side_effect = dummy

        yield from client.bootstrap()
        # Several refresh intervals (100ms) should elapse here.
        yield from asyncio.sleep(0.15, loop=self.loop)
        yield from client.close()

        self.assertNotEqual(
            len(client._metadata_update.mock_calls), 0)
def test_concurrent_send_on_different_connection_groups(self):
    """A slow request on the default group must not block a request sent
    on the coordination connection group."""
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=10000)
    yield from client.bootstrap()
    self.add_cleanup(client.close)

    yield from self.wait_topic(client, self.topic)

    node_id = client.get_random_node()
    broker = client.cluster.broker_metadata(node_id)
    client.cluster.add_coordinator(
        node_id, broker.host, broker.port, rack=None,
        purpose=(CoordinationType.GROUP, ""))

    # Fetch on an empty partition will wait up to max_wait_ms server-side.
    wait_request = FetchRequest_v0(
        -1,  # replica_id
        500,  # max_wait_ms
        1024 * 1024,  # min_bytes
        [(self.topic, [(0, 0, 1024)]
          )])
    vanila_request = MetadataRequest([])

    send_time = self.loop.time()
    long_task = self.loop.create_task(
        client.send(node_id, wait_request)
    )
    yield from asyncio.sleep(0.0001, loop=self.loop)
    self.assertFalse(long_task.done())

    # Metadata request on the coordination group completes independently.
    yield from client.send(
        node_id, vanila_request, group=ConnectionGroup.COORDINATION)
    resp_time = self.loop.time()

    self.assertFalse(long_task.done())

    fetch_resp = yield from long_task
    # Check error code like resp->topics[0]->partitions[0]->error_code
    self.assertEqual(fetch_resp.topics[0][1][0][1], 0)

    # Check that vanila request actually executed after wait request
    self.assertLess(resp_time - send_time, 0.5)
def test_check_version(self):
    """check_version returns a stable broker version string and raises
    when the broker cannot be probed or connected."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    ver = yield from client.check_version()
    self.assertTrue('0.' in ver)
    yield from self.wait_topic(client, 'some_test_topic')
    ver2 = yield from client.check_version()
    self.assertEqual(ver, ver2)
    # Explicit node id should report the same version.
    ver2 = yield from client.check_version(client.get_random_node())
    self.assertEqual(ver, ver2)

    with mock.patch.object(
            AIOKafkaConnection, 'send') as mocked:
        mocked.side_effect = KafkaError('mocked exception')
        with self.assertRaises(UnrecognizedBrokerVersion):
            yield from client.check_version(client.get_random_node())

    # No connection at all -> ConnectionError.
    client._get_conn = asyncio.coroutine(lambda _: None)
    with self.assertRaises(ConnectionError):
        yield from client.check_version()
def test_different_connections_in_conn_groups(self):
    """Coordination group gets its own connection to the same broker."""
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=10000)
    yield from client.bootstrap()
    self.add_cleanup(client.close)

    node_id = client.get_random_node()
    broker = client.cluster.broker_metadata(node_id)
    # Register the same broker as the group coordinator.
    client.cluster.add_coordinator(
        node_id, broker.host, broker.port, rack=None,
        purpose=(CoordinationType.GROUP, ""))

    conn1 = yield from client._get_conn(node_id)
    conn2 = yield from client._get_conn(
        node_id, group=ConnectionGroup.COORDINATION)

    # Distinct connection objects to the identical endpoint.
    self.assertTrue(conn1 is not conn2)
    self.assertEqual((conn1.host, conn1.port), (conn2.host, conn2.port))
def test_send_produce_request_raises_when_topic_unknown(self, protocol):
    """Producing to a topic with UNKNOWN_TOPIC_OR_PARTITION metadata
    raises instead of sending."""
    @asyncio.coroutine
    def recv(request_id):
        return b"response"

    mocked_conns = {("broker_1", 4567): mock.MagicMock()}
    mocked_conns[("broker_1", 4567)].recv.side_effect = recv
    client = AIOKafkaClient(["broker_1:4567"], loop=self.loop)
    client._conns = mocked_conns

    brokers = [BrokerMetadata(0, "broker_1", 4567),
               BrokerMetadata(1, "broker_2", 5678)]
    topics = [TopicMetadata("topic_doesnt_exist",
                            UNKNOWN_TOPIC_OR_PARTITION, [])]
    protocol.decode_metadata_response.return_value = MetadataResponse(
        brokers, topics)

    self.loop.run_until_complete(client.load_metadata_for_topics())

    requests = [ProduceRequest(
        "topic_doesnt_exist", 0,
        [create_message("a"), create_message("b")])]

    with self.assertRaises(UnknownTopicOrPartitionError):
        self.loop.run_until_complete(
            client.send_produce_request(requests))
def test_ensure_topic_exists(self, protocol):
    """ensure_topic_exists raises for unknown topics, times out for
    still-creating ones, and accepts topics with leaderless partitions."""
    mocked_conns = {("broker_1", 4567): mock.MagicMock()}
    fut = asyncio.Future(loop=self.loop)
    fut.set_result(b"response")
    mocked_conns[("broker_1", 4567)].send.return_value = fut
    client = AIOKafkaClient(["broker_1:4567"], loop=self.loop)
    client._conns = mocked_conns

    brokers = [BrokerMetadata(0, "broker_1", 4567),
               BrokerMetadata(1, "broker_2", 5678)]
    topics = [
        TopicMetadata("topic_still_creating", NO_LEADER, []),
        TopicMetadata("topic_doesnt_exist",
                      UNKNOWN_TOPIC_OR_PARTITION, []),
        TopicMetadata(
            "topic_noleaders", NO_ERROR, [
                PartitionMetadata("topic_noleaders", 0, -1, [], [],
                                  NO_LEADER),
                PartitionMetadata("topic_noleaders", 1, -1, [], [],
                                  NO_LEADER),
            ],
        ),
    ]
    protocol.decode_metadata_response.return_value = MetadataResponse(
        brokers, topics)

    self.loop.run_until_complete(client.load_metadata_for_topics())

    with self.assertRaises(UnknownTopicOrPartitionError):
        self.loop.run_until_complete(
            client.ensure_topic_exists("topic_doesnt_exist", timeout=1))

    with self.assertRaises(KafkaTimeoutError):
        self.loop.run_until_complete(
            client.ensure_topic_exists("topic_still_creating", timeout=1))

    # This should not raise
    self.loop.run_until_complete(
        client.ensure_topic_exists("topic_noleaders", timeout=1))
def test_has_metadata_for_topic(self, protocol):
    """has_metadata_for_topic is True only for topics that actually have
    partition metadata (leaders not required)."""
    @asyncio.coroutine
    def recv(request_id):
        return b"response"

    mocked_conns = {("broker_1", 4567): mock.MagicMock()}
    mocked_conns[("broker_1", 4567)].recv.side_effect = recv
    client = AIOKafkaClient(["broker_1:4567"], loop=self.loop)
    client._conns = mocked_conns

    brokers = [BrokerMetadata(0, "broker_1", 4567),
               BrokerMetadata(1, "broker_2", 5678)]
    topics = [
        TopicMetadata("topic_still_creating", NO_LEADER, []),
        TopicMetadata("topic_doesnt_exist",
                      UNKNOWN_TOPIC_OR_PARTITION, []),
        TopicMetadata(
            "topic_noleaders", NO_ERROR, [
                PartitionMetadata("topic_noleaders", 0, -1, [], [],
                                  NO_LEADER),
                PartitionMetadata("topic_noleaders", 1, -1, [], [],
                                  NO_LEADER),
            ],
        ),
    ]
    protocol.decode_metadata_response.return_value = MetadataResponse(
        brokers, topics)

    self.loop.run_until_complete(client.load_metadata_for_topics())

    # Topics with no partitions return False
    self.assertFalse(client.has_metadata_for_topic("topic_still_creating"))
    self.assertFalse(client.has_metadata_for_topic("topic_doesnt_exist"))

    # Topic with partition metadata, but no leaders return True
    self.assertTrue(client.has_metadata_for_topic("topic_noleaders"))
def test_fetchoffsets_failed_scenarios(self):
    """Exercise fetch_committed_offsets error branches: retriable errors
    are retried, fatal ones raise, unknown topics yield an empty dict."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    subscription = SubscriptionState('earliest')
    subscription.subscribe(topics=('topic1',))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='fetch-offsets-group')

    yield from coordinator.ensure_active_group()

    offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
    with mock.patch('kafka.common.for_code') as mocked:
        # Load-in-progress is retried until NoError.
        mocked.side_effect = MockedKafkaErrCode(
            Errors.GroupLoadInProgressError, Errors.NoError)
        yield from coordinator.fetch_committed_offsets(offsets)

        # UnknownMemberId raises and forces re-assignment.
        mocked.side_effect = MockedKafkaErrCode(
            Errors.UnknownMemberIdError, Errors.NoError)
        with self.assertRaises(Errors.UnknownMemberIdError):
            yield from coordinator.fetch_committed_offsets(offsets)
        self.assertEqual(subscription.needs_partition_assignment, True)

        mocked.side_effect = None
        # Unknown topic/partition produces an empty result, not an error.
        mocked.return_value = Errors.UnknownTopicOrPartitionError
        r = yield from coordinator.fetch_committed_offsets(offsets)
        self.assertEqual(r, {})

        mocked.return_value = KafkaError
        with self.assertRaises(KafkaError):
            yield from coordinator.fetch_committed_offsets(offsets)

        # NotCoordinatorForGroup triggers coordinator rediscovery + retry.
        mocked.side_effect = MockedKafkaErrCode(
            Errors.NotCoordinatorForGroupError, Errors.NoError,
            Errors.NoError, Errors.NoError)
        yield from coordinator.fetch_committed_offsets(offsets)

    yield from coordinator.close()
    yield from client.close()