    async def get_offsets_by_times(self, timestamps, timeout_ms):
        offsets = await self._retrieve_offsets(timestamps, timeout_ms)
        for tp in timestamps:
            if tp not in offsets:
                offsets[tp] = None
            else:
                offset, timestamp = offsets[tp]
                if offset == UNKNOWN_OFFSET:
                    offsets[tp] = None
                else:
                    offsets[tp] = OffsetAndTimestamp(offset, timestamp)
        return offsets
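
    # A minimal sketch of the `_retrieve_offsets` helper awaited above,
    # inferred from the tests below rather than copied from the library:
    # an `asyncio.TimeoutError` raised while awaiting `_proc_offset_requests`
    # is surfaced as `KafkaTimeoutError` (see the first case in
    # `test_fetcher_offsets_for_times`). The real helper likely does more
    # (e.g. retrying retriable errors); only the timeout conversion is
    # illustrated here, and the body should be treated as an assumption.
    async def _retrieve_offsets(self, timestamps, timeout_ms):
        if not timestamps:
            return {}
        try:
            return await asyncio.wait_for(
                self._proc_offset_requests(timestamps),
                timeout=timeout_ms / 1000,
            )
        except asyncio.TimeoutError:
            raise KafkaTimeoutError(
                "Failed to get offsets by timestamps in %s ms" % timeout_ms)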

    async def test_fetcher__update_fetch_positions(self):
        client = AIOKafkaClient(bootstrap_servers=[])
        subscriptions = SubscriptionState()
        fetcher = Fetcher(client, subscriptions)
        self.add_cleanup(fetcher.close)
        # Disable the background fetch task
        fetcher._fetch_task.cancel()
        try:
            await fetcher._fetch_task
        except asyncio.CancelledError:
            pass
        fetcher._fetch_task = create_task(asyncio.sleep(1000000))

        partition = TopicPartition('test', 0)
        offsets = {partition: OffsetAndTimestamp(12, -1)}

        async def _proc_offset_request(node_id, topic_data):
            return offsets

        fetcher._proc_offset_request = mock.Mock()
        fetcher._proc_offset_request.side_effect = _proc_offset_request

        def reset_assignment():
            subscriptions.assign_from_user({partition})
            assignment = subscriptions.subscription.assignment
            tp_state = assignment.state_value(partition)
            return assignment, tp_state
        assignment, tp_state = reset_assignment()

        self.assertIsNone(tp_state._position)

        # CASE: reset from committed
        # In the basic case we need to wait for the committed offset
        update_task = create_task(
            fetcher._update_fetch_positions(assignment, 0, [partition]),
        )
        await asyncio.sleep(0.1)
        self.assertFalse(update_task.done())

        # Will continue only after committed is resolved
        tp_state.update_committed(OffsetAndMetadata(4, ""))
        needs_wakeup = await update_task
        self.assertFalse(needs_wakeup)
        self.assertEqual(tp_state._position, 4)
        self.assertEqual(fetcher._proc_offset_request.call_count, 0)

        # CASE: will not query committed if position already present
        await fetcher._update_fetch_positions(assignment, 0, [partition])
        self.assertEqual(tp_state._position, 4)
        self.assertEqual(fetcher._proc_offset_request.call_count, 0)

        # CASE: awaiting_reset for the partition
        tp_state.await_reset(OffsetResetStrategy.LATEST)
        self.assertIsNone(tp_state._position)
        await fetcher._update_fetch_positions(assignment, 0, [partition])
        self.assertEqual(tp_state._position, 12)
        self.assertEqual(fetcher._proc_offset_request.call_count, 1)

        # CASE: seek() called while waiting for committed to be resolved
        assignment, tp_state = reset_assignment()
        update_task = create_task(
            fetcher._update_fetch_positions(assignment, 0, [partition]),
        )
        await asyncio.sleep(0.1)
        self.assertFalse(update_task.done())
        tp_state.seek(8)
        tp_state.update_committed(OffsetAndMetadata(4, ""))
        await update_task
        self.assertEqual(tp_state._position, 8)
        self.assertEqual(fetcher._proc_offset_request.call_count, 1)

        # CASE: awaiting_reset set while waiting for committed
        assignment, tp_state = reset_assignment()
        update_task = create_task(
            fetcher._update_fetch_positions(assignment, 0, [partition]),
        )
        await asyncio.sleep(0.1)
        self.assertFalse(update_task.done())
        tp_state.await_reset(OffsetResetStrategy.LATEST)
        tp_state.update_committed(OffsetAndMetadata(4, ""))
        await update_task
        self.assertEqual(tp_state._position, 12)
        self.assertEqual(fetcher._proc_offset_request.call_count, 2)

        # CASE: reset using default strategy if committed offset undefined
        assignment, tp_state = reset_assignment()
        loop = get_running_loop()
        loop.call_later(
            0.01, tp_state.update_committed, OffsetAndMetadata(-1, ""))
        await fetcher._update_fetch_positions(assignment, 0, [partition])
        self.assertEqual(tp_state._position, 12)
        self.assertEqual(fetcher._records, {})

        # CASE: set error if _default_reset_strategy = OffsetResetStrategy.NONE
        assignment, tp_state = reset_assignment()
        loop.call_later(
            0.01, tp_state.update_committed, OffsetAndMetadata(-1, ""))
        fetcher._default_reset_strategy = OffsetResetStrategy.NONE
        needs_wakeup = await fetcher._update_fetch_positions(
            assignment, 0, [partition])
        self.assertTrue(needs_wakeup)
        self.assertIsNone(tp_state._position)
        self.assertIsInstance(fetcher._records[partition], FetchError)
        fetcher._records.clear()

        # CASE: if _proc_offset_request errored, we will retry on another spin
        fetcher._proc_offset_request.side_effect = UnknownError()
        assignment, tp_state = reset_assignment()
        tp_state.await_reset(OffsetResetStrategy.LATEST)
        await fetcher._update_fetch_positions(assignment, 0, [partition])
        self.assertIsNone(tp_state._position)
        self.assertTrue(tp_state.awaiting_reset)

        # CASE: reset 2 partitions separately, 1 will raise, 1 will get
        # committed
        fetcher._proc_offset_request.side_effect = _proc_offset_request
        partition2 = TopicPartition('test', 1)
        subscriptions.assign_from_user({partition, partition2})
        assignment = subscriptions.subscription.assignment
        tp_state = assignment.state_value(partition)
        tp_state2 = assignment.state_value(partition2)
        tp_state.await_reset(OffsetResetStrategy.LATEST)
        loop.call_later(
            0.01, tp_state2.update_committed, OffsetAndMetadata(5, ""))
        await fetcher._update_fetch_positions(
            assignment, 0, [partition, partition2])
        self.assertEqual(tp_state.position, 12)
        self.assertEqual(tp_state2.position, 5)

    async def test_fetcher_offsets_for_times(self):
        client = AIOKafkaClient(bootstrap_servers=[])

        async def ready(conn):
            return True

        async def _maybe_wait_metadata():
            return False

        client.ready = mock.MagicMock()
        client.ready.side_effect = ready
        client._maybe_wait_metadata = mock.MagicMock()
        client._maybe_wait_metadata.side_effect = _maybe_wait_metadata
        client.cluster.leader_for_partition = mock.MagicMock()
        client.cluster.leader_for_partition.return_value = 0
        client._api_version = (0, 10, 1)

        subscriptions = SubscriptionState()
        fetcher = Fetcher(client, subscriptions)
        tp0 = TopicPartition("topic", 0)
        tp1 = TopicPartition("topic", 1)

        # Timing out will result in KafkaTimeoutError
        with mock.patch.object(fetcher, "_proc_offset_requests") as mocked:
            mocked.side_effect = asyncio.TimeoutError

            with self.assertRaises(KafkaTimeoutError):
                await fetcher.get_offsets_by_times({tp0: 0}, 1000)

        # Broker returns UnsupportedForMessageFormatError
        with mock.patch.object(client, "send") as mocked:
            async def mock_send(node_id, request):
                return OffsetResponse[1]([
                    ("topic", [(0, 43, -1, -1)]),
                    ("topic", [(1, 0, 1000, 9999)]),
                ])
            mocked.side_effect = mock_send
            offsets = await fetcher.get_offsets_by_times(
                {tp0: 0, tp1: 0}, 1000)
            self.assertEqual(offsets, {
                tp0: None,
                tp1: OffsetAndTimestamp(9999, 1000),
            })

        # Broker returns NotLeaderForPartitionError
        with mock.patch.object(client, "send") as mocked:
            async def mock_send(node_id, request):
                return OffsetResponse[1]([
                    ("topic", [(0, 6, -1, -1)]),
                ])
            mocked.side_effect = mock_send
            with self.assertRaises(NotLeaderForPartitionError):
                await fetcher._proc_offset_request(0, {"topic": (0, 1000)})

        # Broker returns UnknownTopicOrPartitionError
        with mock.patch.object(client, "send") as mocked:
            async def mock_send(node_id, request):
                return OffsetResponse[1]([
                    ("topic", [(0, 3, -1, -1)]),
                ])
            mocked.side_effect = mock_send
            with self.assertLogs("aiokafka.consumer.fetcher", "WARN") as cm:
                with self.assertRaises(UnknownTopicOrPartitionError):
                    await fetcher._proc_offset_request(
                        0, {"topic": (0, 1000)})
            if cm is not None:
                self.assertIn(
                    "Received unknown topic or partition error",
                    cm.output[0])