def test_send_broker_unaware_request(self):
    """Broker-unaware requests skip failing brokers and use a live one."""
    conns = {
        ('kafka01', 9092): MagicMock(),
        ('kafka02', 9092): MagicMock(),
        ('kafka03', 9092): MagicMock(),
    }

    # inject KafkaConnection side effects: only kafka02 will succeed
    mock_conn(conns[('kafka01', 9092)], success=False)
    mock_conn(conns[('kafka03', 9092)], success=False)

    future = Future()
    live = conns[('kafka02', 9092)]
    live.send.return_value = future
    live.recv.side_effect = lambda: future.success('valid response')

    def mock_get_conn(host, port):
        return conns[(host, port)]

    # patch to avoid making requests before we want it
    with patch.object(SimpleClient, 'load_metadata_for_topics'):
        with patch.object(SimpleClient, '_get_conn', side_effect=mock_get_conn):
            client = SimpleClient(hosts='kafka01:9092,kafka02:9092')
            resp = client._send_broker_unaware_request(
                payloads=['fake request'],
                encoder_fn=MagicMock(),
                decoder_fn=lambda x: x)

            self.assertEqual('valid response', resp)
            live.recv.assert_called_once_with()
def _send_offset_request(self, partition, timestamp):
    """Fetch a single offset before the given timestamp for the partition.

    Arguments:
        partition (TopicPartition): partition that needs fetching offset
        timestamp (int): timestamp for fetching offset

    Returns:
        Future: resolves to the corresponding offset
    """
    leader = self._client.cluster.leader_for_partition(partition)
    if leader is None:
        log.debug("Partition %s is unknown for fetching offset,"
                  " wait for metadata refresh", partition)
        return Future().failure(Errors.StaleMetadata(partition))
    if leader == -1:
        log.debug("Leader for partition %s unavailable for fetching offset,"
                  " wait for metadata refresh", partition)
        return Future().failure(Errors.LeaderNotAvailableError(partition))

    request = OffsetRequest[0](
        -1, [(partition.topic, [(partition.partition, timestamp, 1)])]
    )

    # Client returns a future that only fails on network issues
    # so create a separate future and attach a callback to update it
    # based on response error codes
    future = Future()
    _f = self._client.send(leader, request)
    _f.add_callback(self._handle_offset_response, partition, future)
    _f.add_errback(lambda e: future.failure(e))
    return future
def test_send_offset_commit_request_success(mocker, patched_coord, offsets):
    """A successful send routes the response to the commit handler."""
    client_future = Future()
    patched_coord._client.send.return_value = client_future

    future = patched_coord._send_offset_commit_request(offsets)
    (node, request), _ = patched_coord._client.send.call_args

    response = OffsetCommitResponse[0]([('foobar', [(0, 0), (1, 0)])])
    client_future.success(response)

    patched_coord._handle_offset_commit_response.assert_called_with(
        offsets, future, mocker.ANY, response)
def test_send_offset_fetch_request_success(patched_coord, partitions):
    """A successful send routes the response to the fetch handler."""
    client_future = Future()
    patched_coord._client.send.return_value = client_future

    future = patched_coord._send_offset_fetch_request(partitions)
    (node, request), _ = patched_coord._client.send.call_args

    response = OffsetFetchResponse[0](
        [('foobar', [(0, 123, b'', 0), (1, 234, b'', 0)])])
    client_future.success(response)

    patched_coord._handle_offset_fetch_response.assert_called_with(
        future, response)
def test_send_offset_fetch_request_failure(patched_coord, partitions):
    """A transport-level failure propagates through _failed_request."""
    client_future = Future()
    patched_coord._client.send.return_value = client_future

    future = patched_coord._send_offset_fetch_request(partitions)
    (node, request), _ = patched_coord._client.send.call_args

    error = Exception()
    client_future.failure(error)

    patched_coord._failed_request.assert_called_with(0, request, future, error)
    assert future.failed()
    assert future.exception is error
def test_handle_offset_fetch_response(patched_coord, offsets, response, error, dead):
    """The handler either resolves offsets or fails with the expected error,
    and marks the coordinator dead when appropriate."""
    future = Future()
    patched_coord._handle_offset_fetch_response(future, response)

    if error is None:
        assert future.succeeded()
        assert future.value == offsets
    else:
        assert isinstance(future.exception, error)

    assert patched_coord.coordinator_id is (None if dead else 0)
def send(self, request, expect_response=True):
    """send request, return Future()

    Can block on network if request is larger than send_buffer_bytes
    """
    future = Future()
    # Reject up-front if the connection is not in a sendable state
    if self.connecting():
        error = Errors.NodeNotReadyError(str(self))
    elif not self.connected():
        error = Errors.ConnectionError(str(self))
    elif not self.can_send_more():
        error = Errors.TooManyInFlightRequests(str(self))
    else:
        return self._send(request, expect_response=expect_response)
    return future.failure(error)
def test_process_queue_success_cb(self):
    """ Verify success callback executed on Future success"""
    done_future = Future()
    done_future.is_done = True
    done_future.value = 1
    self.kafka_producer.send = MagicMock(return_value=done_future)

    with self.assertRaises(SuccessException):
        process_queue(self.event_queue, "test", self.kafka_producer,
                      self.success_cb, self.error_cb)
def test_process_queue(self):
    """ Verify message queue is processed."""
    done_future = Future()
    done_future.is_done = True
    done_future.value = 1
    self.kafka_producer.send = MagicMock(return_value=done_future)

    with self.assertRaises(SuccessException):
        process_queue(self.event_queue, "test", self.kafka_producer,
                      self.success_cb, self.error_cb)

    # Verify message queue "get" is called
    self.event_queue.get.assert_called_with()
def test_process_queue_error_cb(self):
    """ Verify error callback executed on Future exception"""
    failed_future = Future()
    failed_future.is_done = True
    failed_future.exception = FailureException()
    error_spy = Mock(wraps=self.error_cb)

    with self.assertRaises(FailureException):
        self.kafka_producer.send = MagicMock(return_value=failed_future)
        process_queue(self.event_queue, "test", self.kafka_producer,
                      self.success_cb, error_spy)

    # Also verify propagation of queue and message to error callback.
    # Message is specified by mocking of self.event_queue.get in setUp.
    error_spy.assert_called_with(1, self.event_queue)
def _send(self, request, expect_response=True):
    """Serialize *request*, write it to the socket, and return a Future.

    If ``expect_response`` the request is recorded in
    ``self.in_flight_requests`` (with its correlation id) so the response
    path can resolve the future later; otherwise the future succeeds with
    ``None`` as soon as the write completes. On any send error the
    connection is closed and the returned future is failed.
    """
    # Only valid once the connection is usable; SASL auth traffic is also
    # sent through here, hence AUTHENTICATING is accepted.
    assert self.state in (ConnectionStates.AUTHENTICATING, ConnectionStates.CONNECTED)
    future = Future()
    correlation_id = self._next_correlation_id()
    header = RequestHeader(request,
                           correlation_id=correlation_id,
                           client_id=self.config['client_id'])
    # Wire format: 4-byte Int32 length prefix, then header + payload
    message = b''.join([header.encode(), request.encode()])
    size = Int32.encode(len(message))
    data = size + message
    try:
        # In the future we might manage an internal write buffer
        # and send bytes asynchronously. For now, just block
        # sending each request payload
        self._sock.setblocking(True)
        total_sent = 0
        # Retry short sends until the whole frame has been written
        while total_sent < len(data):
            sent_bytes = self._sock.send(data[total_sent:])
            total_sent += sent_bytes
        assert total_sent == len(data)
        if self._sensors:
            self._sensors.bytes_sent.record(total_sent)
        self._sock.setblocking(False)
    except (AssertionError, ConnectionError) as e:
        # Socket failure (or short-send assertion): tear down the connection
        # and fail the returned future
        log.exception("Error sending %s to %s", request, self)
        error = Errors.ConnectionError("%s: %s" % (str(self), e))
        self.close(error=error)
        return future.failure(error)
    log.debug('%s Request %d: %s', self, correlation_id, request)

    if expect_response:
        # Record the outstanding request (future + correlation id) for the
        # response-handling path elsewhere in this class
        ifr = InFlightRequest(request=request,
                              correlation_id=correlation_id,
                              response_type=request.RESPONSE_TYPE,
                              future=future,
                              timestamp=time.time())
        self.in_flight_requests.append(ifr)
    else:
        # Fire-and-forget: resolve immediately after a successful write
        future.success(None)

    return future
def _send_heartbeat_request(self):
    """Send a heartbeat request"""
    request = HeartbeatRequest[0](self.group_id,
                                  self.generation,
                                  self.member_id)
    log.debug("Heartbeat: %s[%s] %s", request.group, request.generation_id, request.member_id)  #pylint: disable-msg=no-member
    future = Future()
    # The client future only fails on network errors; bridge it onto our
    # own future via the response/failed-request handlers.
    send_future = self._client.send(self.coordinator_id, request)
    send_future.add_callback(self._handle_heartbeat_response, future)
    send_future.add_errback(self._failed_request, self.coordinator_id,
                            request, future)
    return future
def _send(self, request, expect_response=True):
    """Frame and write *request* to the socket, returning a Future.

    The future resolves with ``None`` immediately when no response is
    expected; otherwise it is registered in ``in_flight_requests`` for
    later resolution. Send errors close the connection and fail the future.
    """
    future = Future()
    correlation_id = self._next_correlation_id()
    header = RequestHeader(request,
                           correlation_id=correlation_id,
                           client_id=self.config['client_id'])
    payload = b''.join([header.encode(), request.encode()])
    # 4-byte length prefix followed by the serialized request
    frame = Int32.encode(len(payload)) + payload
    try:
        # In the future we might manage an internal write buffer
        # and send bytes asynchronously. For now, just block
        # sending each request payload
        self._sock.setblocking(True)
        sent = 0
        while sent < len(frame):
            sent += self._sock.send(frame[sent:])
        assert sent == len(frame)
        if self._sensors:
            self._sensors.bytes_sent.record(sent)
        self._sock.setblocking(False)
    except (AssertionError, ConnectionError) as e:
        log.exception("Error sending %s to %s", request, self)
        error = Errors.ConnectionError("%s: %s" % (str(self), e))
        self.close(error=error)
        return future.failure(error)

    log.debug('%s Request %d: %s', self, correlation_id, request)

    if expect_response:
        self.in_flight_requests.append(
            InFlightRequest(request=request,
                            correlation_id=correlation_id,
                            response_type=request.RESPONSE_TYPE,
                            future=future,
                            timestamp=time.time()))
    else:
        future.success(None)
    return future
def _send_offset_request(self, partitions, timestamp):
    """Fetch a single offset before the given timestamp for each partition.

    Arguments:
        partitions (iterable of TopicPartition): partitions that need
            their offset fetched. NOTE: all entries are assumed to share
            one topic -- only ``partitions[0].topic`` is used when
            building the requests.
        timestamp (int): timestamp for fetching offset

    Returns:
        list of Future: one per leader node contacted, each resolving to
        the corresponding offsets; or a single failed Future if any
        partition currently has no usable leader.
    """
    topic = partitions[0].topic

    # Group partitions by their current leader so one request is sent per node
    partitions_by_node = {}
    for partition in partitions:
        node_id = self._client.cluster.leader_for_partition(partition)
        if node_id is None:
            log.debug("Partition %s is unknown for fetching offset,"
                      " wait for metadata refresh", partition)
            return Future().failure(Errors.StaleMetadata(partition))
        elif node_id == -1:
            log.debug("Leader for partition %s unavailable for fetching offset,"
                      " wait for metadata refresh", partition)
            return Future().failure(Errors.LeaderNotAvailableError(partition))
        partitions_by_node.setdefault(node_id, []).append(partition)

    # Client returns a future that only fails on network issues
    # so create a separate future and attach a callback to update it
    # based on response error codes
    futures = []
    for node_id, node_partitions in six.iteritems(partitions_by_node):
        request = OffsetRequest[0](
            -1,
            [(topic, [(p.partition, timestamp, 1) for p in node_partitions])]
        )
        future_request = Future()
        _f = self._client.send(node_id, request)
        _f.add_callback(self._handle_offset_response, node_partitions,
                        future_request)
        # Bug fix: the original used ``lambda e: future_request.failure(e)``,
        # which late-binds ``future_request`` -- every errback would fail the
        # future created in the *last* loop iteration. Passing the bound
        # method captures the correct future for each request.
        _f.add_errback(future_request.failure)
        futures.append(future_request)

    return futures
def test_send_broker_unaware_request(self):
    """Broker-unaware requests fall through dead brokers to a live one."""
    conns = {("kafka01", 9092): MagicMock(),
             ("kafka02", 9092): MagicMock(),
             ("kafka03", 9092): MagicMock()}

    # inject BrokerConnection side effects: only kafka02 succeeds
    mock_conn(conns[("kafka01", 9092)], success=False)
    mock_conn(conns[("kafka03", 9092)], success=False)

    future = Future()
    live = conns[("kafka02", 9092)]
    live.send.return_value = future
    live.recv.side_effect = lambda: future.success("valid response")

    def mock_get_conn(host, port, afi):
        return conns[(host, port)]

    # patch to avoid making requests before we want it
    with patch.object(SimpleClient, "load_metadata_for_topics"):
        with patch.object(SimpleClient, "_get_conn", side_effect=mock_get_conn):
            client = SimpleClient(hosts="kafka01:9092,kafka02:9092")
            resp = client._send_broker_unaware_request(
                payloads=["fake request"],
                encoder_fn=MagicMock(),
                decoder_fn=lambda x: x,
            )

            self.assertEqual("valid response", resp)
            live.recv.assert_called_once_with()
def _send_offset_request(self, node_id, timestamps):
    """Build and send an offset request for *timestamps* to *node_id*,
    returning a Future resolved by the response handler."""
    use_v1 = self.config['api_version'] >= (0, 10, 1)

    # Collect per-topic partition entries; v0 entries additionally carry
    # a trailing count (always 1 here), v1 entries do not.
    by_topic = collections.defaultdict(list)
    for tp, timestamp in six.iteritems(timestamps):
        entry = (tp.partition, timestamp) if use_v1 else (tp.partition, timestamp, 1)
        by_topic[tp.topic].append(entry)

    version = 1 if use_v1 else 0
    request = OffsetRequest[version](-1, list(six.iteritems(by_topic)))

    # Client returns a future that only fails on network issues
    # so create a separate future and attach a callback to update it
    # based on response error codes
    future = Future()
    _f = self._client.send(node_id, request)
    _f.add_callback(self._handle_offset_response, future)
    _f.add_errback(lambda e: future.failure(e))
    return future
def send(self, request, expect_response=True):
    """send request, return Future()

    Can block on network if request is larger than send_buffer_bytes
    """
    future = Future()
    # Fail fast if the connection cannot currently accept a request
    if self.connecting():
        return future.failure(Errors.NodeNotReadyError(str(self)))
    elif not self.connected():
        return future.failure(Errors.ConnectionError(str(self)))
    elif not self.can_send_more():
        return future.failure(Errors.TooManyInFlightRequests(str(self)))
    correlation_id = self._next_correlation_id()
    # Wire format: 4-byte Int32 length prefix, then header + payload
    header = RequestHeader(request,
                           correlation_id=correlation_id,
                           client_id=self.config['client_id'])
    message = b''.join([header.encode(), request.encode()])
    size = Int32.encode(len(message))
    try:
        # In the future we might manage an internal write buffer
        # and send bytes asynchronously. For now, just block
        # sending each request payload
        self._sock.setblocking(True)
        # Write the length prefix and the message body as two buffers,
        # retrying short sends until each is fully flushed
        for data in (size, message):
            total_sent = 0
            while total_sent < len(data):
                sent_bytes = self._sock.send(data[total_sent:])
                total_sent += sent_bytes
            assert total_sent == len(data)
        self._sock.setblocking(False)
    except (AssertionError, ConnectionError) as e:
        # Socket failure (or short-send assertion): tear down the
        # connection and fail the returned future
        log.exception("Error sending %s to %s", request, self)
        error = Errors.ConnectionError("%s: %s" % (str(self), e))
        self.close(error=error)
        return future.failure(error)
    log.debug('%s Request %d: %s', self, correlation_id, request)

    if expect_response:
        # Record the outstanding request (future + correlation id) so the
        # response path elsewhere in this class can resolve it
        ifr = InFlightRequest(request=request,
                              correlation_id=correlation_id,
                              response_type=request.RESPONSE_TYPE,
                              future=future,
                              timestamp=time.time())
        self.in_flight_requests.append(ifr)
    else:
        # Fire-and-forget: resolve immediately after a successful write
        future.success(None)
    return future
def test_delete_topics(
    self,
    mock_new_topics,
    mock_least_loaded_node,
    bootstrap_brokers,
    delete_response,
    metadata_response,
):
    """delete_topics returns the broker's delete response."""
    fake_client = mock.Mock()
    # First poll serves metadata, second serves the delete response
    fake_client.poll = mock.Mock(
        side_effect=[metadata_response, delete_response])
    fake_client.ready.return_value = True
    fake_client.least_loaded_node.return_value = mock_least_loaded_node
    fake_client.send.return_value = Future()

    admin = AdminClient(fake_client)
    response = admin.delete_topics(mock_new_topics, 0)

    assert response == delete_response
def test__handle_offset_response(fetcher, mocker):
    """Exercise _handle_offset_response across per-partition error codes."""
    def handle(partition_data):
        # Build a v1 OffsetResponse, feed it through the handler, and
        # return the future it resolved or failed.
        fut = Future()
        fetcher._handle_offset_response(fut, OffsetResponse[1](partition_data))
        return fut

    # Broker returns UnsupportedForMessageFormatError, will omit partition
    fut = handle([("topic", [(0, 43, -1, -1)]),
                  ("topic", [(1, 0, 1000, 9999)])])
    assert fut.succeeded()
    assert fut.value == {TopicPartition("topic", 1): (9999, 1000)}

    # Broker returns NotLeaderForPartitionError
    fut = handle([("topic", [(0, 6, -1, -1)])])
    assert fut.failed()
    assert isinstance(fut.exception, NotLeaderForPartitionError)

    # Broker returns UnknownTopicOrPartitionError
    fut = handle([("topic", [(0, 3, -1, -1)])])
    assert fut.failed()
    assert isinstance(fut.exception, UnknownTopicOrPartitionError)

    # Broker returns many errors and 1 result
    # Will fail on 1st error and return
    fut = handle([("topic", [(0, 43, -1, -1)]),
                  ("topic", [(1, 6, -1, -1)]),
                  ("topic", [(2, 3, -1, -1)]),
                  ("topic", [(3, 0, 1000, 9999)])])
    assert fut.failed()
    assert isinstance(fut.exception, NotLeaderForPartitionError)
def _on_join_leader(self, response):
    """
    Perform leader synchronization and send back the assignment
    for the group via SyncGroupRequest

    Arguments:
        response (JoinResponse): broker response to parse

    Returns:
        Future: resolves to member assignment encoded-bytes
    """
    try:
        group_assignment = self._perform_assignment(response.leader_id,
                                                    response.group_protocol,
                                                    response.members)
    except Exception as e:
        # Assignment errors surface as a failed future, not an exception
        return Future().failure(e)

    version = 0 if self.config["api_version"] < (0, 11, 0) else 1

    # Serialize each member's assignment, encoding non-bytes values
    encoded_assignments = []
    for member_id, assignment in six.iteritems(group_assignment):
        if not isinstance(assignment, bytes):
            assignment = assignment.encode()
        encoded_assignments.append((member_id, assignment))

    request = SyncGroupRequest[version](
        self.group_id,
        self._generation.generation_id,
        self._generation.member_id,
        encoded_assignments,
    )
    log.debug("Sending leader SyncGroup for group %s to coordinator %s: %s",
              self.group_id, self.coordinator_id, request)
    return self._send_sync_group_request(request)