def test_decode_produce_response(self): t1 = "topic1" t2 = "topic2" encoded = struct.pack('>iih%dsiihqihqh%dsiihq' % (len(t1), len(t2)), 2, 2, len(t1), compat.bytes(t1), 2, 0, 0, compat.long(10), 1, 1, compat.long(20), len(t2), compat.bytes(t2), 1, 0, 0, compat.long(30)) responses = list(KafkaProtocol.decode_produce_response(encoded)) self.assertEqual(responses, [ ProduceResponse(t1, 0, 0, compat.long(10)), ProduceResponse(t1, 1, 1, compat.long(20)), ProduceResponse(t2, 0, 0, compat.long(30)) ])
def test_decode_produce_response(self): t1 = b"topic1" t2 = b"topic2" _long = int if six.PY2: _long = long encoded = struct.pack('>iih%dsiihqihqh%dsiihq' % (len(t1), len(t2)), 2, 2, len(t1), t1, 2, 0, 0, _long(10), 1, 1, _long(20), len(t2), t2, 1, 0, 0, _long(30)) responses = list(KafkaProtocol.decode_produce_response(encoded)) self.assertEqual(responses, [ ProduceResponse(t1, 0, 0, _long(10)), ProduceResponse(t1, 1, 1, _long(20)), ProduceResponse(t2, 0, 0, _long(30)) ])
def send_side_effect(reqs, *args, **kwargs):
    # Simulate a leadership error on the first call so the producer
    # retries; subsequent calls succeed with advancing offsets.
    if self.client.is_first_time:
        self.client.is_first_time = False
        return [
            ProduceResponse(req.topic, req.partition,
                            NotLeaderForPartitionError.errno, -1)
            for req in reqs
        ]
    responses = []
    for req in reqs:
        offset = offsets[req.topic][req.partition]
        offsets[req.topic][req.partition] += len(req.messages)
        responses.append(
            ProduceResponse(req.topic, req.partition, 0, offset))
    return responses
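# A standalone sketch of the same fail-once-then-succeed pattern, using
# only unittest.mock; names such as fail_once_side_effect and fake_client
# are illustrative and not part of the code under test.
from unittest import mock

state = {'first_call': True}

def fail_once_side_effect(reqs, *args, **kwargs):
    # Mimics send_side_effect above: error out once, then succeed.
    if state['first_call']:
        state['first_call'] = False
        return ['error-response' for _ in reqs]
    return ['ok-response' for _ in reqs]

fake_client = mock.Mock()
fake_client.send_produce_request = mock.Mock(
    side_effect=fail_once_side_effect)

assert fake_client.send_produce_request(['req']) == ['error-response']
assert fake_client.send_produce_request(['req']) == ['ok-response']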
def test_populate_topic_to_offset_map(self, producer, topic):
    response_one = ProduceResponse(topic, partition=0, error=0, offset=1)
    response_two = FailedPayloadsError(payload=mock.Mock())
    responses = [response_one, response_two]
    topics_map = producer._kafka_producer._populate_topics_to_offset_map(
        responses)
    assert len(topics_map) == 1
    assert topic in topics_map
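# The assertions above imply the map keeps only successful responses and
# drops FailedPayloadsError entries. A plausible minimal sketch of such a
# helper, shown as an illustration rather than the producer's actual
# implementation; it assumes the ProduceResponse type used throughout
# these examples.
def populate_topics_to_offset_map(responses):
    topics_map = {}
    for response in responses:
        # Skip anything that is not a successful ProduceResponse,
        # e.g. FailedPayloadsError instances.
        if isinstance(response, ProduceResponse):
            topics_map[response.topic] = response.offset
    return topics_map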
def send_side_effect(reqs, *args, **kwargs):
    if self.client.is_first_time:
        self.client.is_first_time = False
        return [FailedPayloadsError(req) for req in reqs]
    responses = []
    for req in reqs:
        offset = offsets[req.topic][req.partition]
        offsets[req.topic][req.partition] += len(req.messages)
        responses.append(
            ProduceResponse(req.topic, req.partition, 0, offset))
    return responses
def test_publish_one_msg_succeeds_one_fails_after_retry(
        self, message, another_message, topic, producer):
    # TODO(DATAPIPE-606|clin) investigate better way than mocking response
    mock_response = ProduceResponse(topic, partition=0, error=0, offset=1)
    fail_response = FailedPayloadsError(payload=mock.Mock())
    side_effect = ([[mock_response, fail_response]] +
                   [[fail_response]] * self.max_retry_count)
    with mock.patch.object(
        producer._kafka_producer.kafka_client,
        'send_produce_request',
        side_effect=side_effect
    ), pytest.raises(MaxRetryError) as e:
        producer.publish(message)
        producer.publish(another_message)
        producer.flush()
    self.assert_last_retry_result(
        e.value.last_result,
        another_message,
        expected_published_msgs_count=1
    )
@classmethod
def decode_produce_response(cls, data):
    """
    Decode bytes to a ProduceResponse

    Params
    ======
    data: bytes to decode
    """
    # The correlation id is read only to advance the cursor.
    ((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)

    for _ in range(num_topics):
        ((strlen,), cur) = relative_unpack('>h', data, cur)
        topic = data[cur:cur + strlen]
        cur += strlen
        ((num_partitions,), cur) = relative_unpack('>i', data, cur)

        for _ in range(num_partitions):
            ((partition, error, offset), cur) = relative_unpack(
                '>ihq', data, cur)
            yield ProduceResponse(topic, partition, error, offset)
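# A minimal sketch of exercising the decoder by hand, assuming
# KafkaProtocol and ProduceResponse are importable as in the tests
# above; the correlation id and offset values here are arbitrary.
import struct

topic = b"demo"
encoded = struct.pack(
    '>iih%dsiihq' % len(topic),
    7,                  # correlation id (read, then discarded, by the decoder)
    1,                  # number of topics in the response
    len(topic), topic,  # length-prefixed topic name
    1,                  # number of partitions for this topic
    0,                  # partition id
    0,                  # error code, 0 meaning no error
    42,                 # offset assigned to the produced messages
)

for response in KafkaProtocol.decode_produce_response(encoded):
    print(response)  # e.g. ProduceResponse(b'demo', 0, 0, 42)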