def test_correlates_responses(self, read_message):
    """Out-of-order responses are matched to their requests by correlation id."""
    first_request = metadata.MetadataRequest()
    second_request = metadata.MetadataRequest(topics=["example.foo"])

    first_response = metadata.MetadataResponse(
        brokers=[metadata.Broker(broker_id=1, host="broker01", port=333)],
        topics=[
            metadata.TopicMetadata(error_code=0, name="example.foo"),
            metadata.TopicMetadata(error_code=0, name="example.bar"),
        ])
    first_response.correlation_id = first_request.correlation_id

    second_response = metadata.MetadataResponse(
        brokers=[metadata.Broker(broker_id=1, host="broker01", port=333)],
        topics=[
            metadata.TopicMetadata(error_code=0, name="example.foo"),
        ])
    second_response.correlation_id = second_request.correlation_id

    # deliver the second response before the first to exercise correlation
    pending = [second_response, first_response]

    read_message.side_effect = lambda *args: self.future_value(pending.pop(0))

    conn = Connection("localhost", 1234)
    conn.stream = Mock()
    conn.stream.write.return_value = self.future_value(None)

    results = [conn.send(first_request), conn.send(second_request)]

    yield conn.read_loop()

    # each future resolves to its own response regardless of arrival order:
    # the first request's response carries two topics, the second's just one
    self.assertEqual(len(results[0].result().topics), 2)
    self.assertEqual(len(results[1].result().topics), 1)
def test_heal_with_initial_broker_errors(self):
    """After connection failures, a later heal() keeps only reachable brokers."""

    def partition_zero():
        # fresh instance per response, matching the original fixtures
        return metadata.PartitionMetadata(
            error_code=errors.no_error,
            partition_id=0,
            leader=2,
            replicas=[],
            isrs=[],
        )

    def metadata_response(brokers):
        # single-topic response whose broker list varies per pass
        return metadata.MetadataResponse(
            brokers=brokers,
            topics=[
                metadata.TopicMetadata(
                    error_code=errors.no_error,
                    name="test.topic",
                    partitions=[partition_zero()],
                ),
            ],
        )

    # first pass advertises three brokers, two of which will fail to connect
    first_pass = metadata_response([
        metadata.Broker(broker_id=2, host="kafka01", port=9092),
        metadata.Broker(broker_id=7, host="kafka02", port=9000),
        metadata.Broker(broker_id=8, host="kafka03", port=9092),
    ])
    # second pass only lists the broker that is actually reachable
    second_pass = metadata_response([
        metadata.Broker(broker_id=2, host="kafka01", port=9092),
    ])

    self.add_broker("kafka01", 9092, responses=[first_pass, second_pass])
    self.add_broker(
        "kafka02", 9000,
        connect_error=iostream.StreamClosedError(),
    )
    self.add_broker(
        "kafka03", 9092,
        connect_error=exc.BrokerConnectionError("kafka03", 9092),
    )

    existing_conn = cluster.Connection("kafka01", 9092)

    c = cluster.Cluster(["kafka01", "kafka02:900"])
    c.conns = {2: existing_conn}

    yield c.heal()

    # only the broker we could actually talk to remains connected
    self.assertEqual([2], list(c.conns.keys()))
def test_heal_with_intial_topic_errors(self):
    # NOTE(review): "intial" in the method name is a typo for "initial";
    # left unchanged so the externally visible test name stays the same.
    """heal() retries past metadata errors and keeps only the clean state.

    The first metadata response carries a partition-level error
    (leader_not_available), a topic-level error (replica_not_available)
    and an unknown topic; the second response is clean.  After heal()
    only the clean topic/partition/leader state should be present.
    """
    initial = metadata.MetadataResponse(
        brokers=[
            metadata.Broker(broker_id=2, host="kafka01", port=9092),
        ],
        topics=[
            metadata.TopicMetadata(
                error_code=errors.no_error,
                name="test.topic",
                partitions=[
                    # partition 0: leader lookup failed on the first pass
                    metadata.PartitionMetadata(
                        error_code=errors.leader_not_available,
                        partition_id=0,
                        leader=8,
                        replicas=[7, 2],
                        isrs=[7, 2],
                    ),
                    metadata.PartitionMetadata(
                        error_code=errors.no_error,
                        partition_id=1,
                        leader=7,
                        replicas=[2, 8],
                        isrs=[2, 8],
                    ),
                ]
            ),
            # topic-level error on the first pass
            metadata.TopicMetadata(
                error_code=errors.replica_not_available,
                name="other.topic",
                partitions=[],
            ),
            # unknown topic; the final assertions show it is dropped
            metadata.TopicMetadata(
                error_code=errors.unknown_topic_or_partition,
                name="fake.topic",
                partitions=[],
            ),
        ],
    )
    # second (clean) response: broker 2 leads both partitions, no errors
    fixed = metadata.MetadataResponse(
        brokers=[
            metadata.Broker(broker_id=2, host="kafka01", port=9092),
        ],
        topics=[
            metadata.TopicMetadata(
                error_code=errors.no_error,
                name="test.topic",
                partitions=[
                    metadata.PartitionMetadata(
                        error_code=errors.no_error,
                        partition_id=0,
                        leader=2,
                        replicas=[],
                        isrs=[],
                    ),
                    metadata.PartitionMetadata(
                        error_code=errors.no_error,
                        partition_id=1,
                        leader=2,
                        replicas=[],
                        isrs=[],
                    ),
                ]
            ),
        ],
    )
    # kafka01 serves the errored response first, then the clean one
    self.add_broker("kafka01", 9092, responses=[initial, fixed])

    conn1 = cluster.Connection("kafka01", 9092)

    # NOTE(review): "kafka02:900" (not 9000) is presumably a deliberately
    # unreachable bootstrap entry -- confirm against Cluster host parsing.
    c = cluster.Cluster(["kafka01", "kafka02:900"])
    c.conns = {2: conn1}

    yield c.heal()

    # only the clean metadata from the second response survives
    self.assertEqual(c.topics, {"test.topic": [0, 1]})
    self.assertEqual(c.leaders, {"test.topic": {0: 2, 1: 2}})
def test_heal(self):
    """heal() reconciles live connections against fresh cluster metadata.

    Three existing connections are seeded: one that matches the metadata
    (kept), one marked ``closing`` (replaced), and one absent from the
    metadata (aborted and dropped).  Afterwards the cluster's topic,
    leader and connection state must mirror the metadata response.
    """
    response = metadata.MetadataResponse(
        brokers=[
            metadata.Broker(broker_id=2, host="kafka01", port=9092),
            metadata.Broker(broker_id=8, host="kafka02", port=9000),
            metadata.Broker(broker_id=7, host="kafka03", port=9092),
        ],
        topics=[
            metadata.TopicMetadata(
                error_code=errors.no_error,
                name="test.topic",
                partitions=[
                    metadata.PartitionMetadata(
                        error_code=errors.no_error,
                        partition_id=0,
                        leader=8,
                        replicas=[7, 2],
                        isrs=[7, 2],
                    ),
                    metadata.PartitionMetadata(
                        error_code=errors.no_error,
                        partition_id=1,
                        leader=7,
                        replicas=[2, 8],
                        isrs=[2, 8],
                    ),
                ]
            ),
            metadata.TopicMetadata(
                error_code=errors.no_error,
                name="other.topic",
                partitions=[
                    metadata.PartitionMetadata(
                        error_code=errors.no_error,
                        partition_id=3,
                        leader=2,
                        replicas=[8, 7],
                        isrs=[7],
                    ),
                ]
            ),
        ],
    )
    self.add_broker("kafka01", 9092, responses=[response])
    self.add_broker("kafka02", 9000, responses=[response])
    self.add_broker("kafka04", 9092, responses=[response])
    # kafka03 is in the metadata but never asked for metadata itself
    self.add_broker("kafka03", 9092, responses=[])

    c = cluster.Cluster(["kafka01", "kafka02:900"])

    # in the response, stays where it is
    conn1 = cluster.Connection("kafka01", 9092)
    # closing but in response, gets replaced
    conn2 = cluster.Connection("kafka02", 9000)
    conn2.closing = True
    # not in response, gets aborted and dropped
    conn3 = cluster.Connection("kafka04", 9092)
    c.conns = {2: conn1, 8: conn2, 1: conn3}

    # NOTE(review): assigning to ``abort.side_effect`` implies
    # cluster.Connection is mock-patched by the harness -- heal() is
    # expected to call abort(), which removes the stale entry itself.
    def remove_connection():
        del c.conns[1]
    c.conns[1].abort.side_effect = remove_connection

    yield c.heal()

    # topic/leader maps mirror the metadata response exactly
    self.assertEqual(c.topics, {"test.topic": [0, 1], "other.topic": [3]})
    self.assertEqual(
        c.leaders, {"test.topic": {0: 8, 1: 7}, "other.topic": {3: 2}}
    )
    # connection set now matches the advertised broker ids
    self.assertEqual(set(list(c)), set([2, 7, 8]))
    self.assertEqual(c[7], self.broker_hosts[("kafka03", 9092)])
    conn3.abort.assert_called_once_with()
def test_read_message(self):
    """read_message() reads size, correlation id and payload, then decodes.

    A MetadataResponse is hand-packed with ``struct`` and fed to the
    connection via a mocked stream in three chunks (size prefix,
    correlation id, body); the decoded message must equal the expected
    response object.
    """
    response_format = "".join([
        "!",
        "i",
        "i", "h%dsi" % len("broker01"),  # array of brokers
        "i",  # array of topics
        "hh%ds" % len("example.foo"),  # topic error code + name
        "i",  # subarray of partitions
        "hii", "i", "ii", "i", "i",  # partition 1 details
        "hii", "i", "ii", "i", "ii",  # partition 2 details
    ])
    raw_response = struct.pack(
        response_format,
        1,  # there is 1 broker
        8, len("broker01"), b"broker01", 1234,  # broker id,host,port
        1,  # there is 1 topic
        0, len("example.foo"), b"example.foo",  # topic name, no error
        2,  # there are 2 partitions
        0, 1, 1,  # partition ID 1, leader is broker 1
        2, 2, 3, 1, 2,  # two replicas: on 2 & 3, one ISR: broker 2
        0, 2, 3,  # partition ID 2, leader is broker 3
        2, 1, 2, 2, 2, 1,  # two replicas: on 1 & 2, both are in ISR set
    )
    raw_data = [
        # size of full response (incl. correlation)
        struct.pack("!i", struct.calcsize(response_format) + 4),
        struct.pack("!i", 555),  # correlation id
        raw_response
    ]

    # each stream read pops the next chunk: size, correlation id, body
    def get_raw_data(*args):
        return self.future_value(raw_data.pop(0))

    conn = Connection("localhost", 1234)
    # correlation id 555 was registered as a pending "metadata" request
    conn.api_correlation = {555: "metadata"}
    conn.stream = Mock()
    conn.stream.read_bytes.side_effect = get_raw_data

    message = yield conn.read_message()

    expected = metadata.MetadataResponse(
        brokers=[metadata.Broker(broker_id=8, host="broker01", port=1234)],
        topics=[
            metadata.TopicMetadata(
                error_code=0, name="example.foo",
                partitions=[
                    metadata.PartitionMetadata(error_code=0, partition_id=1,
                                               leader=1, replicas=[2, 3],
                                               isrs=[2]),
                    metadata.PartitionMetadata(error_code=0, partition_id=2,
                                               leader=3, replicas=[1, 2],
                                               isrs=[2, 1]),
                ]),
        ])

    self.assertEqual(message, expected)