def load_metadata_for_topics(self, *topics):
    """
    Discover brokers and metadata for a set of topics.

    Called lazily whenever metadata is unavailable. Refreshes
    self.brokers, self.topic_partitions and self.topics_to_brokers
    from a metadata request sent to any reachable broker.
    """
    correlation_id = self._next_id()
    payload = KafkaProtocol.encode_metadata_request(
        self.client_id, correlation_id, topics)
    raw_response = self._send_broker_unaware_request(correlation_id, payload)

    brokers, topic_metadata = KafkaProtocol.decode_metadata_response(raw_response)

    log.debug("Broker metadata: %s", brokers)
    log.debug("Topic metadata: %s", topic_metadata)

    self.brokers = brokers

    for topic, partitions in topic_metadata.items():
        # Drop any stale cached state for this topic before repopulating.
        self.reset_topic_metadata(topic)

        if not partitions:
            log.warning('No partitions for %s', topic)
            continue

        # Record every known partition id for the topic up front; the
        # per-partition loop below only fills in the leader mapping.
        self.topic_partitions[topic] = list(partitions)
        for partition, meta in partitions.items():
            topic_part = TopicAndPartition(topic, partition)
            if meta.leader == -1:
                # Leaderless partition: remember it, but with no broker.
                log.warning('No leader for topic %s partition %s',
                            topic, partition)
                self.topics_to_brokers[topic_part] = None
            else:
                self.topics_to_brokers[topic_part] = brokers[meta.leader]
def test_decode_metadata_response(self):
    """decode_metadata_response must round-trip an encoded payload."""
    brokers = {
        0: BrokerMetadata(0, "brokers1.kafka.rdio.com", 1000),
        1: BrokerMetadata(1, "brokers1.kafka.rdio.com", 1001),
        3: BrokerMetadata(3, "brokers2.kafka.rdio.com", 1000),
    }

    partitions_by_topic = {
        "topic1": {
            0: PartitionMetadata("topic1", 0, 1, (0, 2), (2,)),
            1: PartitionMetadata("topic1", 1, 3, (0, 1), (0, 1)),
        },
        "topic2": {
            0: PartitionMetadata("topic2", 0, 0, (), ()),
        },
    }

    errors_by_topic = {"topic1": 0, "topic2": 1}
    errors_by_partition = {
        ("topic1", 0): 0,
        ("topic1", 1): 1,
        ("topic2", 0): 0,
    }

    payload = self._create_encoded_metadata_response(
        brokers, partitions_by_topic, errors_by_topic, errors_by_partition)

    self.assertEqual(
        KafkaProtocol.decode_metadata_response(payload),
        (brokers, partitions_by_topic))
def load_metadata_for_topics(self, *topics):
    """
    Discover brokers and metadata for a set of topics.

    This function is called lazily whenever metadata is unavailable;
    it updates the client's broker and partition caches in place.
    """
    req_id = self._next_id()
    req = KafkaProtocol.encode_metadata_request(self.client_id, req_id, topics)
    resp = self._send_broker_unaware_request(req_id, req)

    brokers, topic_meta = KafkaProtocol.decode_metadata_response(resp)

    log.debug("Broker metadata: %s", brokers)
    log.debug("Topic metadata: %s", topic_meta)

    self.brokers = brokers

    for topic, partitions in topic_meta.items():
        # Reset cached state for the topic before repopulating it.
        self.reset_topic_metadata(topic)

        if not partitions:
            log.warning('No partitions for %s', topic)
            continue

        # partition_ids aliases the cache entry so appends below land
        # directly in self.topic_partitions[topic].
        partition_ids = self.topic_partitions[topic] = []
        for partition, meta in partitions.items():
            partition_ids.append(partition)
            tp = TopicAndPartition(topic, partition)
            if meta.leader != -1:
                tp_broker = brokers[meta.leader]
                self.topics_to_brokers[tp] = tp_broker
            else:
                # Keep leaderless partitions in the map with no broker.
                log.warning('No leader for topic %s partition %s',
                            topic, partition)
                self.topics_to_brokers[tp] = None
def test_decode_metadata_response(self):
    """Encoding then decoding metadata must reproduce the inputs."""
    broker_specs = (
        (0, "brokers1.kafka.rdio.com", 1000),
        (1, "brokers1.kafka.rdio.com", 1001),
        (3, "brokers2.kafka.rdio.com", 1000),
    )
    node_brokers = {
        node_id: BrokerMetadata(node_id, host, port)
        for node_id, host, port in broker_specs
    }

    partition_specs = (
        ("topic1", 0, 1, (0, 2), (2,)),
        ("topic1", 1, 3, (0, 1), (0, 1)),
        ("topic2", 0, 0, (), ()),
    )
    topic_partitions = {}
    for spec in partition_specs:
        topic_partitions.setdefault(spec[0], {})[spec[1]] = PartitionMetadata(*spec)

    topic_errors = {"topic1": 0, "topic2": 1}
    partition_errors = {
        ("topic1", 0): 0,
        ("topic1", 1): 1,
        ("topic2", 0): 0,
    }

    encoded = self._create_encoded_metadata_response(
        node_brokers, topic_partitions, topic_errors, partition_errors)
    decoded = KafkaProtocol.decode_metadata_response(encoded)
    self.assertEqual(decoded, (node_brokers, topic_partitions))
def _load_metadata_for_topics(self, *topics):
    """
    Discover brokers and metadata for a set of topics.

    This method will recurse in the event of a retry: when a topic
    reports no partitions, or a partition reports no leader, it sleeps
    for one second and re-requests metadata for that topic.

    Raises:
        Exception: if no broker could service the metadata request.
    """
    # snake_case (was camelCase `requestId`) for consistency with the
    # rest of the client.
    request_id = self._next_id()
    request = KafkaProtocol.encode_metadata_request(
        self.client_id, request_id, topics)
    response = self._send_broker_unaware_request(request_id, request)
    if response is None:
        raise Exception("All servers failed to process request")

    # NOTE: `topics` is rebound from the argument tuple to the decoded
    # per-topic partition mapping.
    (brokers, topics) = KafkaProtocol.decode_metadata_response(response)

    log.debug("Broker metadata: %s", brokers)
    log.debug("Topic metadata: %s", topics)

    self.brokers = brokers
    # Rebuilt from scratch on every call (including recursive retries).
    self.topics_to_brokers = {}

    for topic, partitions in topics.items():
        # Clear the list once before we add it. This removes stale entries
        # and avoids duplicates
        self.topic_partitions.pop(topic, None)

        if not partitions:
            # log.warn is a deprecated alias of log.warning.
            log.warning(
                "Partition is unassigned, delay for 1s and retry. Have you created {} on zookeeper?"
                .format(topic))
            time.sleep(1)
            self._load_metadata_for_topics(topic)
            # Stop consuming the rest of this (now stale) response.
            break

        for partition, meta in partitions.items():
            if meta.leader == -1:
                # No elected leader yet: wait and retry just this topic.
                log.info("Partition is unassigned, delay for 1s and retry")
                time.sleep(1)
                self._load_metadata_for_topics(topic)
            else:
                topic_part = TopicAndPartition(topic, partition)
                self.topics_to_brokers[topic_part] = brokers[meta.leader]
                # setdefault guards against KeyError: the topic entry was
                # popped above, so it may not exist on a plain dict.
                self.topic_partitions.setdefault(topic, []).append(partition)
def test_decode_metadata_response(self):
    """Round-trip: encode metadata, decode it, expect the originals back."""
    expected_brokers = [
        BrokerMetadata(0, b"brokers1.kafka.rdio.com", 1000),
        BrokerMetadata(1, b"brokers1.kafka.rdio.com", 1001),
        BrokerMetadata(3, b"brokers2.kafka.rdio.com", 1000),
    ]

    expected_topics = [
        TopicMetadata(b"topic1", 0, [
            PartitionMetadata(b"topic1", 0, 1, (0, 2), (2,), 0),
            PartitionMetadata(b"topic1", 1, 3, (0, 1), (0, 1), 1),
        ]),
        TopicMetadata(b"topic2", 1, [
            PartitionMetadata(b"topic2", 0, 0, (), (), 0),
        ]),
    ]

    wire_bytes = self._create_encoded_metadata_response(
        expected_brokers, expected_topics)

    self.assertEqual(
        KafkaProtocol.decode_metadata_response(wire_bytes),
        (expected_brokers, expected_topics))
def test_decode_metadata_response(self):
    """decode_metadata_response(encode(...)) yields the original metadata."""
    node_brokers = [
        BrokerMetadata(node_id, host, port)
        for node_id, host, port in (
            (0, b"brokers1.kafka.rdio.com", 1000),
            (1, b"brokers1.kafka.rdio.com", 1001),
            (3, b"brokers2.kafka.rdio.com", 1000),
        )
    ]

    topic1_parts = [
        PartitionMetadata(b"topic1", 0, 1, (0, 2), (2,), 0),
        PartitionMetadata(b"topic1", 1, 3, (0, 1), (0, 1), 1),
    ]
    topic2_parts = [
        PartitionMetadata(b"topic2", 0, 0, (), (), 0),
    ]
    topic_partitions = [
        TopicMetadata(b"topic1", 0, topic1_parts),
        TopicMetadata(b"topic2", 1, topic2_parts),
    ]

    encoded = self._create_encoded_metadata_response(
        node_brokers, topic_partitions)
    self.assertEqual(
        KafkaProtocol.decode_metadata_response(encoded),
        (node_brokers, topic_partitions))
def _load_metadata_for_topics(self, *topics):
    """
    Discover brokers and metadata for a set of topics.

    This method will recurse in the event of a retry: when a topic
    reports no partitions, or a partition reports a leader of -1, it
    sleeps for one second and re-requests metadata for that topic.

    Raises:
        Exception: if no broker could service the metadata request.
    """
    request_id = self._next_id()
    request = KafkaProtocol.encode_metadata_request(self.client_id, request_id, topics)
    response = self._send_broker_unaware_request(request_id, request)
    if response is None:
        # Every known broker failed to answer the metadata request.
        raise Exception("All servers failed to process request")
    # NOTE: `topics` is rebound here from the argument tuple to the
    # decoded per-topic partition mapping.
    (brokers, topics) = KafkaProtocol.decode_metadata_response(response)
    log.debug("Broker metadata: %s", brokers)
    log.debug("Topic metadata: %s", topics)
    self.brokers = brokers
    # The leader mapping is rebuilt from scratch on every call,
    # including each recursive retry.
    self.topics_to_brokers = {}
    for topic, partitions in topics.items():
        # Clear the list once before we add it. This removes stale entries
        # and avoids duplicates
        self.topic_partitions.pop(topic, None)
        if not partitions:
            # Topic has no partitions yet: wait, retry this topic, and
            # stop processing the (now stale) remainder of the response.
            log.info("Partition is unassigned, delay for 1s and retry")
            time.sleep(1)
            self._load_metadata_for_topics(topic)
            break
        for partition, meta in partitions.items():
            if meta.leader == -1:
                # No elected leader: wait and retry the whole topic.
                # NOTE(review): unlike the empty-partitions branch above,
                # this does not break, so the loop keeps consuming the
                # stale response after the recursive call — confirm
                # this is intended.
                log.info("Partition is unassigned, delay for 1s and retry")
                time.sleep(1)
                self._load_metadata_for_topics(topic)
            else:
                topic_part = TopicAndPartition(topic, partition)
                self.topics_to_brokers[topic_part] = brokers[meta.leader]
                # NOTE(review): the topic key was popped above, so this
                # append assumes self.topic_partitions is a defaultdict;
                # a plain dict would raise KeyError here — verify.
                self.topic_partitions[topic].append(partition)