def run(cls, args, cluster_config):
    """Unsubscribe a consumer group from the given topics/partitions."""
    # Build the Kafka client and refresh topic metadata.
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    # When args.topic is set, topics_dict is limited to that topic;
    # otherwise it covers every topic the group is subscribed to.
    topics_dict = cls.preprocess_args(
        args.groupid,
        args.topic,
        args.partitions,
        cluster_config,
        kafka_client,
        topics=args.topics,
    )

    if args.topics:
        topics = args.topics
    elif args.topic:
        topics = [args.topic]
    else:
        topics = []

    KafkaUnsubscriber(kafka_client).unsubscribe_topics(
        args.groupid,
        topics,
        args.partitions,
        topics_dict,
    )
def run(cls, args, cluster_config):
    """Print the topics and partitions a consumer group is subscribed to."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    group_topics = cls.preprocess_args(
        groupid=args.groupid,
        topic=None,
        partitions=None,
        cluster_config=cluster_config,
        client=kafka_client,
        storage=args.storage,
        fail_on_error=False,
    )
    # An empty result means the group is unknown to the chosen storage.
    if not group_topics:
        print("Consumer Group ID: {group} does not exist in {storage}".format(
            group=args.groupid,
            storage=args.storage,
        ))
        sys.exit(1)

    print("Consumer Group ID: {groupid}".format(groupid=args.groupid))
    for topic_name, topic_partitions in six.iteritems(group_topics):
        print("\tTopic: {topic}".format(topic=topic_name))
        print("\t\tPartitions: {partitions}".format(partitions=topic_partitions))
def run(cls, args, cluster_config):
    """Rename a consumer group, moving its offsets to the new group ID."""
    # Identical IDs would make the rename a no-op; refuse early.
    if args.old_groupid == args.new_groupid:
        print(
            "Error: Old group ID and new group ID are the same.",
            file=sys.stderr,
        )
        sys.exit(1)

    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    subscribed_topics = cls.preprocess_args(
        groupid=args.old_groupid,
        topic=None,
        partitions=None,
        cluster_config=cluster_config,
        client=kafka_client,
        use_admin_client=args.use_admin_client,
    )
    cls.rename_group(
        kafka_client,
        args.old_groupid,
        args.new_groupid,
        subscribed_topics,
    )
def run(cls, args, cluster_config):
    """Unsubscribe a consumer group from one or more topics."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    # topics_dict is restricted to args.topic when that is given,
    # otherwise it describes all topics the group consumes.
    topics_dict = cls.preprocess_args(
        args.groupid,
        args.topic,
        args.partitions,
        cluster_config,
        kafka_client,
        topics=args.topics,
    )

    # Resolve the topic list from whichever argument was supplied.
    selected = args.topics or ([args.topic] if args.topic else [])

    unsubscriber = KafkaUnsubscriber(kafka_client)
    unsubscriber.unsubscribe_topics(
        args.groupid,
        selected,
        args.partitions,
        topics_dict,
    )
def run(cls, args, cluster_config):
    """Copy consumer offsets from one group to another (Kafka storage)."""
    if args.source_groupid == args.dest_groupid:
        print(
            "Error: Source group ID and destination group ID are same.",
            file=sys.stderr,
        )
        sys.exit(1)

    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    topics_of_source = cls.preprocess_args(
        args.source_groupid,
        args.topic,
        args.partitions,
        cluster_config,
        kafka_client,
        use_admin_client=args.use_admin_client,
    )
    cls.copy_group_kafka(
        kafka_client,
        topics_of_source,
        args.source_groupid,
        args.dest_groupid,
    )
def fetch_offsets(group, topics):
    """Return the current consumer offsets for *group* on *topics*."""
    cluster = get_cluster_config()
    kafka_client = KafkaToolClient(cluster.broker_list)
    current_offsets = get_current_consumer_offsets(
        kafka_client, group, topics, False,
    )
    kafka_client.close()
    return current_offsets
def run(cls, args, cluster_config):
    """Fetch and display a consumer group's offsets."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    topics_dict = cls.preprocess_args(
        args.groupid, args.topic, args.partitions, cluster_config, kafka_client,
    )
    offsets_metadata = cls.get_offsets(
        kafka_client,
        args.groupid,
        topics_dict,
        args.storage,
    )
    # Flag any subscribed topic that Kafka does not know about.
    for topic in topics_dict:
        if topic not in offsets_metadata:
            print(
                "Warning: Topic {topic} or one or more of it's partitions "
                "do not exist in Kafka".format(topic=topic),
                file=sys.stderr,
            )
    kafka_client.close()

    if args.json:
        print_json(offsets_metadata)
    else:
        cls.print_output(offsets_metadata, args.watermark)
def run(cls, args, cluster_config):
    """Rename a consumer group in the chosen offset storage."""
    if args.old_groupid == args.new_groupid:
        print(
            "Error: Old group ID and new group ID are the same.",
            file=sys.stderr,
        )
        sys.exit(1)

    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    group_topics = cls.preprocess_args(
        groupid=args.old_groupid,
        topic=None,
        partitions=None,
        cluster_config=cluster_config,
        client=kafka_client,
        storage=args.storage,
    )
    # Kafka-backed groups are renamed through the broker; anything else
    # goes through the Zookeeper implementation.
    if args.storage == 'kafka':
        cls.rename_group_with_storage_kafka(
            kafka_client,
            args.old_groupid,
            args.new_groupid,
            group_topics,
        )
    else:
        cls.rename_group_with_storage_zookeeper(
            args.old_groupid,
            args.new_groupid,
            group_topics,
            cluster_config,
        )
def run(cls, args, cluster_config):
    """Unsubscribe a consumer group from a single topic."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    topics_dict = cls.preprocess_args(
        args.groupid, args.topic, args.partitions, cluster_config, kafka_client,
    )
    with ZK(cluster_config) as zk:
        # Pick the unsubscriber matching the offset-storage backend.
        if args.storage == 'zookeeper':
            unsubscriber = ZookeeperUnsubscriber(zk)
        elif args.storage == 'kafka':
            unsubscriber = KafkaUnsubscriber(kafka_client)
        else:
            print(
                "Invalid storage option: {}".format(args.storage),
                file=sys.stderr,
            )
            sys.exit(1)
        unsubscriber.unsubscribe_topic(
            args.groupid,
            args.topic,
            args.partitions,
            topics_dict,
        )
def commit_offsets(offsets, group):
    """Commit *offsets* for consumer *group* to the cluster."""
    cluster = get_cluster_config()
    kafka_client = KafkaToolClient(cluster.broker_list)
    set_consumer_offsets(kafka_client, group, offsets)
    kafka_client.close()
def run(cls, args, cluster_config):
    """Unsubscribe a consumer group from one or more topics.

    Validates the --topic/--topics/--partitions combinations before
    touching the group's subscriptions.
    """
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    # --topic and --topics are mutually exclusive.
    if args.topic and args.topics:
        print(
            "Error: Cannot specify --topic and --topics at the same time.",
            file=sys.stderr,
        )
        sys.exit(1)
    # Partition selection only makes sense with a single topic.
    if args.partitions and args.topics:
        print(
            "Error: Cannot use --partitions with --topics. Use --topic "
            "instead.",
            file=sys.stderr,
        )
        sys.exit(1)

    # topics_dict is restricted to args.topic when given, otherwise it
    # describes every topic the group is subscribed to.
    topics_dict = cls.preprocess_args(
        args.groupid,
        args.topic,
        args.partitions,
        cluster_config,
        kafka_client,
        storage=args.storage,
    )
    if args.topics:
        selected = args.topics
    elif args.topic:
        selected = [args.topic]
    else:
        selected = []

    # Every requested topic must actually belong to the group.
    for topic in selected:
        if topic not in topics_dict:
            print(
                "Error: Consumer {groupid} is not subscribed to topic:"
                " {topic}.".format(groupid=args.groupid, topic=topic),
                file=sys.stderr,
            )
            sys.exit(1)

    with ZK(cluster_config) as zk:
        if args.storage == 'zookeeper':
            unsubscriber = ZookeeperUnsubscriber(zk)
        else:
            unsubscriber = KafkaUnsubscriber(kafka_client)
        unsubscriber.unsubscribe_topics(
            args.groupid,
            selected,
            args.partitions,
            topics_dict,
        )
def get_current_watermarks(self, partitions=None):
    """Return watermarks for the consumer-offsets topic.

    Only partitions holding data (highmark > lowmark) are returned;
    when *partitions* is given, the result is limited to those.
    """
    kafka_client = KafkaToolClient(self.kafka_config.broker_list)
    kafka_client.load_metadata_for_topics(CONSUMER_OFFSET_TOPIC)
    watermarks = get_topics_watermarks(
        kafka_client, [CONSUMER_OFFSET_TOPIC],
    )
    wanted = set(tp.partition for tp in partitions) if partitions else None
    result = {}
    for part, offset in six.iteritems(watermarks[CONSUMER_OFFSET_TOPIC]):
        if offset.highmark <= offset.lowmark:
            continue
        if not (partitions is None or part in wanted):
            continue
        result[part] = offset
    return result
def run(cls, args, cluster_config):
    """Delete a consumer group's offsets stored in Kafka."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    group_topics = cls.preprocess_args(
        args.groupid, None, None, cluster_config, kafka_client,
    )
    cls.delete_group_kafka(kafka_client, args.groupid, group_topics)
def run(cls, args, cluster_config):
    """Fetch, optionally sort, and display consumer offsets for a group."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    topics_dict = cls.preprocess_args(
        groupid=args.groupid,
        topic=args.topic,
        partitions=args.partitions,
        cluster_config=cluster_config,
        client=kafka_client,
        quiet=args.json,
    )
    offsets_metadata = cls.get_offsets(
        kafka_client,
        args.groupid,
        topics_dict,
    )
    kafka_client.close()

    # Apply the requested ordering, if any.
    if args.sort_by_distance:
        offsets_metadata = cls.sort_by_distance(offsets_metadata)
    elif args.sort_by_distance_percentage:
        offsets_metadata = cls.sort_by_distance_percentage(offsets_metadata)

    if args.json:
        # Flatten the per-topic partition records, enriched with the
        # distance fields, into one JSON list.
        partitions_info = []
        for topic_partitions in offsets_metadata.values():
            for partition in topic_partitions:
                info = partition._asdict()
                info['offset_distance'] = info['highmark'] - info['current']
                info['percentage_distance'] = cls.percentage_distance(
                    info['highmark'], info['current']
                )
                partitions_info.append(info)
        print_json(partitions_info)
    else:
        # Warn the user if a topic being subscribed to does not exist in
        # Kafka.
        for topic in topics_dict:
            if topic not in offsets_metadata:
                print(
                    "Warning: Topic {topic} or one or more of it's partitions "
                    "do not exist in Kafka".format(topic=topic),
                    file=sys.stderr,
                )
        cls.print_output(offsets_metadata, args.watermark)
def run(cls, args, cluster_config):
    """List the topics and partitions a consumer group owns in Zookeeper."""
    # Setup the Kafka client
    client = KafkaToolClient(cluster_config.broker_list)
    client.load_metadata_for_topics()

    topics_dict = cls.preprocess_args(args.groupid, None, None, cluster_config, client, False)
    if not topics_dict:
        print("Consumer Group ID: {group} does not exist in "
              "Zookeeper".format(group=args.groupid))
        sys.exit(1)

    print("Consumer Group ID: {groupid}".format(groupid=args.groupid))
    # dict.iteritems() exists only in Python 2; six.iteritems keeps this
    # working on Python 3 and matches the rest of this module.
    for topic, partitions in six.iteritems(topics_dict):
        print("\tTopic: {topic}".format(topic=topic))
        print("\t\tPartitions: {partitions}".format(partitions=partitions))
def get_topic_partition_metadata(hosts):
    """Returns topic-partition metadata from Kafka broker.

    kafka-python 1.3+ doesn't include partition metadata information
    in topic_partitions so we extract it from metadata ourselves.
    """
    kafka_client = KafkaToolClient(hosts, timeout=10)
    kafka_client.load_metadata_for_topics()
    topic_partitions = kafka_client.topic_partitions
    metadata_response = kafka_client.send_metadata_request()
    for _, topic, partitions in metadata_response.topics:
        for error_code, partition, leader, replicas, isr in partitions:
            # Only overwrite entries the metadata load already created.
            existing = topic_partitions.get(topic, {}).get(partition)
            if existing is not None:
                topic_partitions[topic][partition] = PartitionMetadata(
                    topic, partition, leader, replicas, isr, error_code)
    return topic_partitions
def run(cls, args, cluster_config):
    """Restore consumer offsets from a JSON file into the given storage."""
    # Offsets to restore come from the user-supplied json file.
    parsed_consumer_offsets = cls.parse_consumer_offsets(args.json_file)
    # closing() guarantees the client is shut down after the restore.
    with closing(KafkaToolClient(cluster_config.broker_list)) as kafka_client:
        kafka_client.load_metadata_for_topics()
        cls.restore_offsets(kafka_client, parsed_consumer_offsets, args.storage)
def run(cls, args, cluster_config):
    """Delete a consumer group from the selected offset storage."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    group_topics = cls.preprocess_args(
        args.groupid,
        None,
        None,
        cluster_config,
        kafka_client,
        storage=args.storage,
    )
    # Zookeeper-backed groups are removed via ZK; everything else via Kafka.
    if args.storage == 'zookeeper':
        cls.delete_group_zk(cluster_config, args.groupid)
    else:
        cls.delete_group_kafka(kafka_client, args.groupid, group_topics)
def get_topic_partition_metadata(hosts):
    """Returns topic-partition metadata from Kafka broker.

    kafka-python 1.3+ doesn't include partition metadata information
    in topic_partitions so we extract it from metadata ourselves.
    """
    client = KafkaToolClient(hosts, timeout=10)
    client.load_metadata_for_topics()
    topic_partitions = client.topic_partitions
    response = client.send_metadata_request()
    for _, topic, partition_list in response.topics:
        for part_error, part_id, leader, replicas, isr in partition_list:
            # Skip partitions the metadata load did not register.
            if topic_partitions.get(topic, {}).get(part_id) is not None:
                topic_partitions[topic][part_id] = PartitionMetadata(
                    topic,
                    part_id,
                    leader,
                    replicas,
                    isr,
                    part_error,
                )
    return topic_partitions
def step_impl5(context):
    """Assert the stored offset for the test group/topic is zero."""
    cluster = get_cluster_config()
    context.client = KafkaToolClient(cluster.broker_list)
    fetched = get_current_consumer_offsets(
        context.client,
        context.group,
        [context.topic],
    )
    assert fetched[context.topic][0] == 0
def run(cls, args, cluster_config):
    """Delete a consumer group from the storage named by --storage."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    group_topics = cls.preprocess_args(
        args.groupid, None, None, cluster_config, kafka_client)

    # Zookeeper is the default when no storage was specified.
    if not args.storage or args.storage == 'zookeeper':
        cls.delete_group_zk(cluster_config, args.groupid)
    elif args.storage == 'kafka':
        cls.delete_group_kafka(kafka_client, args.groupid, group_topics)
    else:
        print(
            "Error: Invalid offset storage option: "
            "{}.".format(args.storage),
            file=sys.stderr,
        )
        sys.exit(1)
def set_consumer_group_offset(topic, group, offset):
    """Set partition 0 of *topic* to *offset* for consumer *group*."""
    kafka_client = KafkaToolClient(KAFKA_URL)
    set_consumer_offsets(
        kafka_client,
        group,
        {topic: {0: offset}},
        raise_on_error=True,
    )
def run(cls, args, cluster_config):
    """Delete a consumer group, dispatching on the offset storage."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    group_topics = cls.preprocess_args(
        args.groupid, None, None, cluster_config, kafka_client
    )
    # No storage given defaults to Zookeeper.
    if not args.storage or args.storage == 'zookeeper':
        cls.delete_group_zk(cluster_config, args.groupid)
    elif args.storage == 'kafka':
        cls.delete_group_kafka(kafka_client, args.groupid, group_topics)
    else:
        print(
            "Error: Invalid offset storage option: "
            "{}.".format(args.storage),
            file=sys.stderr,
        )
        sys.exit(1)
def produce_example_msg(topic, num_messages=1):
    """Publish *num_messages* example messages to *topic*."""
    kafka_client = KafkaToolClient(KAFKA_URL)
    producer = SimpleProducer(kafka_client)
    for _ in range(num_messages):
        try:
            producer.send_messages(topic, b'some message')
        except LeaderNotAvailableError:
            # Sometimes kafka takes a bit longer to assign a leader to a new
            # topic; wait and retry once.
            time.sleep(10)
            producer.send_messages(topic, b'some message')
def create_consumer_group(topic, group_name, num_messages=1):
    """Create a consumer group positioned at *num_messages* on partition 0."""
    kafka_client = KafkaToolClient(KAFKA_URL)
    set_consumer_offsets(
        kafka_client,
        group_name,
        {topic: {0: num_messages}},
        raise_on_error=True,
    )
    return kafka_client
def run(cls, args, cluster_config):
    """Commit the parsed offsets for a consumer group."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    # Unless forced, verify the consumer group actually exists first.
    if not args.force:
        cls.get_topics_from_consumer_group_id(
            cluster_config,
            args.groupid,
        )
    try:
        failures = set_consumer_offsets(
            kafka_client,
            args.groupid,
            cls.new_offsets_dict,
            offset_storage=args.storage,
        )
    except TypeError:
        print(
            "Error: Badly formatted input, please re-run command "
            "with --help option.",
            file=sys.stderr
        )
        raise
    kafka_client.close()

    # Report every partition whose commit failed, then exit non-zero.
    if failures:
        final_error_str = ("Error: Unable to commit consumer offsets for:\n")
        for failure in failures:
            final_error_str += (
                " Topic: {topic} Partition: {partition} Error: {error}\n".format(
                    topic=failure.topic,
                    partition=failure.partition,
                    error=failure.error
                )
            )
        print(final_error_str, file=sys.stderr)
        sys.exit(1)
def run(cls, args, cluster_config):
    """Save a consumer group's current offsets to a JSON file."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    topics_dict = cls.preprocess_args(
        groupid=args.groupid,
        topic=args.topic,
        partitions=args.partitions,
        cluster_config=cluster_config,
        client=kafka_client,
    )
    try:
        offsets_metadata = get_consumer_offsets_metadata(
            kafka_client,
            args.groupid,
            topics_dict,
        )
    except KafkaUnavailableError:
        print(
            "Error: Encountered error with Kafka, please try again later.",
            file=sys.stderr,
        )
        raise

    # Warn about subscribed topics Kafka does not know about.
    for topic in topics_dict:
        if topic not in offsets_metadata:
            print(
                "Warning: Topic {topic} does not exist in Kafka"
                .format(topic=topic),
                file=sys.stderr,
            )

    cls.save_offsets(
        offsets_metadata,
        topics_dict,
        args.json_file,
        args.groupid,
    )
    kafka_client.close()
def create_consumer_group_with_kafka_storage(topic, group_name):
    """Create a consumer group whose offsets are stored in Kafka."""
    kafka_client = KafkaToolClient(KAFKA_URL)
    set_consumer_offsets(
        kafka_client,
        group_name,
        {topic: {0: 1}},
        offset_storage='kafka',
        raise_on_error=True,
    )
    return kafka_client
def run(cls, args, cluster_config):
    """Advance a consumer group's offsets to the head of each topic."""
    # Setup the Kafka client
    client = KafkaToolClient(cluster_config.broker_list)
    client.load_metadata_for_topics()

    topics_dict = cls.preprocess_args(
        args.groupid,
        args.topic,
        args.partitions,
        cluster_config,
        client,
        force=args.force,
        use_admin_client=args.use_admin_client,
    )
    try:
        advance_consumer_offsets(
            client,
            args.groupid,
            topics_dict,
        )
    except TypeError:
        # Fix: the original passed two separate arguments to print(), which
        # rendered "command  with" (doubled space). Use one concatenated
        # string, consistent with the other commands in this module.
        print(
            "Error: Badly formatted input, please re-run command "
            "with --help option.",
            file=sys.stderr,
        )
        raise
    except UnknownMemberIdError:
        print(
            "Unable to advance offsets for group '{group_name}' from topic '{topic_name}'. \
You must ensure none of the consumers with this consumer group id are running before \
trying to advance the offsets stored in Kafka for this consumer group. Try stopping all \
of your consumers.".format(group_name=args.groupid, topic_name=args.topic),
        )
        raise
    client.close()
def run(cls, args, cluster_config):
    """Rewind a consumer group's offsets to the tail of each topic."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    topics_dict = cls.preprocess_args(
        args.groupid,
        args.topic,
        args.partitions,
        cluster_config,
        kafka_client,
        force=args.force,
    )
    try:
        rewind_consumer_offsets(
            kafka_client,
            args.groupid,
            topics_dict,
            args.storage,
        )
    except TypeError:
        print(
            "Error: Badly formatted input, please re-run command "
            "with --help option.",
            file=sys.stderr
        )
        raise
    kafka_client.close()
def get_topic_partition_metadata(hosts):
    """Returns topic-partition metadata from Kafka broker.

    kafka-python 1.3+ doesn't include partition metadata information
    in topic_partitions so we extract it from metadata ourselves.
    """
    metadata = defaultdict(dict)
    kafka_client = KafkaToolClient(hosts, timeout=10)
    response = kafka_client.send_metadata_request()
    for _, topic, partitions in response.topics:
        for part_error, part_id, leader, replicas, isr in partitions:
            metadata[topic][part_id] = PartitionMetadata(
                topic,
                part_id,
                leader,
                replicas,
                isr,
                part_error,
            )
    return metadata
def run(cls, args, cluster_config):
    """Copy a consumer group's Zookeeper offsets to a new group."""
    if args.source_groupid == args.dest_groupid:
        print(
            "Error: Source group ID and destination group ID are same.",
            file=sys.stderr,
        )
        sys.exit(1)

    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    source_topics = cls.preprocess_args(
        args.source_groupid,
        args.topic,
        args.partitions,
        cluster_config,
        kafka_client,
    )
    with ZK(cluster_config) as zk:
        try:
            dest_topics = zk.get_children(
                "/consumers/{groupid}/offsets".format(
                    groupid=args.dest_groupid,
                )
            )
        except NoNodeError:
            # Destination group does not exist yet; nothing to reconcile.
            pass
        else:
            preprocess_topics(
                args.source_groupid,
                source_topics.keys(),
                args.dest_groupid,
                dest_topics,
            )
        # Read the source group's offsets and write them under the
        # destination group.
        source_offsets = fetch_offsets(zk, args.source_groupid, source_topics)
        create_offsets(zk, args.dest_groupid, source_offsets)
def run(cls, args, cluster_config):
    """List the topics and partitions a consumer group owns in Zookeeper."""
    # Setup the Kafka client
    client = KafkaToolClient(cluster_config.broker_list)
    client.load_metadata_for_topics()

    topics_dict = cls.preprocess_args(
        args.groupid, None, None, cluster_config, client, False
    )
    if not topics_dict:
        print(
            "Consumer Group ID: {group} does not exist in "
            "Zookeeper".format(
                group=args.groupid
            )
        )
        sys.exit(1)

    print("Consumer Group ID: {groupid}".format(groupid=args.groupid))
    # dict.iteritems() exists only in Python 2; six.iteritems keeps this
    # working on Python 3 and matches the rest of this module.
    for topic, partitions in six.iteritems(topics_dict):
        print("\tTopic: {topic}".format(topic=topic))
        print("\t\tPartitions: {partitions}".format(partitions=partitions))
def run(cls, args, cluster_config):
    """Clone the Zookeeper offsets of one consumer group into another."""
    # A group cannot be copied onto itself.
    if args.source_groupid == args.dest_groupid:
        print(
            "Error: Source group ID and destination group ID are same.",
            file=sys.stderr,
        )
        sys.exit(1)

    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    topics_of_source = cls.preprocess_args(
        args.source_groupid,
        args.topic,
        args.partitions,
        cluster_config,
        kafka_client,
    )
    with ZK(cluster_config) as zk:
        try:
            existing_dest_topics = zk.get_children(
                "/consumers/{groupid}/offsets".format(
                    groupid=args.dest_groupid,
                ))
        except NoNodeError:
            # Consumer Group ID doesn't exist.
            pass
        else:
            preprocess_topics(
                args.source_groupid,
                topics_of_source.keys(),
                args.dest_groupid,
                existing_dest_topics,
            )
        # Fetch offsets from the source and create them for the destination.
        offsets = fetch_offsets(zk, args.source_groupid, topics_of_source)
        create_offsets(zk, args.dest_groupid, offsets)
def run(cls, args, cluster_config):
    """Copy a consumer group's Kafka-stored offsets to another group."""
    if args.source_groupid == args.dest_groupid:
        print(
            "Error: Source group ID and destination group ID are same.",
            file=sys.stderr,
        )
        sys.exit(1)

    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    topics_of_source = cls.preprocess_args(
        args.source_groupid,
        args.topic,
        args.partitions,
        cluster_config,
        kafka_client,
    )
    cls.copy_group_kafka(
        kafka_client,
        topics_of_source,
        args.source_groupid,
        args.dest_groupid,
    )
def initialize_kafka_offsets_topic():
    """Force creation of __consumer_offsets by committing a group offset."""
    # Nothing to do if the offsets topic already exists.
    if '__consumer_offsets' in list_topics():
        return
    topic = create_random_topic(1, 1)
    produce_example_msg(topic, num_messages=1)
    kafka_client = KafkaToolClient(KAFKA_URL)
    set_consumer_offsets(
        kafka_client,
        create_random_group_id(),
        {topic: {0: 1}},
        raise_on_error=True,
    )
    # Give the broker time to create and settle the offsets topic.
    time.sleep(20)
def run(cls, args, cluster_config):
    """Rename a consumer group, moving its offsets to the new group ID."""
    # A rename to the same ID is meaningless; bail out early.
    if args.old_groupid == args.new_groupid:
        print(
            "Error: Old group ID and new group ID are the same.",
            file=sys.stderr,
        )
        sys.exit(1)

    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    subscribed_topics = cls.preprocess_args(
        groupid=args.old_groupid,
        topic=None,
        partitions=None,
        cluster_config=cluster_config,
        client=kafka_client,
    )
    cls.rename_group(
        kafka_client,
        args.old_groupid,
        args.new_groupid,
        subscribed_topics,
    )
def run(cls, args, cluster_config):
    """Fetch, optionally sort, and display consumer offsets for a group."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    topics_dict = cls.preprocess_args(
        groupid=args.groupid,
        topic=args.topic,
        partitions=args.partitions,
        cluster_config=cluster_config,
        client=kafka_client,
        quiet=args.json,
        storage=args.storage,
    )
    offsets_metadata = cls.get_offsets(
        kafka_client,
        args.groupid,
        topics_dict,
        args.storage,
    )
    kafka_client.close()

    # Apply the requested ordering, if any.
    if args.sort_by_distance:
        offsets_metadata = cls.sort_by_distance(offsets_metadata)
    elif args.sort_by_distance_percentage:
        offsets_metadata = cls.sort_by_distance_percentage(offsets_metadata)

    if args.json:
        # Flatten per-topic partition records, enriched with distance
        # fields, into one JSON list.
        partitions_info = []
        for topic_partitions in offsets_metadata.values():
            for partition in topic_partitions:
                info = partition._asdict()
                info['offset_distance'] = info['highmark'] - info['current']
                info['percentage_distance'] = cls.percentage_distance(
                    info['highmark'], info['current'])
                partitions_info.append(info)
        print_json(partitions_info)
    else:
        # Warn the user if a topic being subscribed to does not exist in
        # Kafka.
        for topic in topics_dict:
            if topic not in offsets_metadata:
                print(
                    "Warning: Topic {topic} or one or more of it's partitions "
                    "do not exist in Kafka".format(topic=topic),
                    file=sys.stderr,
                )
        cls.print_output(offsets_metadata, args.watermark)
def run(cls, args, cluster_config):
    """Save a consumer group's offsets (per storage backend) to JSON."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    topics_dict = cls.preprocess_args(
        groupid=args.groupid,
        topic=args.topic,
        partitions=args.partitions,
        cluster_config=cluster_config,
        client=kafka_client,
        storage=args.storage,
    )
    try:
        offsets_metadata = get_consumer_offsets_metadata(
            kafka_client,
            args.groupid,
            topics_dict,
            offset_storage=args.storage,
        )
    except KafkaUnavailableError:
        print(
            "Error: Encountered error with Kafka, please try again later.",
            file=sys.stderr,
        )
        raise

    # Warn about subscribed topics Kafka does not know about.
    for topic in topics_dict:
        if topic not in offsets_metadata:
            print(
                "Warning: Topic {topic} does not exist in Kafka".format(
                    topic=topic),
                file=sys.stderr,
            )

    cls.save_offsets(
        offsets_metadata,
        topics_dict,
        args.json_file,
        args.groupid,
    )
    kafka_client.close()
def run(cls, args, cluster_config):
    """Commit the parsed offsets for a consumer group (per storage)."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    # Unless forced, verify the consumer group exists in the storage first.
    if not args.force:
        cls.get_topics_from_consumer_group_id(
            cluster_config,
            args.groupid,
            storage=args.storage,
        )
    try:
        failures = set_consumer_offsets(
            kafka_client,
            args.groupid,
            cls.new_offsets_dict,
            offset_storage=args.storage,
        )
    except TypeError:
        print(
            "Error: Badly formatted input, please re-run command "
            "with --help option.",
            file=sys.stderr)
        raise
    kafka_client.close()

    # Report every partition whose commit failed, then exit non-zero.
    if failures:
        final_error_str = (
            "Error: Unable to commit consumer offsets for:\n")
        for failure in failures:
            final_error_str += (
                " Topic: {topic} Partition: {partition} Error: {error}\n".
                format(topic=failure.topic,
                       partition=failure.partition,
                       error=failure.error))
        print(final_error_str, file=sys.stderr)
        sys.exit(1)
def run(cls, args, cluster_config):
    """Fetch and display consumer offsets, optionally as JSON."""
    kafka_client = KafkaToolClient(cluster_config.broker_list)
    kafka_client.load_metadata_for_topics()

    topics_dict = cls.preprocess_args(
        args.groupid,
        args.topic,
        args.partitions,
        cluster_config,
        kafka_client,
        quiet=args.json,
    )
    offsets_metadata = cls.get_offsets(
        kafka_client,
        args.groupid,
        topics_dict,
        args.storage,
    )
    kafka_client.close()

    if args.json:
        print_json([
            partition._asdict()
            for topic_partitions in offsets_metadata.values()
            for partition in topic_partitions
        ])
    else:
        # Warn the user if a topic being subscribed to does not exist in
        # Kafka.
        for topic in topics_dict:
            if topic not in offsets_metadata:
                print(
                    "Warning: Topic {topic} or one or more of it's partitions "
                    "do not exist in Kafka".format(topic=topic),
                    file=sys.stderr,
                )
        cls.print_output(offsets_metadata, args.watermark)