Example #1
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            groupid=args.groupid,
            topic=None,
            partitions=None,
            cluster_config=cluster_config,
            client=client,
            storage=args.storage,
            fail_on_error=False,
        )
        if not topics_dict:
            print("Consumer Group ID: {group} does not exist in {storage}".format(
                group=args.groupid,
                storage=args.storage,
            ))
            sys.exit(1)

        print("Consumer Group ID: {groupid}".format(groupid=args.groupid))
        for topic, partitions in six.iteritems(topics_dict):
            print("\tTopic: {topic}".format(topic=topic))
            print("\t\tPartitions: {partitions}".format(partitions=partitions))
Example #2
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            args.groupid,
            args.topic,
            args.partitions,
            cluster_config,
            client,
            force=args.force,
        )
        try:
            rewind_consumer_offsets(
                client,
                args.groupid,
                topics_dict,
                args.storage,
            )
        except TypeError:
            print(
                "Error: Badly formatted input, please re-run command "
                "with --help option.", file=sys.stderr
            )
            raise

        client.close()
Example #3
    def run(cls, args, cluster_config):
        if args.old_groupid == args.new_groupid:
            print(
                "Error: Old group ID and new group ID are the same.",
                file=sys.stderr,
            )
            sys.exit(1)
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            groupid=args.old_groupid,
            topic=None,
            partitions=None,
            cluster_config=cluster_config,
            client=client,
            use_admin_client=args.use_admin_client,
        )
        cls.rename_group(
            client,
            args.old_groupid,
            args.new_groupid,
            topics_dict,
        )
Example #4
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            args.groupid,
            args.topic,
            args.partitions,
            cluster_config,
            client,
            force=args.force,
            use_admin_client=args.use_admin_client,
        )
        try:
            advance_consumer_offsets(
                client,
                args.groupid,
                topics_dict,
            )
        except TypeError:
            print("Error: Badly formatted input, please re-run command ",
                  "with --help option.",
                  file=sys.stderr)
            raise
        except UnknownMemberIdError:
            print(
                "Unable to advance offsets for group '{group_name}' from "
                "topic '{topic_name}'. You must ensure none of the consumers "
                "with this consumer group id are running before trying to "
                "advance the offsets stored in Kafka for this consumer "
                "group. Try stopping all of your consumers.".format(
                    group_name=args.groupid,
                    topic_name=args.topic,
                ),
                file=sys.stderr,
            )
            raise

        client.close()
Example #5
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        # if topic is not None, topics_dict will contain only info about that
        # topic; otherwise it will contain info about all topics for the group
        topics_dict = cls.preprocess_args(
            args.groupid,
            args.topic,
            args.partitions,
            cluster_config,
            client,
            topics=args.topics,
        )

        topics = args.topics if args.topics else ([args.topic] if args.topic else [])

        unsubscriber = KafkaUnsubscriber(client)
        unsubscriber.unsubscribe_topics(
            args.groupid,
            topics,
            args.partitions,
            topics_dict,
        )
Example #6
    def run(cls, args, cluster_config):
        if args.source_groupid == args.dest_groupid:
            print(
                "Error: Source group ID and destination group ID are same.",
                file=sys.stderr,
            )
            sys.exit(1)
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()
        source_topics = cls.preprocess_args(
            args.source_groupid,
            args.topic,
            args.partitions,
            cluster_config,
            client,
            use_admin_client=args.use_admin_client,
        )

        cls.copy_group_kafka(
            client,
            source_topics,
            args.source_groupid,
            args.dest_groupid,
        )
Example #7
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            args.groupid, args.topic, args.partitions, cluster_config, client
        )

        consumer_offsets_metadata = cls.get_offsets(
            client,
            args.groupid,
            topics_dict,
            args.storage,
        )

        # Warn the user if a topic being subscribed to does not exist in
        # Kafka.
        for topic in topics_dict:
            if topic not in consumer_offsets_metadata:
                print(
                    "Warning: Topic {topic} or one or more of it's partitions "
                    "do not exist in Kafka".format(topic=topic),
                    file=sys.stderr,
                )
        client.close()
        if args.json:
            print_json(consumer_offsets_metadata)
        else:
            cls.print_output(consumer_offsets_metadata, args.watermark)
Example #9
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            args.groupid, args.topic, args.partitions, cluster_config, client
        )
        with ZK(cluster_config) as zk:
            if args.storage == 'zookeeper':
                unsubscriber = ZookeeperUnsubscriber(zk)
            elif args.storage == 'kafka':
                unsubscriber = KafkaUnsubscriber(client)
            else:
                print(
                    "Invalid storage option: {}".format(args.storage),
                    file=sys.stderr,
                )
                sys.exit(1)

            unsubscriber.unsubscribe_topic(
                args.groupid,
                args.topic,
                args.partitions,
                topics_dict,
            )
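The if/elif dispatch on args.storage above can equally be written as a lookup table, which keeps the set of supported backends in one place. A minimal sketch reusing ZookeeperUnsubscriber and KafkaUnsubscriber from the example; the table and helper names are hypothetical, not part of the original tool:

    # Hypothetical refactoring of the storage dispatch above: map each
    # backend name to a constructor and fail loudly on unknown options.
    UNSUBSCRIBER_FACTORIES = {
        'zookeeper': lambda zk, client: ZookeeperUnsubscriber(zk),
        'kafka': lambda zk, client: KafkaUnsubscriber(client),
    }

    def make_unsubscriber(storage, zk, client):
        try:
            return UNSUBSCRIBER_FACTORIES[storage](zk, client)
        except KeyError:
            raise ValueError("Invalid storage option: {}".format(storage))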
Example #10
    def run(cls, args, cluster_config):
        if args.old_groupid == args.new_groupid:
            print(
                "Error: Old group ID and new group ID are the same.",
                file=sys.stderr,
            )
            sys.exit(1)
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            groupid=args.old_groupid,
            topic=None,
            partitions=None,
            cluster_config=cluster_config,
            client=client,
            storage=args.storage,
        )
        if args.storage == 'kafka':
            cls.rename_group_with_storage_kafka(
                client,
                args.old_groupid,
                args.new_groupid,
                topics_dict,
            )
        else:
            cls.rename_group_with_storage_zookeeper(
                args.old_groupid,
                args.new_groupid,
                topics_dict,
                cluster_config,
            )
Example #11
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        if args.topic and args.topics:
            print(
                "Error: Cannot specify --topic and --topics at the same time.",
                file=sys.stderr,
            )
            sys.exit(1)

        if args.partitions and args.topics:
            print(
                "Error: Cannot use --partitions with --topics. Use --topic "
                "instead.",
                file=sys.stderr,
            )
            sys.exit(1)

        # if topic is not None, topics_dict will contain only info about that
        # topic; otherwise it will contain info about all topics for the group
        topics_dict = cls.preprocess_args(
            args.groupid,
            args.topic,
            args.partitions,
            cluster_config,
            client,
            storage=args.storage,
        )

        topics = args.topics if args.topics else (
            [args.topic] if args.topic else [])
        for topic in topics:
            if topic not in topics_dict:
                print(
                    "Error: Consumer {groupid} is not subscribed to topic:"
                    " {topic}.".format(groupid=args.groupid, topic=topic),
                    file=sys.stderr,
                )
                sys.exit(1)

        with ZK(cluster_config) as zk:
            if args.storage == 'zookeeper':
                unsubscriber = ZookeeperUnsubscriber(zk)
            else:
                unsubscriber = KafkaUnsubscriber(client)

            unsubscriber.unsubscribe_topics(
                args.groupid,
                topics,
                args.partitions,
                topics_dict,
            )
Example #12
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            groupid=args.groupid,
            topic=args.topic,
            partitions=args.partitions,
            cluster_config=cluster_config,
            client=client,
            quiet=args.json,
            storage=args.storage,
        )

        consumer_offsets_metadata = cls.get_offsets(
            client,
            args.groupid,
            topics_dict,
            args.storage,
        )
        client.close()

        if args.sort_by_distance:
            consumer_offsets_metadata = cls.sort_by_distance(
                consumer_offsets_metadata)
        elif args.sort_by_distance_percentage:
            consumer_offsets_metadata = cls.sort_by_distance_percentage(
                consumer_offsets_metadata)

        if args.json:
            partitions_info = []
            for partitions in consumer_offsets_metadata.values():
                for partition in partitions:
                    partition_info = partition._asdict()
                    partition_info['offset_distance'] = partition_info[
                        'highmark'] - partition_info['current']
                    partition_info[
                        'percentage_distance'] = cls.percentage_distance(
                            partition_info['highmark'],
                            partition_info['current'])
                    partitions_info.append(partition_info)
            print_json(partitions_info)
        else:
            # Warn the user if a topic being subscribed to does not exist in
            # Kafka.
            for topic in topics_dict:
                if topic not in consumer_offsets_metadata:
                    print(
                        "Warning: Topic {topic} or one or more of it's partitions "
                        "do not exist in Kafka".format(topic=topic),
                        file=sys.stderr,
                    )
            cls.print_output(consumer_offsets_metadata, args.watermark)
Example #13
 def get_current_watermarks(self, partitions=None):
     client = KafkaToolClient(self.kafka_config.broker_list)
     client.load_metadata_for_topics(CONSUMER_OFFSET_TOPIC)
     offsets = get_topics_watermarks(
         client,
         [CONSUMER_OFFSET_TOPIC],
     )
     partitions_set = set(tp.partition for tp in partitions) if partitions else None
     return {part: offset for part, offset
             in six.iteritems(offsets[CONSUMER_OFFSET_TOPIC])
             if offset.highmark > offset.lowmark and
             (partitions is None or part in partitions_set)}
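The method above returns a mapping from partition number to watermark tuple for the consumer-offset topic, keeping only partitions that actually hold data. A minimal usage sketch; `checker` is a hypothetical instance of the class that defines get_current_watermarks, and highmark/lowmark are the fields already used in the filter:

    # Hypothetical usage: sum the messages currently retained across the
    # non-empty partitions of the consumer-offset topic.
    watermarks = checker.get_current_watermarks()
    retained = sum(o.highmark - o.lowmark for o in watermarks.values())
    print("Retained consumer-offset messages:", retained)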
Example #15
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            args.groupid,
            None,
            None,
            cluster_config,
            client,
        )
        cls.delete_group_kafka(client, args.groupid, topics_dict)
Example #17
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            groupid=args.groupid,
            topic=args.topic,
            partitions=args.partitions,
            cluster_config=cluster_config,
            client=client,
            quiet=args.json,
        )

        consumer_offsets_metadata = cls.get_offsets(
            client,
            args.groupid,
            topics_dict,
        )
        client.close()

        if args.sort_by_distance:
            consumer_offsets_metadata = cls.sort_by_distance(consumer_offsets_metadata)
        elif args.sort_by_distance_percentage:
            consumer_offsets_metadata = cls.sort_by_distance_percentage(consumer_offsets_metadata)

        if args.json:
            partitions_info = []
            for partitions in consumer_offsets_metadata.values():
                for partition in partitions:
                    partition_info = partition._asdict()
                    partition_info['offset_distance'] = partition_info['highmark'] - partition_info['current']
                    partition_info['percentage_distance'] = cls.percentage_distance(
                        partition_info['highmark'],
                        partition_info['current']
                    )
                    partitions_info.append(partition_info)
            print_json(partitions_info)
        else:
            # Warn the user if a topic being subscribed to does not exist in
            # Kafka.
            for topic in topics_dict:
                if topic not in consumer_offsets_metadata:
                    print(
                        "Warning: Topic {topic} or one or more of it's partitions "
                        "do not exist in Kafka".format(topic=topic),
                        file=sys.stderr,
                    )
            cls.print_output(consumer_offsets_metadata, args.watermark)
Example #18
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(args.groupid, None, None,
                                          cluster_config, client, False)
        if not topics_dict:
            print("Consumer Group ID: {group} does not exist in "
                  "Zookeeper".format(group=args.groupid))
            sys.exit(1)

        print("Consumer Group ID: {groupid}".format(groupid=args.groupid))
        for topic, partitions in six.iteritems(topics_dict):
            print("\tTopic: {topic}".format(topic=topic))
            print("\t\tPartitions: {partitions}".format(partitions=partitions))
Example #19
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            args.groupid,
            None,
            None,
            cluster_config,
            client,
            storage=args.storage,
        )
        if args.storage == 'zookeeper':
            cls.delete_group_zk(cluster_config, args.groupid)
        else:
            cls.delete_group_kafka(client, args.groupid, topics_dict)
Example #20
def get_topic_partition_metadata(hosts):
    """Returns topic-partition metadata from Kafka broker.

    kafka-python 1.3+ doesn't include partition metadata information in
    topic_partitions, so we extract it from the metadata response ourselves.
    """
    kafka_client = KafkaToolClient(hosts, timeout=10)
    kafka_client.load_metadata_for_topics()
    topic_partitions = kafka_client.topic_partitions
    resp = kafka_client.send_metadata_request()

    for _, topic, partitions in resp.topics:
        for partition_error, partition, leader, replicas, isr in partitions:
            if topic_partitions.get(topic, {}).get(partition) is not None:
                topic_partitions[topic][partition] = PartitionMetadata(
                    topic, partition, leader, replicas, isr, partition_error)
    return topic_partitions
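A short usage sketch of the function above. The broker address is an assumption, and the attribute names follow the PartitionMetadata constructor call in the example:

    # Hypothetical usage: print the leader broker of every partition.
    # 'localhost:9092' is an assumed broker address, passed as a list to
    # match the hosts parameter above.
    metadata = get_topic_partition_metadata(['localhost:9092'])
    for topic, partitions in metadata.items():
        for partition_id, pmeta in partitions.items():
            print(topic, partition_id, 'leader:', pmeta.leader)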
Example #22
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(args.groupid, None, None,
                                          cluster_config, client)
        if not args.storage or args.storage == 'zookeeper':
            cls.delete_group_zk(cluster_config, args.groupid)
        elif args.storage == 'kafka':
            cls.delete_group_kafka(client, args.groupid, topics_dict)
        else:
            print(
                "Error: Invalid offset storage option: "
                "{}.".format(args.storage),
                file=sys.stderr,
            )
            sys.exit(1)
Example #23
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            groupid=args.groupid,
            topic=args.topic,
            partitions=args.partitions,
            cluster_config=cluster_config,
            client=client,
            storage=args.storage,
        )
        try:
            consumer_offsets_metadata = get_consumer_offsets_metadata(
                client,
                args.groupid,
                topics_dict,
                offset_storage=args.storage,
            )
        except KafkaUnavailableError:
            print(
                "Error: Encountered error with Kafka, please try again later.",
                file=sys.stderr,
            )
            raise

        # Warn the user if a topic being subscribed to does not exist in Kafka.
        for topic in topics_dict:
            if topic not in consumer_offsets_metadata:
                print(
                    "Warning: Topic {topic} does not exist in Kafka".format(
                        topic=topic),
                    file=sys.stderr,
                )

        cls.save_offsets(
            consumer_offsets_metadata,
            topics_dict,
            args.json_file,
            args.groupid,
        )
        client.close()
Example #25
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        # Let's verify that the consumer does exist in the given offset storage
        if not args.force:
            cls.get_topics_from_consumer_group_id(
                cluster_config,
                args.groupid,
                storage=args.storage,
            )

        try:
            results = set_consumer_offsets(
                client,
                args.groupid,
                cls.new_offsets_dict,
                offset_storage=args.storage,
            )
        except TypeError:
            print(
                "Error: Badly formatted input, please re-run command "
                "with --help option.",
                file=sys.stderr)
            raise

        client.close()

        if results:
            final_error_str = (
                "Error: Unable to commit consumer offsets for:\n")
            for result in results:
                error_str = (
                    "  Topic: {topic} Partition: {partition} Error: {error}\n".
                    format(topic=result.topic,
                           partition=result.partition,
                           error=result.error))
                final_error_str += error_str
            print(final_error_str, file=sys.stderr)
            sys.exit(1)
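The error report above is built by repeated string concatenation; an equivalent sketch using join(), with results exactly as returned by set_consumer_offsets in the example:

    # Hypothetical equivalent of the error-report loop above.
    import sys

    lines = ["Error: Unable to commit consumer offsets for:"]
    lines.extend(
        "  Topic: {topic} Partition: {partition} Error: {error}".format(
            topic=r.topic, partition=r.partition, error=r.error)
        for r in results
    )
    print("\n".join(lines), file=sys.stderr)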
Example #26
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        # Let's verify that the consumer does exist in Zookeeper
        if not args.force:
            cls.get_topics_from_consumer_group_id(
                cluster_config,
                args.groupid,
            )

        try:
            results = set_consumer_offsets(
                client,
                args.groupid,
                cls.new_offsets_dict,
                offset_storage=args.storage,
            )
        except TypeError:
            print(
                "Error: Badly formatted input, please re-run command "
                "with --help option.", file=sys.stderr
            )
            raise

        client.close()

        if results:
            final_error_str = ("Error: Unable to commit consumer offsets for:\n")
            for result in results:
                error_str = (
                    "  Topic: {topic} Partition: {partition} Error: {error}\n".format(
                        topic=result.topic,
                        partition=result.partition,
                        error=result.error
                    )
                )
                final_error_str += error_str
            print(final_error_str, file=sys.stderr)
            sys.exit(1)
Example #27
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            groupid=args.groupid,
            topic=args.topic,
            partitions=args.partitions,
            cluster_config=cluster_config,
            client=client,
        )
        try:
            consumer_offsets_metadata = get_consumer_offsets_metadata(
                client,
                args.groupid,
                topics_dict,
            )
        except KafkaUnavailableError:
            print(
                "Error: Encountered error with Kafka, please try again later.",
                file=sys.stderr,
            )
            raise

        # Warn the user if a topic being subscribed to does not exist in Kafka.
        for topic in topics_dict:
            if topic not in consumer_offsets_metadata:
                print(
                    "Warning: Topic {topic} does not exist in Kafka"
                    .format(topic=topic),
                    file=sys.stderr,
                )

        cls.save_offsets(
            consumer_offsets_metadata,
            topics_dict,
            args.json_file,
            args.groupid,
        )
        client.close()
Example #28
    def run(cls, args, cluster_config):
        if args.source_groupid == args.dest_groupid:
            print(
                "Error: Source group ID and destination group ID are same.",
                file=sys.stderr,
            )
            sys.exit(1)
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()
        source_topics = cls.preprocess_args(
            args.source_groupid,
            args.topic,
            args.partitions,
            cluster_config,
            client,
        )
        with ZK(cluster_config) as zk:
            try:
                topics_dest_group = zk.get_children(
                    "/consumers/{groupid}/offsets".format(
                        groupid=args.dest_groupid,
                    )
                )
            except NoNodeError:
                # Consumer Group ID doesn't exist.
                pass
            else:
                preprocess_topics(
                    args.source_groupid,
                    source_topics.keys(),
                    args.dest_groupid,
                    topics_dest_group,
                )

            # Fetch offsets
            source_offsets = fetch_offsets(zk, args.source_groupid, source_topics)
            create_offsets(zk, args.dest_groupid, source_offsets)
Example #31
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(args.groupid,
                                          args.topic,
                                          args.partitions,
                                          cluster_config,
                                          client,
                                          quiet=args.json)

        consumer_offsets_metadata = cls.get_offsets(
            client,
            args.groupid,
            topics_dict,
            args.storage,
        )
        client.close()

        if args.json:
            print_json([
                p._asdict()
                for partitions in consumer_offsets_metadata.values()
                for p in partitions
            ])
        else:
            # Warn the user if a topic being subscribed to does not exist in
            # Kafka.
            for topic in topics_dict:
                if topic not in consumer_offsets_metadata:
                    print(
                        "Warning: Topic {topic} or one or more of it's partitions "
                        "do not exist in Kafka".format(topic=topic),
                        file=sys.stderr,
                    )
            cls.print_output(consumer_offsets_metadata, args.watermark)
Example #32
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()
        watermarks = cls.get_watermarks(
            client,
            args.topic,
            exact=args.exact,
        )

        client.close()
        if args.json:
            print_json(watermarks)
        else:
            cls.print_output(watermarks)
Example #33
    def run(cls, args, cluster_config):
        if args.old_groupid == args.new_groupid:
            print(
                "Error: Old group ID and new group ID are the same.",
                file=sys.stderr,
            )
            sys.exit(1)
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            groupid=args.old_groupid,
            topic=None,
            partitions=None,
            cluster_config=cluster_config,
            client=client,
        )
        cls.rename_group(
            client,
            args.old_groupid,
            args.new_groupid,
            topics_dict,
        )
Example #34
    def run(cls, args, cluster_config):
        if args.source_groupid == args.dest_groupid:
            print(
                "Error: Source group ID and destination group ID are same.",
                file=sys.stderr,
            )
            sys.exit(1)
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()
        source_topics = cls.preprocess_args(
            args.source_groupid,
            args.topic,
            args.partitions,
            cluster_config,
            client,
        )

        cls.copy_group_kafka(
            client,
            source_topics,
            args.source_groupid,
            args.dest_groupid,
        )