Code example #1
def get_topic_partition_with_error(cluster_config,
                                   error,
                                   fetch_unavailable_brokers=False):
    """Fetches the metadata from the cluster and returns the set of
    (topic, partition) tuples containing all the topic-partitions
    currently affected by the specified error. It also fetches unavailable-broker list
    if required."""

    metadata = get_topic_partition_metadata(cluster_config.broker_list)
    affected_partitions = set()
    if fetch_unavailable_brokers:
        unavailable_brokers = set()
    with ZK(cluster_config) as zk:
        for partitions in metadata.values():
            for partition_metadata in partitions.values():
                if int(partition_metadata.error) == error:
                    if fetch_unavailable_brokers:
                        unavailable_brokers |= get_unavailable_brokers(
                            zk, partition_metadata)
                    affected_partitions.add((partition_metadata.topic,
                                             partition_metadata.partition))

    if fetch_unavailable_brokers:
        return affected_partitions, unavailable_brokers
    else:
        return affected_partitions
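
For context, a minimal, hypothetical call site might look like the sketch below. It assumes cluster_config is a kafka-utils ClusterConfig obtained elsewhere (for example from the tool's normal config discovery) and uses Kafka protocol error code 5 (LEADER_NOT_AVAILABLE) as the condition of interest.

# Hypothetical call site; cluster_config is assumed to exist already, and
# error code 5 is LEADER_NOT_AVAILABLE in the Kafka protocol.
LEADER_NOT_AVAILABLE = 5

partitions, brokers = get_topic_partition_with_error(
    cluster_config,
    LEADER_NOT_AVAILABLE,
    fetch_unavailable_brokers=True,
)
print("affected partitions:", partitions)
print("unavailable brokers:", brokers)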
Code example #2
    def test_set_topic_config(self, mock_client):
        with mock.patch.object(
            ZK,
            'set',
            autospec=True
        ) as mock_set:
            with ZK(self.cluster_config) as zk:
                zk.set_topic_config(
                    "some_topic",
                    {"version": 1, "config": {"cleanup.policy": "compact"}}
                )
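                # Note: with autospec=True the patch wraps the unbound ZK.set, so
                # the instance (zk) is recorded as the first positional argument.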
                mock_set.assert_called_once_with(
                    zk,
                    '/config/topics/some_topic',
                    json.dumps({"version": 1, "config": {"cleanup.policy": "compact"}})
                )

                expected_create_call = mock.call(
                    '/config/changes/config_change_',
                    "some_topic",
                    None,
                    False,
                    True,
                    False
                )
                assert mock_client.return_value.create.call_args_list == [expected_create_call]
Code example #3
    def test_create(self, mock_client):
        with ZK(self.cluster_config) as zk:
            zk.create('/kafka/consumers/some_group/offsets')
            zk.create('/kafka/consumers/some_group/offsets',
                      value='some_val',
                      acl=None,
                      ephemeral=True,
                      sequence=True,
                      makepath=True)
            mock_obj = mock.Mock()
            zk.create(
                '/kafka/consumers/some_group/offsets',
                value='some_val',
                acl=mock_obj,
            )

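            # The expected calls below assume the underlying kazoo create() is
            # invoked positionally as (path, value, acl, ephemeral, sequence,
            # makepath).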
            call_list = [
                mock.call('/kafka/consumers/some_group/offsets', '', None,
                          False, False, False),
                mock.call('/kafka/consumers/some_group/offsets', 'some_val',
                          None, True, True, True),
                mock.call('/kafka/consumers/some_group/offsets', 'some_val',
                          mock_obj, False, False, False),
            ]
            assert mock_client.return_value.create.call_args_list == call_list
Code example #4
 def get_topics_for_group_from_zookeeper(
         cls,
         cluster_config,
         groupid,
         fail_on_error
 ):
     topics = []
     with ZK(cluster_config) as zk:
         # Query zookeeper to get the list of topics that this consumer is
         # subscribed to.
         try:
             topics = zk.get_my_subscribed_topics(groupid)
         except NoNodeError:
             if groupid in zk.get_children("/consumers"):
                 print(
                     "Error: Offsets for Consumer Group ID {groupid} not found.".format(
                         groupid=groupid
                     ),
                     file=sys.stderr,
                 )
             else:
                 if fail_on_error:
                     print(
                         "Error: Consumer Group ID {groupid} does not exist.".format(
                             groupid=groupid
                         ),
                         file=sys.stderr,
                     )
                     sys.exit(1)
     return topics
Code example #5
    def apply_log_compaction(self, topics):
        self.log.info("Applying compaction settings on {} topics".format(
            len(topics)))

        compacted_topics = []
        skipped_topics = []
        missed_topics = []

        cluster = get_config().cluster_config

        with ZK(cluster) as zk:
            for topic in topics:
                try:
                    current_config = zk.get_topic_config(topic)
                    if 'cleanup.policy' not in current_config['config']:
                        # cleanup.policy is not set yet, so apply compaction;
                        # topics that already have a value (e.g. a manual
                        # override) are skipped below
                        current_config['config']['cleanup.policy'] = 'compact'
                        if not self.dry_run:
                            zk.set_topic_config(topic=topic,
                                                value=current_config)
                        compacted_topics.append(topic)
                    else:
                        skipped_topics.append(topic)
                except NoNodeError:
                    missed_topics.append(topic)

        self.log_results(compacted_topics=compacted_topics,
                         skipped_topics=skipped_topics,
                         missed_topics=missed_topics)
Code example #6
    def run(self, cluster_config, args):
        self.cluster_config = cluster_config
        self.args = args
        with ZK(self.cluster_config) as self.zk:
            broker_ids = get_broker_ids(self.zk)

            if args.controller_only and not is_controller(
                    self.zk, args.broker_id):
                terminate(
                    status_code.OK,
                    prepare_terminate_message(
                        'Broker {} is not the controller, nothing to check'.
                        format(args.broker_id), ),
                    args.json,
                )
            if args.first_broker_only and not broker_ids:
                terminate(
                    status_code.OK,
                    prepare_terminate_message(
                        'No brokers detected, nothing to check'),
                    args.json,
                )
            if args.first_broker_only and not is_first_broker(
                    broker_ids, args.broker_id):
                terminate(
                    status_code.OK,
                    prepare_terminate_message(
                        'Broker {} does not have the lowest id, nothing to check'.
                        format(args.broker_id), ),
                    args.json,
                )
            return self.run_command()
Code example #7
 def test_delete_topic_partitions(self, mock_client):
     with mock.patch.object(
         ZK,
         'delete',
         autospec=True
     ) as mock_delete:
         with ZK(self.cluster_config) as zk:
             zk.delete_topic_partitions(
                 'some_group',
                 'some_topic',
                 [0, 1, 2]
             )
             call_list = [
                 mock.call(
                     zk,
                     '/consumers/some_group/offsets/some_topic/0'
                 ),
                 mock.call(
                     zk,
                     '/consumers/some_group/offsets/some_topic/1'
                 ),
                 mock.call(
                     zk,
                     '/consumers/some_group/offsets/some_topic/2'
                 ),
             ]
             assert mock_delete.call_args_list == call_list
Code example #8
    def rename_group_with_storage_zookeeper(cls, old_groupid, new_groupid,
                                            topics_dict, cluster_config):
        with ZK(cluster_config) as zk:
            try:
                topics = zk.get_children(
                    "/consumers/{groupid}/offsets".format(groupid=new_groupid))
            except NoNodeError:
                # Consumer Group ID doesn't exist.
                pass
            else:
                preprocess_topics(
                    old_groupid,
                    list(topics_dict.keys()),
                    new_groupid,
                    topics,
                )

            old_offsets = fetch_offsets(zk, old_groupid, topics_dict)
            create_offsets(zk, new_groupid, old_offsets)
            try:
                old_base_path = "/consumers/{groupid}".format(
                    groupid=old_groupid, )
                zk.delete(old_base_path, recursive=True)
            except:
                print(
                    "Error: Unable to migrate all metadata in Zookeeper. "
                    "Please re-run the command.",
                    file=sys.stderr)
                raise
Code example #9
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        topics_dict = cls.preprocess_args(
            args.groupid, args.topic, args.partitions, cluster_config, client
        )
        with ZK(cluster_config) as zk:
            if args.storage == 'zookeeper':
                unsubscriber = ZookeeperUnsubscriber(zk)
            elif args.storage == 'kafka':
                unsubscriber = KafkaUnsubscriber(client)
            else:
                print(
                    "Invalid storage option: {}".format(args.storage),
                    file=sys.stderr,
                )
                sys.exit(1)

            unsubscriber.unsubscribe_topic(
                args.groupid,
                args.topic,
                args.partitions,
                topics_dict,
            )
Code example #10
 def run(self, cluster_config, args):
     self.cluster_config = cluster_config
     self.args = args
     with ZK(self.cluster_config) as self.zk:
         if args.controller_only:
             check_run_on_controller(self.zk, self.args)
         return self.run_command()
Code example #11
def step_impl5(context):
    cluster_config = get_cluster_config()
    with ZK(cluster_config) as zk:
        offsets = zk.get_group_offsets(context.group)
        new_offsets = zk.get_group_offsets(NEW_GROUP)
    assert context.topic in offsets
    assert new_offsets == offsets
Code example #12
    def test_set_topic_config_kafka_9(self, mock_client):
        with mock.patch.object(ZK, 'set', autospec=True) as mock_set:
            with ZK(self.cluster_config) as zk:
                config = {
                    "version": 1,
                    "config": {
                        "cleanup.policy": "compact"
                    }
                }
                config_change = {
                    "version": 1,
                    "entity_type": "topics",
                    "entity_name": "some_topic"
                }

                zk.set_topic_config("some_topic", config, (0, 9, 2))

                serialized_config = dump_json(config)
                serialized_config_change = dump_json(config_change)

                mock_set.assert_called_once_with(
                    zk,
                    '/config/topics/some_topic',
                    serialized_config,
                )

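                # The change-notification znode under /config/changes is created
                # with sequence=True, so ZooKeeper appends a sequence number to
                # the 'config_change_' prefix.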
                expected_create_call = mock.call(
                    '/config/changes/config_change_', serialized_config_change,
                    None, False, True, False)
                assert mock_client.return_value.create.call_args_list == [
                    expected_create_call
                ]
Code example #13
    def test_set_broker_config_kafka_10(self, mock_client):
        with mock.patch.object(ZK, 'set', autospec=True) as mock_set:
            with ZK(self.cluster_config) as zk:
                config = {
                    "version": 1,
                    "config": {
                        "leader.replication.throttled.rate": "42"
                    }
                }
                config_change = {"entity_path": "brokers/0", "version": 2}

                zk.set_broker_config(0, config)

                serialized_config = dump_json(config)
                serialized_config_change = dump_json(config_change)

                mock_set.assert_called_once_with(
                    zk,
                    '/config/brokers/0',
                    serialized_config,
                )

                expected_create_call = mock.call(
                    '/config/changes/config_change_', serialized_config_change,
                    None, False, True, False)
                assert mock_client.return_value.create.call_args_list == [
                    expected_create_call
                ]
Code example #14
 def test_get_brokers_names_only(self, mock_client):
     with ZK(self.cluster_config) as zk:
         zk.get_children = mock.Mock(return_value=[1, 2, 3], )
         expected = {1: None, 2: None, 3: None}
         actual = zk.get_brokers(names_only=True)
         zk.get_children.assert_called_with("/brokers/ids")
         assert actual == expected
Code example #15
 def test_get_brokers_empty_cluster(self, mock_client):
     with ZK(self.cluster_config) as zk:
         zk.get_children = mock.Mock(side_effect=NoNodeError())
         actual_with_no_node_error = zk.get_brokers()
         expected_with_no_node_error = {}
         zk.get_children.assert_called_with("/brokers/ids")
         assert actual_with_no_node_error == expected_with_no_node_error
Code example #16
def step_impl3(context):
    cluster_config = get_cluster_config()
    with ZK(cluster_config) as zk:
        current_config = zk.get_topic_config(context.topic)
        current_config['config']['max.message.bytes'] = '1000'
        zk.set_topic_config(context.topic, value=current_config)
    # sleep for 2 seconds to ensure the config change is actually picked up
    time.sleep(2)
Code example #17
 def test_get_topic_config(self, mock_client):
     with ZK(self.cluster_config) as zk:
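         # zk.zk appears to be the wrapped kazoo client; its get() returns a
         # (data, ZnodeStat) tuple, and only the JSON payload matters here.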
         zk.zk.get = mock.Mock(return_value=(
             b'{"version": 1, "config": {"cleanup.policy": "compact"}}',
             "Random node info that doesn't matter"))
         actual = zk.get_topic_config("some_topic")
         expected = {"version": 1, "config": {"cleanup.policy": "compact"}}
         assert actual == expected
Code example #18
def get_broker_list(cluster_config):
    """Returns a dictionary of brokers in the form {id: host}

    :param cluster_config: the configuration of the cluster
    :type cluster_config: map
    """
    with ZK(cluster_config) as zk:
        brokers = sorted(zk.get_brokers().items(), key=itemgetter(0))
        return [(id, data['host']) for id, data in brokers]
Code example #19
    def test_get_brokers_with_metadata_for_plaintext(self, mock_client):
        with ZK(self.cluster_config) as zk:
            zk.get_children = mock.Mock(return_value=[1], )

            zk.get = mock.Mock(
                return_value=(b'{"endpoints":[],"host":"broker"}', None))
            expected = {1: {'host': 'broker'}}
            actual = zk.get_brokers()
            assert actual[1]['host'] == expected[1]['host']
Code example #20
    def test_get_brokers_with_metadata_for_sasl(self, mock_client):
        with ZK(self.cluster_config) as zk:
            zk.get_children = mock.Mock(return_value=[1], )

            zk.get = mock.Mock(return_value=(
                b'{"endpoints":["PLAINTEXTSASL://broker:9093"],"host":null}',
                None))
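            # "host" is null in the payload, so the broker host is evidently
            # recovered from the PLAINTEXTSASL endpoint string instead.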
            expected = {1: {'host': 'broker'}}
            actual = zk.get_brokers()
            assert actual[1]['host'] == expected[1]['host']
Code example #21
 def test_get_brokers_with_metadata(self, mock_client):
     with ZK(self.cluster_config) as zk:
         zk.get_children = mock.Mock(return_value=[1, 2, 3], )
         zk.get_broker_metadata = mock.Mock(return_value='broker', )
         expected = {1: 'broker', 2: 'broker', 3: 'broker'}
         actual = zk.get_brokers()
         zk.get_children.assert_called_with("/brokers/ids")
         calls = zk.get_broker_metadata.mock_calls
         zk.get_broker_metadata.assert_has_calls(calls)
         assert actual == expected
Code example #22
File: list_groups.py  Project: me2d/kafka-utils
 def get_zookeeper_groups(cls, cluster_config):
     '''Get the group_id of groups committed into Zookeeper.'''
     with ZK(cluster_config) as zk:
         try:
             return zk.get_children("/consumers")
         except NoNodeError:
             print(
                 "Error: No consumers node found in zookeeper",
                 file=sys.stderr,
             )
Code example #23
    def test_get_nonexistent_topic_config(self, mock_client):
        """
        Test getting configuration for topics that don't exist.
        """

        with ZK(self.cluster_config) as zk:
            zk.zk.get = mock.Mock(side_effect=NoNodeError())
            zk.get_topics = mock.Mock(return_value={})
            with pytest.raises(NoNodeError):
                zk.get_topic_config("some_topic")
Code example #24
 def test_delete(self, mock_client):
     with ZK(self.cluster_config) as zk:
         zk.delete('/kafka/consumers/some_group/offsets', )
         zk.delete('/kafka/consumers/some_group/offsets', recursive=True)
         call_list = [
             mock.call('/kafka/consumers/some_group/offsets',
                       recursive=False),
             mock.call('/kafka/consumers/some_group/offsets',
                       recursive=True),
         ]
         assert mock_client.return_value.delete.call_args_list == call_list
Code example #25
 def test_set(self, mock_client):
     with ZK(self.cluster_config) as zk:
         zk.set('config/topics/some_topic', 'some_val')
         zk.set('brokers/topics/some_topic',
                '{"name": "some_topic", "more": "properties"}')
         call_list = [
             mock.call('config/topics/some_topic', 'some_val'),
             mock.call('brokers/topics/some_topic',
                       '{"name": "some_topic", "more": "properties"}')
         ]
         assert mock_client.return_value.set.call_args_list == call_list
Code example #26
    def run(cls, args, cluster_config):
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()

        if args.topic and args.topics:
            print(
                "Error: Cannot specify --topic and --topics at the same time.",
                file=sys.stderr,
            )
            sys.exit(1)

        if args.partitions and args.topics:
            print(
                "Error: Cannot use --partitions with --topics. Use --topic "
                "instead.",
                file=sys.stderr,
            )
            sys.exit(1)

        # if topic is not None, topics_dict will contain only info about that
        # topic; otherwise it will contain info about all topics for the group
        topics_dict = cls.preprocess_args(
            args.groupid,
            args.topic,
            args.partitions,
            cluster_config,
            client,
            storage=args.storage,
        )

        topics = args.topics if args.topics else (
            [args.topic] if args.topic else [])
        for topic in topics:
            if topic not in topics_dict:
                print(
                    "Error: Consumer {groupid} is not subscribed to topic:"
                    " {topic}.".format(groupid=args.groupid, topic=topic),
                    file=sys.stderr,
                )
                sys.exit(1)

        with ZK(cluster_config) as zk:
            if args.storage == 'zookeeper':
                unsubscriber = ZookeeperUnsubscriber(zk)
            else:
                unsubscriber = KafkaUnsubscriber(client)

            unsubscriber.unsubscribe_topics(
                args.groupid,
                topics,
                args.partitions,
                topics_dict,
            )
Code example #27
 def test_get_broker_config(self, mock_client):
     with ZK(self.cluster_config) as zk:
         zk.zk.get = mock.Mock(
             return_value=(
                 b'{"version": 1, "config": {"leader.replication.throttled.rate": "42"}}',
                 "Random node info that doesn't matter"
             )
         )
         actual = zk.get_broker_config(0)
         expected = {"version": 1, "config": {"leader.replication.throttled.rate": "42"}}
         assert actual == expected
Code example #28
    def test_get_topics(self, mock_client):
        with ZK(self.cluster_config) as zk:
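            # MockGetTopics(31000) stands in for a ZnodeStat whose ctime is in
            # milliseconds; the expected results below show it converted to
            # seconds (31000 -> 31.0).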
            zk.zk.get = mock.Mock(
                return_value=(
                    (
                        b'{"version": "1", "partitions": {"0": [1, 0]}}',
                        MockGetTopics(31000),
                    )
                )
            )

            zk._fetch_partition_state = mock.Mock(
                return_value=(
                    (
                        b'{"version": "2"}',
                        MockGetTopics(32000),
                    )
                )
            )

            actual_with_fetch_state = zk.get_topics("some_topic")
            expected_with_fetch_state = {
                'some_topic': {
                    'ctime': 31.0,
                    'partitions': {
                        '0': {
                            'replicas': [1, 0],
                            'ctime': 32.0,
                            'version': '2',
                        },
                    },
                    'version': '1',
                },
            }
            assert actual_with_fetch_state == expected_with_fetch_state

            zk._fetch_partition_info = mock.Mock(
                return_value=MockGetTopics(33000)
            )

            actual_without_fetch_state = zk.get_topics("some_topic", fetch_partition_state=False)
            expected_without_fetch_state = {
                'some_topic': {
                    'ctime': 31.0,
                    'partitions': {
                        '0': {
                            'replicas': [1, 0],
                            'ctime': 33.0,
                        },
                    },
                    'version': '1',
                },
            }
            assert actual_without_fetch_state == expected_without_fetch_state
Code example #29
File: main.py  Project: yzlzr227/kafka-utils
def get_broker_list(cluster_config):
    """Returns a dictionary of brokers in the form {id: host}

    :param cluster_config: the configuration of the cluster
    :type cluster_config: kafka_utils.utils.config.ClusterConfig
    :returns: all the brokers in the cluster
    :rtype: map of (broker_id, host) pairs
    """
    with ZK(cluster_config) as zk:
        brokers = sorted(list(zk.get_brokers().items()), key=itemgetter(0))
        return [(int(id), data['host']) for id, data in brokers]
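
A brief, hypothetical usage sketch, assuming cluster_config is a kafka-utils ClusterConfig built elsewhere:

# Hypothetical usage; prints one "id<TAB>host" line per broker.
for broker_id, host in get_broker_list(cluster_config):
    print("{id}\t{host}".format(id=broker_id, host=host))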
Code example #30
    def test_get_topic_config_8(self, mock_client):
        """
        Test getting configuration for topics created in Kafka versions prior to 0.9.0.
        """

        with ZK(self.cluster_config) as zk:
            zk.zk.get = mock.Mock(side_effect=NoNodeError())
            zk.get_topics = mock.Mock(return_value={"some_topic": {}})
            actual = zk.get_topic_config("some_topic")
            expected = {"config": {}}
            assert actual == expected