Example #1
def delete_topic():
    """
    Delete the specified topic
    """
    if ARG.server:
        ARG.server += ':9092'
    else:
        ARG.server = ','.join(SERVER['Kafka']['broker_list'])
    client = KafkaClient(bootstrap_servers=ARG.server)
    try:
        topic_req = admin.DeleteTopicsRequest_v1(topics=[ARG.topic],
                                                 timeout=1000)
        future = client.send(client.least_loaded_node(), topic_req)
        client.poll(timeout_ms=100, future=future)
        result = future.value
        LOGGER.debug(result)
        error_code = result.topic_error_codes[0][1]
        if error_code:
            LOGGER.critical('Could not delete topic %s, error code=%d',
                            ARG.topic, error_code)
            sys.exit(error_code)
        else:
            print("Deleted topic %s" % (ARG.topic))
    except KafkaError:
        LOGGER.critical("Could not delete topic %s", ARG.topic)
def test_send(conn):
    cli = KafkaClient()
    try:
        cli.send(2, None)
    except Errors.NodeNotReadyError:
        pass
    else:
        assert False, 'NodeNotReadyError not raised'

    cli._initiate_connect(0)
    # ProduceRequest w/ 0 required_acks -> no response
    request = ProduceRequest(0, 0, [])
    ret = cli.send(0, request)
    conn.send.assert_called_with(request, expect_response=False)
    assert isinstance(ret, Future)

    request = MetadataRequest([])
    cli.send(0, request)
    conn.send.assert_called_with(request, expect_response=True)
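
The test_send examples in this listing rely on a `conn` pytest fixture that replaces BrokerConnection with a mock so no real socket is opened. A rough sketch of such a fixture, assuming pytest-mock and kafka-python's module layout:

import pytest
from kafka.conn import ConnectionStates

@pytest.fixture
def conn(mocker):
    conn = mocker.patch('kafka.client_async.BrokerConnection')
    conn.return_value = conn  # the patched constructor returns the mock itself
    conn.state = ConnectionStates.CONNECTED
    conn.connect.return_value = conn.state
    conn.blacked_out.return_value = False
    return conn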
def test_send(conn):
    cli = KafkaClient()

    # Send to unknown node => raises AssertionError
    try:
        cli.send(2, None)
    except AssertionError:
        pass
    else:
        assert False, 'AssertionError not raised'

    # Send to disconnected node => NodeNotReady
    conn.state = ConnectionStates.DISCONNECTED
    f = cli.send(0, None)
    assert f.failed()
    assert isinstance(f.exception, Errors.NodeNotReadyError)

    conn.state = ConnectionStates.CONNECTED
    cli._maybe_connect(0)
    # ProduceRequest w/ 0 required_acks -> no response
    request = ProduceRequest[0](0, 0, [])
    ret = cli.send(0, request)
    conn.send.assert_called_with(request, expect_response=False)
    assert isinstance(ret, Future)

    request = MetadataRequest[0]([])
    cli.send(0, request)
    conn.send.assert_called_with(request, expect_response=True)
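
Note the ProduceRequest[0] / MetadataRequest[0] indexing in this newer variant: kafka-python exposes each protocol message as a list of classes, one per API version. A quick illustration (attribute values shown are for Produce v0):

from kafka.protocol.produce import ProduceRequest

v0 = ProduceRequest[0]  # the class implementing API version 0
print(v0.API_KEY, v0.API_VERSION)  # -> 0 0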
def ensure_topic(topic,
                 num_partitions,
                 replication_factor,
                 logger,
                 timeout_ms=3000,
                 brokers='localhost'):
    logger.debug('checking kafka for topic ' + topic)
    client = KafkaClient(bootstrap_servers=brokers)

    if topic not in client.cluster.topics(exclude_internal_topics=True):
        logger.debug('creating kafka topic ' + topic)

        request = admin.CreateTopicsRequest_v0(
            create_topic_requests=[(
                topic,
                num_partitions,
                replication_factor,
                [],  # Partition assignment
                [],  # Configs
            )],
            timeout=timeout_ms)
        future = client.send(client.least_loaded_node(), request)
        client.poll(timeout_ms=timeout_ms, future=future)
        result = future.value
        error_code = result.topic_errors[0][1]
        # 0: success
        # 36: already exists, check topic
        if error_code == 0:
            logger.debug('kafka topic ' + topic + ' created')
            return
        elif error_code != 36:
            logger.error('error creating kafka topic ' + topic)
            raise Exception(
                'Unknown error code during creation of topic `{}`: {}'.format(
                    topic, error_code))

    else:
        logger.debug('kafka topic ' + topic + ' exists')
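
A possible invocation of ensure_topic, assuming a local broker and the standard logging module (topic name and partition counts are placeholders):

import logging

logging.basicConfig(level=logging.DEBUG)
ensure_topic('events', num_partitions=3, replication_factor=1,
             logger=logging.getLogger('ensure_topic'),
             brokers='localhost:9092')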
Example #7
class OffsetsFetcherAsync(object):

    DEFAULT_CONFIG = {
        'session_timeout_ms': 30000,
        'heartbeat_interval_ms': 3000,
        'retry_backoff_ms': 100,
        'api_version': (0, 9),
        'metric_group_prefix': ''
    }

    def __init__(self, **configs):
        self.config = copy.copy(self.DEFAULT_CONFIG)
        self.config.update(configs)
        self._client = KafkaClient(**self.config)
        self._coordinator_id = None
        self.group_id = configs['group_id']
        self.topic = configs['topic']

    def _ensure_coordinator_known(self):
        """Block until the coordinator for this group is known
        (and we have an active connection -- java client uses unsent queue).
        """
        while self._coordinator_unknown():

            # Prior to 0.8.2 there was no group coordinator
            # so we will just pick a node at random and treat
            # it as the "coordinator"
            if self.config['api_version'] < (0, 8, 2):
                self._coordinator_id = self._client.least_loaded_node()
                self._client.ready(self._coordinator_id)
                continue

            future = self._send_group_coordinator_request()
            self._client.poll(future=future)

            if future.failed():
                if isinstance(future.exception,
                              Errors.GroupCoordinatorNotAvailableError):
                    continue
                elif future.retriable():
                    metadata_update = self._client.cluster.request_update()
                    self._client.poll(future=metadata_update)
                else:
                    raise future.exception  # pylint: disable-msg=raising-bad-type

    def _coordinator_unknown(self):
        """Check if we know who the coordinator is and have an active connection

        Side-effect: reset _coordinator_id to None if connection failed

        Returns:
            bool: True if the coordinator is unknown
        """
        if self._coordinator_id is None:
            return True

        if self._client.is_disconnected(self._coordinator_id):
            self._coordinator_dead()
            return True

        return False

    def _coordinator_dead(self, error=None):
        """Mark the current coordinator as dead."""
        if self._coordinator_id is not None:
            log.warning("Marking the coordinator dead (node %s) for group %s: %s.",
                        self._coordinator_id, self.group_id, error)
            self._coordinator_id = None

    def _send_group_coordinator_request(self):
        """Discover the current coordinator for the group.

        Returns:
            Future: resolves to the node id of the coordinator
        """
        node_id = self._client.least_loaded_node()
        if node_id is None:
            return Future().failure(Errors.NoBrokersAvailable())

        log.debug("Sending group coordinator request for group %s to broker %s",
                  self.group_id, node_id)
        request = GroupCoordinatorRequest[0](self.group_id)
        future = Future()
        _f = self._client.send(node_id, request)
        _f.add_callback(self._handle_group_coordinator_response, future)
        _f.add_errback(self._failed_request, node_id, request, future)
        return future

    def _handle_group_coordinator_response(self, future, response):
        log.debug("Received group coordinator response %s", response)
        if not self._coordinator_unknown():
            # We already found the coordinator, so ignore the request
            log.debug("Coordinator already known -- ignoring metadata response")
            future.success(self._coordinator_id)
            return

        error_type = Errors.for_code(response.error_code)
        if error_type is Errors.NoError:
            ok = self._client.cluster.add_group_coordinator(self.group_id, response)
            if not ok:
                # This could happen if coordinator metadata is different
                # than broker metadata
                future.failure(Errors.IllegalStateError())
                return

            self._coordinator_id = response.coordinator_id
            log.info("Discovered coordinator %s for group %s",
                     self._coordinator_id, self.group_id)
            self._client.ready(self._coordinator_id)
            future.success(self._coordinator_id)
        elif error_type is Errors.GroupCoordinatorNotAvailableError:
            log.debug("Group Coordinator Not Available; retry")
            future.failure(error_type())
        elif error_type is Errors.GroupAuthorizationFailedError:
            error = error_type(self.group_id)
            log.error("Group Coordinator Request failed: %s", error)
            future.failure(error)
        else:
            error = error_type()
            log.error("Unrecognized failure in Group Coordinator Request: %s",
                      error)
            future.failure(error)

    def _failed_request(self, node_id, request, future, error):
        log.error('Error sending %s to node %s [%s]',
                  request.__class__.__name__, node_id, error)
        # Marking coordinator dead
        # unless the error is caused by internal client pipelining
        if not isinstance(error, (Errors.NodeNotReadyError,
                                  Errors.TooManyInFlightRequests)):
            self._coordinator_dead()
        future.failure(error)

    def offsets(self, partitions, timestamp):
        """Fetch a single offset before the given timestamp for the set of partitions.

        Blocks until offset is obtained, or a non-retriable exception is raised

        Arguments:
            partitions (iterable of TopicPartition): the partitions for which
                to fetch offsets.
            timestamp (int): timestamp for fetching offset. -1 for the latest
                available, -2 for the earliest available. Otherwise timestamp
                is treated as epoch milliseconds.

        Returns:
            dict: TopicPartition and message offsets
        """
        retries = 3
        while retries > 0:
            offsets = {}
            for future in self._send_offset_request(partitions, timestamp):
                self._client.poll(future=future)

                if future.succeeded():
                    for tp, offset in future.value:
                        offsets[tp] = offset
                    continue

                if not future.retriable():
                    raise future.exception  # pylint: disable-msg=raising-bad-type

                if future.exception.invalid_metadata:
                    refresh_future = self._client.cluster.request_update()
                    self._client.poll(future=refresh_future, sleep=True)
                    log.warning("Got exception %s and kept the loop", future.exception)
            if offsets:
                return offsets
            retries -= 1
            log.warning("Retrying the offsets fetch loop (%d retries left)", retries)
        log.error("Unsuccessful offsets retrieval")
        return {}

    def _send_offset_request(self, partitions, timestamp):
        """Fetch a single offset before the given timestamp for the partition.

        Arguments:
            partitions (iterable of TopicPartition): partitions for which to fetch offsets
            timestamp (int): timestamp for fetching offset

        Returns:
            list of Future: resolves to the corresponding offset
        """
        topic = partitions[0].topic
        nodes_per_partitions = {}
        for partition in partitions:
            node_id = self._client.cluster.leader_for_partition(partition)
            if node_id is None:
                log.debug("Partition %s is unknown for fetching offset,"
                          " wait for metadata refresh", partition)
                return [Future().failure(Errors.StaleMetadata(partition))]
            elif node_id == -1:
                log.debug("Leader for partition %s unavailable for fetching offset,"
                          " wait for metadata refresh", partition)
                return [Future().failure(Errors.LeaderNotAvailableError(partition))]
            nodes_per_partitions.setdefault(node_id, []).append(partition)

        # Client returns a future that only fails on network issues
        # so create a separate future and attach a callback to update it
        # based on response error codes

        futures = []
        for node_id, partitions in six.iteritems(nodes_per_partitions):
            request = OffsetRequest[0](
                -1, [(topic, [(partition.partition, timestamp, 1) for partition in partitions])]
            )
            future_request = Future()
            _f = self._client.send(node_id, request)
            _f.add_callback(self._handle_offset_response, partitions, future_request)

            def errback(e, future_request=future_request):
                # bind future_request at definition time; a plain closure over
                # the loop variable would see only the last iteration's future
                log.error("Offset request errback error %s", e)
                future_request.failure(e)
            _f.add_errback(errback)
            futures.append(future_request)

        return futures

    def _handle_offset_response(self, partitions, future, response):
        """Callback for the response of the list offset call above.

        Arguments:
            partitions (list of TopicPartition): the partitions that were fetched
            future (Future): the future to update based on response
            response (OffsetResponse): response from the server

        Raises:
            AssertionError: if response does not match partition
        """
        assert len(response.topics) == 1, (
            'OffsetResponse should only be for a single topic')
        topic, partition_info = response.topics[0]
        partition_ids = set([part.partition for part in partitions])
        result = []
        for pi in partition_info:
            part, error_code, offsets = pi
            assert topic == partitions[0].topic and part in partition_ids, (
                'OffsetResponse partition does not match OffsetRequest partition')
            error_type = Errors.for_code(error_code)
            if error_type is Errors.NoError:
                assert len(offsets) == 1, 'Expected OffsetResponse with one offset'
                log.debug("Fetched offset %s for partition %d", offsets[0], part)
                result.append((TopicPartition(topic, part), offsets[0]))
            elif error_type in (Errors.NotLeaderForPartitionError,
                                Errors.UnknownTopicOrPartitionError):
                log.debug("Attempt to fetch offsets for partition %s failed due"
                          " to obsolete leadership information, retrying.",
                          str(partitions))
                future.failure(error_type(partitions))
                return
            else:
                log.warning("Attempt to fetch offsets for partition %s failed due to:"
                            " %s", partitions, error_type)
                future.failure(error_type(partitions))
                return
        future.success(result)

    def fetch_committed_offsets(self, partitions):
        """Fetch the current committed offsets for specified partitions

        Arguments:
            partitions (list of TopicPartition): partitions to fetch

        Returns:
            dict: {TopicPartition: OffsetAndMetadata}
        """
        if not partitions:
            return {}

        while True:
            self._ensure_coordinator_known()

            # contact coordinator to fetch committed offsets
            future = self._send_offset_fetch_request(partitions)
            self._client.poll(future=future)

            if future.succeeded():
                return future.value

            if not future.retriable():
                raise future.exception  # pylint: disable-msg=raising-bad-type

            time.sleep(self.config['retry_backoff_ms'] / 1000.0)

    def _send_offset_fetch_request(self, partitions):
        """Fetch the committed offsets for a set of partitions.

        This is a non-blocking call. The returned future can be polled to get
        the actual offsets returned from the broker.

        Arguments:
            partitions (list of TopicPartition): the partitions to fetch

        Returns:
            Future: resolves to dict of offsets: {TopicPartition: int}
        """
        assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
        assert all(map(lambda k: isinstance(k, TopicPartition), partitions))
        if not partitions:
            return Future().success({})

        elif self._coordinator_unknown():
            return Future().failure(Errors.GroupCoordinatorNotAvailableError)

        node_id = self._coordinator_id

        # Verify node is ready
        if not self._client.ready(node_id):
            log.debug("Node %s not ready -- failing offset fetch request",
                      node_id)
            return Future().failure(Errors.NodeNotReadyError)

        log.debug("Group %s fetching committed offsets for partitions: %s",
                  self.group_id, partitions)
        # construct the request
        topic_partitions = collections.defaultdict(set)
        for tp in partitions:
            topic_partitions[tp.topic].add(tp.partition)

        if self.config['api_version'] >= (0, 8, 2):
            request = OffsetFetchRequest[1](
                self.group_id,
                list(topic_partitions.items())
            )
        else:
            request = OffsetFetchRequest[0](
                self.group_id,
                list(topic_partitions.items())
            )

        # send the request with a callback
        future = Future()
        _f = self._client.send(node_id, request)
        _f.add_callback(self._handle_offset_fetch_response, future)
        _f.add_errback(self._failed_request, node_id, request, future)
        return future

    def _handle_offset_fetch_response(self, future, response):
        offsets = {}
        for topic, partitions in response.topics:
            for partition, offset, metadata, error_code in partitions:
                tp = TopicPartition(topic, partition)
                error_type = Errors.for_code(error_code)
                if error_type is not Errors.NoError:
                    error = error_type()
                    log.debug("Group %s failed to fetch offset for partition"
                              " %s: %s", self.group_id, tp, error)
                    if error_type is Errors.GroupLoadInProgressError:
                        # just retry
                        future.failure(error)
                    elif error_type is Errors.NotCoordinatorForGroupError:
                        # re-discover the coordinator and retry
                        self._coordinator_dead()
                        future.failure(error)
                    elif error_type in (Errors.UnknownMemberIdError,
                                        Errors.IllegalGenerationError):
                        future.failure(error)
                    elif error_type is Errors.UnknownTopicOrPartitionError:
                        log.warning("OffsetFetchRequest -- unknown topic %s"
                                    " (have you committed any offsets yet?)",
                                    topic)
                        continue
                    else:
                        log.error("Unknown error fetching offsets for %s: %s",
                                  tp, error)
                        future.failure(error)
                    return
                elif offset >= 0:
                    # record the position with the offset
                    # (-1 indicates no committed offset to fetch)
                    offsets[tp] = OffsetAndMetadata(offset, metadata)
                else:
                    log.debug("Group %s has no committed offset for partition"
                              " %s", self.group_id, tp)
        future.success(offsets)

    def get(self):
        topic_partitions = self._client.cluster.partitions_for_topic(self.topic)
        if not topic_partitions:
            future = self._client.cluster.request_update()
            log.info("No partitions available, performing metadata update.")
            self._client.poll(future=future)
            return {}
        partitions = [TopicPartition(self.topic, partition_id) for partition_id in topic_partitions]
        offsets = self.offsets(partitions, -1)
        committed = self.fetch_committed_offsets(partitions)
        lags = {}
        for tp, offset in six.iteritems(offsets):
            commit_offset = committed[tp] if tp in committed else 0
            numerical = commit_offset if isinstance(commit_offset, int) else commit_offset.offset
            lag = offset - numerical
            pid = tp.partition if isinstance(tp, TopicPartition) else tp
            log.debug("Lag for %s (%s): %s, %s, %s", self.topic, pid, offset, commit_offset, lag)
            lags[pid] = lag
        return lags
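
A minimal driver for OffsetsFetcherAsync; the bootstrap address, group ID, and topic are placeholders:

fetcher = OffsetsFetcherAsync(bootstrap_servers='localhost:9092',
                              group_id='my-group',
                              topic='my-topic')
lags = fetcher.get()  # {partition_id: lag}
for partition_id, lag in lags.items():
    print('partition %s lag %s' % (partition_id, lag))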
class KafkaConsumerLag:

    def __init__(self, bootstrap_servers):

        self.client = KafkaClient(bootstrap_servers=bootstrap_servers)
        self.client.check_version()

    def _send(self, broker_id, request, response_type=None):

        f = self.client.send(broker_id, request)
        response = self.client.poll(future=f)

        if response_type:
            if response and len(response) > 0:
                for r in response:
                    if isinstance(r, response_type):
                        return r
        else:
            if response and len(response) > 0:
                return response[0]

        return None

    def check(self, group_topics=None, discovery=None):
        """
        {
            "<group>": {
                "state": <str>,
                "topics": {
                    "<topic>": {
                        "consumer_lag": <int>,
                        "partitions": {
                            "<partition>": {
                                "offset_first": <int>,
                                "offset_consumed": <int>,
                                "offset_last": <int>,
                                "lag": <int>
                            }
                        }
                    }
                }
            }
        }
        :param group_topics: optional dict mapping consumer group IDs to
            lists of topics to check; if omitted, groups and topics are
            discovered from the brokers
        :param discovery: if True, discover active consumer groups and the
            topics they consume
        :return: consumer statistics
        """
        cluster = self.client.cluster
        brokers = cluster.brokers()

        # Consumer group ID -> list(topics)
        if group_topics is None:
            group_topics = {}

            if discovery is None:
                discovery = True
        else:
            group_topics = copy.deepcopy(group_topics)

        # Set of consumer group IDs
        consumer_groups = set(group_topics)

        # Set of all known topics
        topics = set(itertools.chain(*group_topics.values()))

        # Consumer group ID -> coordinating broker
        consumer_coordinator = {}

        # Coordinating broker - > list(consumer group IDs)
        coordinator_consumers = {}

        results = {}

        for consumer_group in group_topics:
            results[consumer_group] = {'state': None, 'topics': {}}

        # Ensure connections to all brokers
        for broker in brokers:
            while not self.client.is_ready(broker.nodeId):
                self.client.ready(broker.nodeId)

        # Collect all active consumer groups
        if discovery:
            for broker in brokers:
                response = self._send(broker.nodeId, _ListGroupsRequest(), _ListGroupsResponse)

                if response:
                    for group in response.groups:
                        consumer_groups.add(group[0])

        # Identify which broker is coordinating each consumer group
        for group in consumer_groups:

            response = self._send(next(iter(brokers)).nodeId, _GroupCoordinatorRequest(group), _GroupCoordinatorResponse)

            if response:
                consumer_coordinator[group] = response.coordinator_id

                if response.coordinator_id not in coordinator_consumers:
                    coordinator_consumers[response.coordinator_id] = []

                coordinator_consumers[response.coordinator_id].append(group)

        # Populate consumer groups into dict
        for group in consumer_groups:
            if group not in group_topics:
                group_topics[group] = []

        # Add groups to results dict
        for group in group_topics:
            results[group] = {'state': None, 'topics': {}}

        # Identify group information and topics read by each consumer group
        for coordinator, consumers in coordinator_consumers.items():

            response = self._send(coordinator, _DescribeGroupsRequest(consumers), _DescribeGroupsResponse)

            for group in response.groups:

                if group[1] in results:
                    results[group[1]]['state'] = group[2]
                    # TODO Also include member data?

                if discovery:
                    members = group[5]
                    for member in members:
                        try:
                            assignment = MemberAssignment.decode(member[4])
                            if assignment:
                                for partition in assignment.partition_assignment:
                                    topic = partition[0]

                                    # Add topic to topic set
                                    topics.add(topic)

                                    # Add topic to group
                                    group_topics[group[1]].append(topic)
                        except Exception:
                            # skip members whose assignment cannot be decoded
                            pass

        # Add topics to groups in results dict
        for group, topic_list in group_topics.items():
            for topic in topic_list:
                results[group]['topics'][topic] = {'consumer_lag': 0, 'partitions': {}}

        # For storing the latest offset for all partitions of all topics
        # topic -> partition -> offset
        start_offsets = {}
        end_offsets = {}

        # Identify all the topic partitions that each broker is leader for
        # and request next new offset for each partition
        for broker, partitions in cluster._broker_partitions.items():

            # topic -> List(partition, time, max_offsets)
            request_partitions = {}

            for tp in partitions:
                if tp.topic in topics:
                    if tp.topic not in request_partitions:
                        request_partitions[tp.topic] = []

                    # Time value '-2' is to get the offset for first available message
                    request_partitions[tp.topic].append((tp.partition, -2, 1))

            # List(topic, List(partition, time, max_offsets))
            topic_partitions = list(request_partitions.items())

            # Request partition start offsets
            response = self._send(broker, _OffsetRequest(-1, topic_partitions), _OffsetResponse)

            if response:
                for offset in response.topics:
                    topic = offset[0]
                    if topic not in start_offsets:
                        start_offsets[topic] = {}

                    for p in offset[1]:
                        start_offsets[topic][p[0]] = p[2][0]

            for tp in topic_partitions:
                for i, ptm in enumerate(tp[1]):
                    # Time value '-1' is to get the offset for next new message
                    tp[1][i] = (ptm[0], -1, 1)

            # Request partition end offsets
            response = self._send(broker, _OffsetRequest(-1, topic_partitions), _OffsetResponse)

            if response:
                for offset in response.topics:
                    topic = offset[0]
                    if topic not in end_offsets:
                        end_offsets[topic] = {}

                    for p in offset[1]:
                        end_offsets[topic][p[0]] = p[2][0]

        # Populate with offset values
        for group, topics in group_topics.items():

            coordinator = consumer_coordinator[group]

            # topic -> list(partition)
            request_partitions = {}

            for topic in topics:
                results[group]['topics'][topic]['consumer_lag'] = 0
                results[group]['topics'][topic]['partitions'] = {}

                if topic in start_offsets:
                    for p in start_offsets[topic]:
                        results[group]['topics'][topic]['partitions'][p] = {
                            'offset_first': start_offsets[topic][p],
                            'offset_last': end_offsets[topic][p],
                            'offset_consumed': 0,
                            'lag' : 0}

                        if topic not in request_partitions:
                            request_partitions[topic] = []
                        request_partitions[topic].append(p)

            # List(topic -> list(partition))
            topic_partitions = list(request_partitions.items())

            response = self._send(coordinator, _OffsetFetchRequest(group, topic_partitions), _OffsetFetchResponse)

            if response:
                for offset in response.topics:
                    topic = offset[0]
                    offsets = offset[1]

                    if topic not in results[group]['topics']:
                        continue

                    for p_offset in offsets:
                        partition = p_offset[0]
                        offset_consumed = p_offset[1]
                        p_results = results[group]['topics'][topic]['partitions'][partition]

                        if offset_consumed != -1:
                            p_results['offset_consumed'] = offset_consumed
                            p_results['lag'] = p_results['offset_last'] - offset_consumed
                        else:
                            p_results['offset_consumed'] = 0
                            p_results['lag'] = p_results['offset_last'] - p_results['offset_first']

                        results[group]['topics'][topic]['consumer_lag'] += p_results['lag']

        return results

    def close(self):
        if self.client:
            self.client.close()
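
One way to drive KafkaConsumerLag end to end; the bootstrap address is a placeholder:

lag_checker = KafkaConsumerLag(bootstrap_servers='localhost:9092')
try:
    stats = lag_checker.check()  # no arguments: discover all active groups
    for group, info in stats.items():
        for topic, topic_info in info['topics'].items():
            print(group, topic, topic_info['consumer_lag'])
finally:
    lag_checker.close()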
Example #9
class KafkaAdminClient(object):
    """A class for administering the Kafka cluster.

    Warning:
        This is an unstable interface that was recently added and is subject to
        change without warning. In particular, many methods currently return
        raw protocol tuples. In future releases, we plan to make these into
        nicer, more pythonic objects. Unfortunately, this will likely break
        those interfaces.

    The KafkaAdminClient class will negotiate for the latest version of each message
    protocol format supported by both the kafka-python client library and the
    Kafka broker. Usage of optional fields from protocol versions that are not
    supported by the broker will result in IncompatibleBrokerVersion exceptions.

    Use of this class requires a minimum broker version >= 0.10.0.0.

    Keyword Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the consumer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client. Also
            submitted to GroupCoordinator for logging with respect to
            consumer group administration. Default: 'kafka-python-{version}'
        reconnect_backoff_ms (int): The amount of time in milliseconds to
            wait before attempting to reconnect to a given host.
            Default: 50.
        reconnect_backoff_max_ms (int): The maximum amount of time in
            milliseconds to wait when reconnecting to a broker that has
            repeatedly failed to connect. If provided, the backoff per host
            will increase exponentially for each consecutive connection
            failure, up to this maximum. To avoid connection storms, a
            randomization factor of 0.2 will be applied to the backoff
            resulting in a random range between 20% below and 20% above
            the computed value. Default: 1000.
        request_timeout_ms (int): Client request timeout in milliseconds.
            Default: 30000.
        connections_max_idle_ms: Close idle connections after the number of
            milliseconds specified by this config. The broker closes idle
            connections after connections.max.idle.ms, so this avoids hitting
            unexpected socket disconnected errors on the client.
            Default: 540000
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        max_in_flight_requests_per_connection (int): Requests are pipelined
            to kafka brokers up to this number of maximum requests per
            broker connection. Default: 5.
        receive_buffer_bytes (int): The size of the TCP receive buffer
            (SO_RCVBUF) to use when reading data. Default: None (relies on
            system defaults). Java client defaults to 32768.
        send_buffer_bytes (int): The size of the TCP send buffer
            (SO_SNDBUF) to use when sending data. Default: None (relies on
            system defaults). Java client defaults to 131072.
        socket_options (list): List of tuple-arguments to socket.setsockopt
            to apply to broker connection sockets. Default:
            [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
            socket connections. If provided, all other ssl_* configurations
            will be ignored. Default: None.
        ssl_check_hostname (bool): Flag to configure whether SSL handshake
            should verify that the certificate matches the broker's hostname.
            Default: True.
        ssl_cafile (str): Optional filename of CA file to use in certificate
            verification. Default: None.
        ssl_certfile (str): Optional filename of file in PEM format containing
            the client certificate, as well as any CA certificates needed to
            establish the certificate's authenticity. Default: None.
        ssl_keyfile (str): Optional filename containing the client private key.
            Default: None.
        ssl_password (str): Optional password to be used when loading the
            certificate chain. Default: None.
        ssl_crlfile (str): Optional filename containing the CRL to check for
            certificate expiration. By default, no CRL check is done. When
            providing a file, only the leaf certificate will be checked against
            this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
            Default: None.
        api_version (tuple): Specify which Kafka API version to use. If set
            to None, KafkaClient will attempt to infer the broker version by
            probing various APIs. Example: (0, 10, 2). Default: None
        api_version_auto_timeout_ms (int): number of milliseconds to wait
            before raising a timeout exception from the constructor while
            checking the broker api version. Only applies if api_version
            is None.
        selector (selectors.BaseSelector): Provide a specific selector
            implementation to use for I/O multiplexing.
            Default: selectors.DefaultSelector
        metrics (kafka.metrics.Metrics): Optionally provide a metrics
            instance for capturing network IO stats. Default: None.
        metric_group_prefix (str): Prefix for metric names. Default: ''
        sasl_mechanism (str): Authentication mechanism when security_protocol
            is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
            PLAIN, GSSAPI, OAUTHBEARER.
        sasl_plain_username (str): username for sasl PLAIN authentication.
            Required if sasl_mechanism is PLAIN.
        sasl_plain_password (str): password for sasl PLAIN authentication.
            Required if sasl_mechanism is PLAIN.
        sasl_kerberos_service_name (str): Service name to include in GSSAPI
            sasl mechanism handshake. Default: 'kafka'
        sasl_oauth_token_provider (AbstractTokenProvider): OAuthBearer token provider
            instance. (See kafka.oauth.abstract). Default: None

    """
    DEFAULT_CONFIG = {
        # client configs
        'bootstrap_servers': 'localhost',
        'client_id': 'kafka-python-' + __version__,
        'request_timeout_ms': 30000,
        'connections_max_idle_ms': 9 * 60 * 1000,
        'reconnect_backoff_ms': 50,
        'reconnect_backoff_max_ms': 1000,
        'max_in_flight_requests_per_connection': 5,
        'receive_buffer_bytes': None,
        'send_buffer_bytes': None,
        'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
        'sock_chunk_bytes': 4096,  # undocumented experimental option
        'sock_chunk_buffer_count': 1000,  # undocumented experimental option
        'retry_backoff_ms': 100,
        'metadata_max_age_ms': 300000,
        'security_protocol': 'PLAINTEXT',
        'ssl_context': None,
        'ssl_check_hostname': True,
        'ssl_cafile': None,
        'ssl_certfile': None,
        'ssl_keyfile': None,
        'ssl_password': None,
        'ssl_crlfile': None,
        'api_version': None,
        'api_version_auto_timeout_ms': 2000,
        'selector': selectors.DefaultSelector,
        'sasl_mechanism': None,
        'sasl_plain_username': None,
        'sasl_plain_password': None,
        'sasl_kerberos_service_name': 'kafka',
        'sasl_oauth_token_provider': None,

        # metrics configs
        'metric_reporters': [],
        'metrics_num_samples': 2,
        'metrics_sample_window_ms': 30000,
    }

    def __init__(self, **configs):
        log.debug("Starting KafkaAdminClient with configuration: %s", configs)
        extra_configs = set(configs).difference(self.DEFAULT_CONFIG)
        if extra_configs:
            raise KafkaConfigurationError(
                "Unrecognized configs: {}".format(extra_configs))

        self.config = copy.copy(self.DEFAULT_CONFIG)
        self.config.update(configs)

        # Configure metrics
        metrics_tags = {'client-id': self.config['client_id']}
        metric_config = MetricConfig(
            samples=self.config['metrics_num_samples'],
            time_window_ms=self.config['metrics_sample_window_ms'],
            tags=metrics_tags)
        reporters = [
            reporter() for reporter in self.config['metric_reporters']
        ]
        self._metrics = Metrics(metric_config, reporters)

        self._client = KafkaClient(metrics=self._metrics,
                                   metric_group_prefix='admin',
                                   **self.config)

        # Get auto-discovered version from client if necessary
        if self.config['api_version'] is None:
            self.config['api_version'] = self._client.config['api_version']

        self._closed = False
        self._refresh_controller_id()
        log.debug("KafkaAdminClient started.")

    def close(self):
        """Close the KafkaAdminClient connection to the Kafka broker."""
        if not hasattr(self, '_closed') or self._closed:
            log.info("KafkaAdminClient already closed.")
            return

        self._metrics.close()
        self._client.close()
        self._closed = True
        log.debug("KafkaAdminClient is now closed.")

    def _matching_api_version(self, operation):
        """Find the latest version of the protocol operation supported by both
        this library and the broker.

        This resolves to the lesser of either the latest api version this
        library supports, or the max version supported by the broker.

        :param operation: A list of protocol operation versions from kafka.protocol.
        :return: The max matching version number between client and broker.
        """
        version = min(
            len(operation) - 1,
            self._client.get_api_versions()[operation[0].API_KEY][1])
        if version < self._client.get_api_versions()[operation[0].API_KEY][0]:
            # max library version is less than min broker version. Currently,
            # no Kafka versions specify a min msg version. Maybe in the future?
            raise IncompatibleBrokerVersion(
                "No version of the '{}' Kafka protocol is supported by both the client and broker."
                .format(operation[0].__name__))
        return version
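
    # Worked example of the version math above (broker numbers are assumed):
    # if this library implements MetadataRequest v0..v6, len(operation) - 1
    # is 6; if the broker advertises (min=0, max=8) for that API key, the
    # negotiated version is min(6, 8) == 6. Were the broker's minimum
    # greater than 6, IncompatibleBrokerVersion would be raised instead.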

    def _validate_timeout(self, timeout_ms):
        """Validate the timeout is set or use the configuration default.

        :param timeout_ms: The timeout provided by api call, in milliseconds.
        :return: The timeout to use for the operation.
        """
        return timeout_ms or self.config['request_timeout_ms']

    def _refresh_controller_id(self):
        """Determine the Kafka cluster controller."""
        version = self._matching_api_version(MetadataRequest)
        if 1 <= version <= 6:
            request = MetadataRequest[version]()
            future = self._send_request_to_node(
                self._client.least_loaded_node(), request)

            self._wait_for_futures([future])

            response = future.value
            controller_id = response.controller_id
            # verify the controller is new enough to support our requests
            controller_version = self._client.check_version(controller_id)
            if controller_version < (0, 10, 0):
                raise IncompatibleBrokerVersion(
                    "The controller appears to be running Kafka {}. KafkaAdminClient requires brokers >= 0.10.0.0."
                    .format(controller_version))
            self._controller_id = controller_id
        else:
            raise UnrecognizedBrokerVersion(
                "Kafka Admin interface cannot determine the controller using MetadataRequest_v{}."
                .format(version))

    def _find_group_coordinator_id(self, group_id):
        """Find the broker node_id of the coordinator of the given group.

        Sends a FindCoordinatorRequest message to the cluster. Will block until
        the FindCoordinatorResponse is received. Any errors are immediately
        raised.

        :param group_id: The consumer group ID. This is typically the group
            name as a string.
        :return: The node_id of the broker that is the coordinator.
        """
        # Note: Java may change how this is implemented in KAFKA-6791.
        #
        # TODO add support for dynamically picking version of
        # GroupCoordinatorRequest which was renamed to FindCoordinatorRequest.
        # When I experimented with this, GroupCoordinatorResponse_v1 didn't
        # match GroupCoordinatorResponse_v0 and I couldn't figure out why.
        gc_request = GroupCoordinatorRequest[0](group_id)
        future = self._send_request_to_node(self._client.least_loaded_node(),
                                            gc_request)

        self._wait_for_futures([future])

        gc_response = future.value
        # use the extra error checking in add_group_coordinator() rather than
        # immediately returning the group coordinator.
        success = self._client.cluster.add_group_coordinator(
            group_id, gc_response)
        if not success:
            error_type = Errors.for_code(gc_response.error_code)
            assert error_type is not Errors.NoError
            # Note: When error_type.retriable, Java will retry... see
            # KafkaAdminClient's handleFindCoordinatorError method
            raise error_type(
                "Could not identify group coordinator for group_id '{}' from response '{}'."
                .format(group_id, gc_response))
        group_coordinator = self._client.cluster.coordinator_for_group(
            group_id)
        # will be None if the coordinator was never populated, which should never happen here
        assert group_coordinator is not None
        # will be -1 if add_group_coordinator() failed... but by this point the
        # error should have been raised.
        assert group_coordinator != -1
        return group_coordinator

    def _send_request_to_node(self, node_id, request):
        """Send a Kafka protocol message to a specific broker.

        Returns a future that may be polled for status and results.

        :param node_id: The broker id to which to send the message.
        :param request: The message to send.
        :return: A future object that may be polled for status and results.
        :exception: The exception if the message could not be sent.
        """
        while not self._client.ready(node_id):
            # poll until the connection to broker is ready, otherwise send()
            # will fail with NodeNotReadyError
            self._client.poll()
        return self._client.send(node_id, request)

    def _send_request_to_controller(self, request):
        """Send a Kafka protocol message to the cluster controller.

        Will block until the message result is received.

        :param request: The message to send.
        :return: The Kafka protocol response for the message.
        """
        tries = 2  # in case our cached self._controller_id is outdated
        while tries:
            tries -= 1
            future = self._send_request_to_node(self._controller_id, request)

            self._wait_for_futures([future])

            response = future.value
            # In Java, the error fieldname is inconsistent:
            #  - CreateTopicsResponse / CreatePartitionsResponse uses topic_errors
            #  - DeleteTopicsResponse uses topic_error_codes
            # So this is a little brittle in that it assumes all responses have
            # one of these attributes and that they always unpack into
            # (topic, error_code) tuples.
            topic_error_tuples = (response.topic_errors if hasattr(
                response, 'topic_errors') else response.topic_error_codes)
            # Also small py2/py3 compatibility -- py3 can ignore extra values
            # during unpack via: for x, y, *rest in list_of_values. py2 cannot.
            # So for now we have to map across the list and explicitly drop any
            # extra values (usually the error_message)
            for topic, error_code in map(lambda e: e[:2], topic_error_tuples):
                error_type = Errors.for_code(error_code)
                if tries and error_type is NotControllerError:
                    # No need to inspect the rest of the errors for
                    # non-retriable errors because NotControllerError should
                    # either be thrown for all errors or no errors.
                    self._refresh_controller_id()
                    break
                elif error_type is not Errors.NoError:
                    raise error_type(
                        "Request '{}' failed with response '{}'.".format(
                            request, response))
            else:
                return response
        raise RuntimeError(
            "This should never happen, please file a bug with full stacktrace if encountered"
        )

    @staticmethod
    def _convert_new_topic_request(new_topic):
        return (
            new_topic.name, new_topic.num_partitions,
            new_topic.replication_factor, [
                (partition_id, replicas) for partition_id, replicas in
                new_topic.replica_assignments.items()
            ],
            [(config_key, config_value)
             for config_key, config_value in new_topic.topic_configs.items()])

    def create_topics(self, new_topics, timeout_ms=None, validate_only=False):
        """Create new topics in the cluster.

        :param new_topics: A list of NewTopic objects.
        :param timeout_ms: Milliseconds to wait for new topics to be created
            before the broker returns.
        :param validate_only: If True, don't actually create new topics.
            Not supported by all versions. Default: False
        :return: Appropriate version of CreateTopicResponse class.
        """
        version = self._matching_api_version(CreateTopicsRequest)
        timeout_ms = self._validate_timeout(timeout_ms)
        if version == 0:
            if validate_only:
                raise IncompatibleBrokerVersion(
                    "validate_only requires CreateTopicsRequest >= v1, which is not supported by Kafka {}."
                    .format(self.config['api_version']))
            request = CreateTopicsRequest[version](create_topic_requests=[
                self._convert_new_topic_request(new_topic)
                for new_topic in new_topics
            ],
                                                   timeout=timeout_ms)
        elif version <= 2:
            request = CreateTopicsRequest[version](create_topic_requests=[
                self._convert_new_topic_request(new_topic)
                for new_topic in new_topics
            ],
                                                   timeout=timeout_ms,
                                                   validate_only=validate_only)
        else:
            raise NotImplementedError(
                "Support for CreateTopics v{} has not yet been added to KafkaAdminClient."
                .format(version))
        # TODO convert structs to a more pythonic interface
        # TODO raise exceptions if errors
        return self._send_request_to_controller(request)

    def delete_topics(self, topics, timeout_ms=None):
        """Delete topics from the cluster.

        :param topics: A list of topic name strings.
        :param timeout_ms: Milliseconds to wait for topics to be deleted
            before the broker returns.
        :return: Appropriate version of DeleteTopicsResponse class.
        """
        version = self._matching_api_version(DeleteTopicsRequest)
        timeout_ms = self._validate_timeout(timeout_ms)
        if version <= 1:
            request = DeleteTopicsRequest[version](topics=topics,
                                                   timeout=timeout_ms)
            response = self._send_request_to_controller(request)
        else:
            raise NotImplementedError(
                "Support for DeleteTopics v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return response

    # list topics functionality is in ClusterMetadata
    # Note: if implemented here, send the request to the least_loaded_node()

    # describe topics functionality is in ClusterMetadata
    # Note: if implemented here, send the request to the controller

    # describe cluster functionality is in ClusterMetadata
    # Note: if implemented here, send the request to the least_loaded_node()

    # describe_acls protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # create_acls protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # delete_acls protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    @staticmethod
    def _convert_describe_config_resource_request(config_resource):
        return (config_resource.resource_type, config_resource.name, [
            config_key
            for config_key, config_value in config_resource.configs.items()
        ] if config_resource.configs else None)

    def describe_configs(self, config_resources, include_synonyms=False):
        """Fetch configuration parameters for one or more Kafka resources.

        :param config_resources: A list of ConfigResource objects.
            Any keys in ConfigResource.configs dict will be used to filter the
            result. Setting the configs dict to None will get all values. An
            empty dict will get zero values (as per Kafka protocol).
        :param include_synonyms: If True, return synonyms in response. Not
            supported by all versions. Default: False.
        :return: Appropriate version of DescribeConfigsResponse class.
        """
        version = self._matching_api_version(DescribeConfigsRequest)
        if version == 0:
            if include_synonyms:
                raise IncompatibleBrokerVersion(
                    "include_synonyms requires DescribeConfigsRequest >= v1, which is not supported by Kafka {}."
                    .format(self.config['api_version']))
            request = DescribeConfigsRequest[version](resources=[
                self._convert_describe_config_resource_request(config_resource)
                for config_resource in config_resources
            ])
        elif version == 1:
            request = DescribeConfigsRequest[version](
                resources=[
                    self._convert_describe_config_resource_request(
                        config_resource)
                    for config_resource in config_resources
                ],
                include_synonyms=include_synonyms)
        else:
            raise NotImplementedError(
                "Support for DescribeConfigs v{} has not yet been added to KafkaAdminClient."
                .format(version))
        future = self._send_request_to_node(self._client.least_loaded_node(),
                                            request)

        self._wait_for_futures([future])

        return future.value

    @staticmethod
    def _convert_alter_config_resource_request(config_resource):
        return (config_resource.resource_type, config_resource.name, [
            (config_key, config_value)
            for config_key, config_value in config_resource.configs.items()
        ])

    def alter_configs(self, config_resources):
        """Alter configuration parameters of one or more Kafka resources.

        Warning:
            This is currently broken for BROKER resources because those must be
            sent to that specific broker, versus this always picks the
            least-loaded node. See the comment in the source code for details.
            We would happily accept a PR fixing this.

        :param config_resources: A list of ConfigResource objects.
        :return: Appropriate version of AlterConfigsResponse class.
        """
        version = self._matching_api_version(AlterConfigsRequest)
        if version == 0:
            request = AlterConfigsRequest[version](resources=[
                self._convert_alter_config_resource_request(config_resource)
                for config_resource in config_resources
            ])
        else:
            raise NotImplementedError(
                "Support for AlterConfigs v{} has not yet been added to KafkaAdminClient."
                .format(version))
        # TODO the Java client has the note:
        # // We must make a separate AlterConfigs request for every BROKER resource we want to alter
        # // and send the request to that specific broker. Other resources are grouped together into
        # // a single request that may be sent to any broker.
        #
        # So this is currently broken as it always sends to the least_loaded_node()
        future = self._send_request_to_node(self._client.least_loaded_node(),
                                            request)

        self._wait_for_futures([future])

        return future.value
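
    # A hedged usage sketch for alter_configs(), assuming the same kafka-python
    # helpers as above; the config key and value are illustrative. Note the
    # BROKER caveat in the docstring above:
    #
    #     resource = ConfigResource(ConfigResourceType.TOPIC, 'my-topic',
    #                               configs={'retention.ms': '3600000'})
    #     response = admin.alter_configs([resource])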

    # alter replica logs dir protocol not yet implemented
    # Note: have to lookup the broker with the replica assignment and send the request to that broker

    # describe log dirs protocol not yet implemented
    # Note: have to lookup the broker with the replica assignment and send the request to that broker

    @staticmethod
    def _convert_create_partitions_request(topic_name, new_partitions):
        return (topic_name, (new_partitions.total_count,
                             new_partitions.new_assignments))

    def create_partitions(self,
                          topic_partitions,
                          timeout_ms=None,
                          validate_only=False):
        """Create additional partitions for an existing topic.

        :param topic_partitions: A map of topic name strings to NewPartition objects.
        :param timeout_ms: Milliseconds to wait for new partitions to be
            created before the broker returns.
        :param validate_only: If True, don't actually create new partitions.
            Default: False
        :return: Appropriate version of CreatePartitionsResponse class.
        """
        version = self._matching_api_version(CreatePartitionsRequest)
        timeout_ms = self._validate_timeout(timeout_ms)
        if version == 0:
            request = CreatePartitionsRequest[version](
                topic_partitions=[
                    self._convert_create_partitions_request(
                        topic_name, new_partitions)
                    for topic_name, new_partitions in topic_partitions.items()
                ],
                timeout=timeout_ms,
                validate_only=validate_only)
        else:
            raise NotImplementedError(
                "Support for CreatePartitions v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return self._send_request_to_controller(request)
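
    # A hedged usage sketch for create_partitions(), assuming kafka-python's
    # NewPartitions helper; the topic name and partition count are illustrative:
    #
    #     from kafka.admin import NewPartitions
    #     response = admin.create_partitions(
    #         {'my-topic': NewPartitions(total_count=6)})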

    # delete records protocol not yet implemented
    # Note: send the request to the partition leaders

    # create delegation token protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # renew delegation token protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # expire delegation_token protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # describe delegation_token protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    def describe_consumer_groups(self, group_ids, group_coordinator_id=None):
        """Describe a set of consumer groups.

        Any errors are immediately raised.

        :param group_ids: A list of consumer group IDs. These are typically the
            group names as strings.
        :param group_coordinator_id: The node_id of the groups' coordinator
            broker. If set to None, it will query the cluster for each group to
            find that group's coordinator. Explicitly specifying this can be
            useful for avoiding extra network round trips if you already know
            the group coordinator. This is only useful when all the group_ids
            have the same coordinator, otherwise it will error. Default: None.
        :return: A list of group descriptions. For now the group descriptions
            are the raw results from the DescribeGroupsResponse. Long-term, we
            plan to change this to return namedtuples as well as decoding the
            partition assignments.
        """
        group_descriptions = []
        futures = []
        version = self._matching_api_version(DescribeGroupsRequest)
        for group_id in group_ids:
            if group_coordinator_id is not None:
                this_groups_coordinator_id = group_coordinator_id
            else:
                this_groups_coordinator_id = self._find_group_coordinator_id(
                    group_id)

            if version <= 1:
                # Note: KAFKA-6788 A potential optimization is to group the
                # request per coordinator and send one request with a list of
                # all consumer groups. Java still hasn't implemented this
                # because the error checking is hard to get right when some
                # groups error and others don't.
                request = DescribeGroupsRequest[version](groups=(group_id, ))
                futures.append(
                    self._send_request_to_node(this_groups_coordinator_id,
                                               request))
            else:
                raise NotImplementedError(
                    "Support for DescribeGroups v{} has not yet been added to KafkaAdminClient."
                    .format(version))

        self._wait_for_futures(futures)

        for future in futures:
            response = future.value
            assert len(response.groups) == 1
            # TODO need to implement converting the response tuple into
            # a more accessible interface like a namedtuple and then stop
            # hardcoding tuple indices here. Several Java examples,
            # including KafkaAdminClient.java
            group_description = response.groups[0]
            error_code = group_description[0]
            error_type = Errors.for_code(error_code)
            # Java has the note: KAFKA-6789, we can retry based on the error code
            if error_type is not Errors.NoError:
                raise error_type(
                    "Request '{}' failed with response '{}'.".format(
                        request, response))
            # TODO Java checks the group protocol type, and if consumer
            # (ConsumerProtocol.PROTOCOL_TYPE) or empty string, it decodes
            # the members' partition assignments... that hasn't yet been
            # implemented here so just return the raw struct results
            group_descriptions.append(group_description)

        return group_descriptions
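
    # A hedged usage sketch for describe_consumer_groups(); the group name is
    # illustrative and each entry is a raw DescribeGroupsResponse tuple as
    # documented above:
    #
    #     for description in admin.describe_consumer_groups(['my-group']):
    #         print(description)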

    def list_consumer_groups(self, broker_ids=None):
        """List all consumer groups known to the cluster.

        This returns a list of Consumer Group tuples. The tuples are
        composed of the consumer group name and the consumer group protocol
        type.

        Only consumer groups that store their offsets in Kafka are returned.
        The protocol type will be an empty string for groups created using
        Kafka < 0.9 APIs because, although they store their offsets in Kafka,
        they don't use Kafka for group coordination. For groups created using
        Kafka >= 0.9, the protocol type will typically be "consumer".

        As soon as any error is encountered, it is immediately raised.

        :param broker_ids: A list of broker node_ids to query for consumer
            groups. If set to None, will query all brokers in the cluster.
            Explicitly specifying broker(s) can be useful for determining which
            consumer groups are coordinated by those broker(s). Default: None
        :return list: List of tuples of Consumer Groups.
        :exception GroupCoordinatorNotAvailableError: The coordinator is not
            available, so cannot process requests.
        :exception GroupLoadInProgressError: The coordinator is loading and
            hence can't process requests.
        """
        # While we return a list, internally use a set to prevent duplicates
        # because if a group coordinator fails after being queried, and its
        # consumer groups move to new brokers that haven't yet been queried,
        # then the same group could be returned by multiple brokers.
        consumer_groups = set()
        futures = []
        if broker_ids is None:
            broker_ids = [
                broker.nodeId for broker in self._client.cluster.brokers()
            ]
        version = self._matching_api_version(ListGroupsRequest)
        if version <= 2:
            request = ListGroupsRequest[version]()
            for broker_id in broker_ids:
                futures.append(self._send_request_to_node(broker_id, request))

            self._wait_for_futures(futures)

            for future in futures:
                response = future.value
                error_type = Errors.for_code(response.error_code)
                if error_type is not Errors.NoError:
                    raise error_type(
                        "Request '{}' failed with response '{}'.".format(
                            request, response))
                consumer_groups.update(response.groups)
        else:
            raise NotImplementedError(
                "Support for ListGroups v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return list(consumer_groups)
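
    # A hedged usage sketch for list_consumer_groups(); each entry is a
    # (group name, protocol type) tuple as documented above:
    #
    #     for group_name, protocol_type in admin.list_consumer_groups():
    #         print(group_name, protocol_type)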

    def list_consumer_group_offsets(self,
                                    group_id,
                                    group_coordinator_id=None,
                                    partitions=None):
        """Fetch Consumer Group Offsets.

        Note:
        This does not verify that the group_id or partitions actually exist
        in the cluster.

        As soon as any error is encountered, it is immediately raised.

        :param group_id: The consumer group id name for which to fetch offsets.
        :param group_coordinator_id: The node_id of the group's coordinator
            broker. If set to None, will query the cluster to find the group
            coordinator. Explicitly specifying this can be useful to prevent
            that extra network round trip if you already know the group
            coordinator. Default: None.
        :param partitions: A list of TopicPartitions for which to fetch
            offsets. On brokers >= 0.10.2, this can be set to None to fetch all
            known offsets for the consumer group. Default: None.
        :return dictionary: A dictionary with TopicPartition keys and
            OffsetAndMetadata values. Partitions that are not specified and for
            which the group_id does not have a recorded offset are omitted. An
            offset value of `-1` indicates the group_id has no offset for that
            TopicPartition. A `-1` can only happen for partitions that are
            explicitly specified.
        """
        group_offsets_listing = {}
        if group_coordinator_id is None:
            group_coordinator_id = self._find_group_coordinator_id(group_id)
        version = self._matching_api_version(OffsetFetchRequest)
        if version <= 3:
            if partitions is None:
                if version <= 1:
                    raise ValueError(
                        """OffsetFetchRequest_v{} requires specifying the
                        partitions for which to fetch offsets. Omitting the
                        partitions is only supported on brokers >= 0.10.2.
                        For details, see KIP-88.""".format(version))
                topics_partitions = None
            else:
                # transform from [TopicPartition("t1", 1), TopicPartition("t1", 2)] to [("t1", [1, 2])]
                topics_partitions_dict = defaultdict(set)
                for topic, partition in partitions:
                    topics_partitions_dict[topic].add(partition)
                topics_partitions = list(six.iteritems(topics_partitions_dict))
            request = OffsetFetchRequest[version](group_id, topics_partitions)
            future = self._send_request_to_node(group_coordinator_id, request)
            self._wait_for_futures([future])
            response = future.value

            if version > 1:  # OffsetFetchResponse_v1 lacks a top-level error_code
                error_type = Errors.for_code(response.error_code)
                if error_type is not Errors.NoError:
                    # optionally we could retry if error_type.retriable
                    raise error_type(
                        "Request '{}' failed with response '{}'.".format(
                            request, response))
            # transform response into a dictionary with TopicPartition keys and
            # OffsetAndMetadata values -- this is what the Java AdminClient returns
            for topic, partitions in response.topics:
                for partition, offset, metadata, error_code in partitions:
                    error_type = Errors.for_code(error_code)
                    if error_type is not Errors.NoError:
                        raise error_type(
                            "Unable to fetch offsets for group_id {}, topic {}, partition {}"
                            .format(group_id, topic, partition))
                    group_offsets_listing[TopicPartition(
                        topic,
                        partition)] = OffsetAndMetadata(offset, metadata)
        else:
            raise NotImplementedError(
                "Support for OffsetFetch v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return group_offsets_listing
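
    # A hedged usage sketch for list_consumer_group_offsets(), assuming a
    # broker >= 0.10.2 so that partitions can be omitted; the group name is
    # illustrative:
    #
    #     offsets = admin.list_consumer_group_offsets('my-group')
    #     for tp, offset_meta in offsets.items():
    #         print(tp.topic, tp.partition, offset_meta.offset)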

    # delete groups protocol not yet implemented
    # Note: send the request to the group's coordinator.

    def _wait_for_futures(self, futures):
        while not all(future.succeeded() for future in futures):
            for future in futures:
                self._client.poll(future=future)

                if future.failed():
                    raise future.exception  # pylint: disable-msg=raising-bad-type
Beispiel #10
0
class KafkaConsumerLag:
    def __init__(self, bootstrap_servers):

        self.client = KafkaClient(bootstrap_servers=bootstrap_servers)
        self.client.check_version()

    def _send(self, broker_id, request, response_type=None):

        f = self.client.send(broker_id, request)
        response = self.client.poll(future=f)

        if response_type:
            if response and len(response) > 0:
                for r in response:
                    if isinstance(r, response_type):
                        return r
        else:
            if response and len(response) > 0:
                return response[0]

        return None

    def check(self, group_topics=None, discovery=None):
        """
        {
            "<group>": {
                "state": <str>,
                "topics": {
                    "<topic>": {
                        "consumer_lag": <int>,
                        "partitions": {
                            "<partition>": {
                                "offset_first": <int>,
                                "offset_consumed": <int>,
                                "offset_last": <int>,
                                "lag": <int>
                            }
                        }
                    }
                }
            }
        }
        :param group_topics: Optional map of consumer group IDs to lists of
            topics they consume. If None, groups are discovered from the brokers.
        :param discovery: If True, discover active consumer groups and the
            topics their members are assigned to.
        :return: consumer statistics
        """
        cluster = self.client.cluster
        brokers = cluster.brokers()

        # Consumer group ID -> list(topics)
        if group_topics is None:
            group_topics = {}

            if discovery is None:
                discovery = True
        else:
            group_topics = copy.deepcopy(group_topics)

        # Set of consumer group IDs
        consumer_groups = set(group_topics)

        # Set of all known topics
        topics = set(itertools.chain(*group_topics.values()))

        # Consumer group ID -> coordinating broker
        consumer_coordinator = {}

        # Coordinating broker - > list(consumer group IDs)
        coordinator_consumers = {}

        results = {}

        for consumer_group in group_topics:
            results[consumer_group] = {'state': None, 'topics': {}}

        # Ensure connections to all brokers
        for broker in brokers:
            while not self.client.is_ready(broker.nodeId):
                self.client.ready(broker.nodeId)

        # Collect all active consumer groups
        if discovery:
            for broker in brokers:
                response = self._send(broker.nodeId, _ListGroupsRequest(),
                                      _ListGroupsResponse)

                if response:
                    for group in response.groups:
                        consumer_groups.add(group[0])

        # Identify which broker is coordinating each consumer group
        for group in consumer_groups:

            response = self._send(
                next(iter(brokers)).nodeId, _GroupCoordinatorRequest(group),
                _GroupCoordinatorResponse)

            if response:
                consumer_coordinator[group] = response.coordinator_id

                if response.coordinator_id not in coordinator_consumers:
                    coordinator_consumers[response.coordinator_id] = []

                coordinator_consumers[response.coordinator_id].append(group)

        # Populate consumer groups into dict
        for group in consumer_groups:
            if group not in group_topics:
                group_topics[group] = []

        # Add groups to results dict
        for group, topic_list in group_topics.items():
            results[group] = {'state': None, 'topics': {}}

        # Identify group information and topics read by each consumer group
        for coordinator, consumers in coordinator_consumers.items():

            response = self._send(coordinator,
                                  _DescribeGroupsRequest(consumers),
                                  _DescribeGroupsResponse)

            for group in response.groups:

                if group[1] in results:
                    results[group[1]]['state'] = group[2]
                    # TODO Also include member data?

                if discovery:
                    members = group[5]
                    for member in members:
                        try:
                            assignment = MemberAssignment.decode(member[4])
                            if assignment:
                                for partition in assignment.partition_assignment:
                                    topic = partition[0]

                                    # Add topic to topic set
                                    topics.add(topic)

                                    # Add topic to group
                                    group_topics[group[1]].append(topic)
                        except Exception:
                            # Member assignment may be absent or undecodable
                            pass

        # Add topics to groups in results dict
        for group, topic_list in group_topics.items():
            for topic in topic_list:
                results[group]['topics'][topic] = {
                    'consumer_lag': 0,
                    'partitions': {}
                }

        # For storing the latest offset for all partitions of all topics
        # topic -> partition -> offset
        start_offsets = {}
        end_offsets = {}

        # Identify all the topic partitions that each broker is leader for
        # and request next new offset for each partition
        for broker, partitions in cluster._broker_partitions.items():

            # topic -> List(partition, time, max_offsets)
            request_partitions = {}

            for tp in partitions:
                if tp.topic in topics:
                    if tp.topic not in request_partitions:
                        request_partitions[tp.topic] = []

                    # Time value '-2' is to get the offset for first available message
                    request_partitions[tp.topic].append((tp.partition, -2, 1))

            # List(topic, List(partition, time, max_offsets))
            topic_partitions = []

            for tp in request_partitions.items():
                topic_partitions.append(tp)

            # Request partition start offsets
            response = self._send(broker, _OffsetRequest(-1, topic_partitions),
                                  _OffsetResponse)

            if response:
                for offset in response.topics:
                    topic = offset[0]
                    if topic not in start_offsets:
                        start_offsets[topic] = {}

                    for p in offset[1]:
                        start_offsets[topic][p[0]] = p[2][0]

            for tp in topic_partitions:
                for i, ptm in enumerate(tp[1]):
                    # Time value '-1' is to get the offset for next new message
                    tp[1][i] = (ptm[0], -1, 1)

            # Request partition end offsets
            response = self._send(broker, _OffsetRequest(-1, topic_partitions),
                                  _OffsetResponse)

            if response:
                for offset in response.topics:
                    topic = offset[0]
                    if topic not in end_offsets:
                        end_offsets[topic] = {}

                    for p in offset[1]:
                        end_offsets[topic][p[0]] = p[2][0]

        # Populate with offset values
        for group, topic_list in group_topics.items():

            coordinator = consumer_coordinator[group]

            # topic -> list(partition)
            request_partitions = {}

            for topic in topic_list:
                results[group]['topics'][topic]['consumer_lag'] = 0
                results[group]['topics'][topic]['partitions'] = {}

                if topic in start_offsets:
                    for p in start_offsets[topic]:
                        results[group]['topics'][topic]['partitions'][p] = {
                            'offset_first': start_offsets[topic][p],
                            'offset_last': end_offsets[topic][p],
                            'offset_consumed': 0,
                            'lag': 0
                        }

                        if topic not in request_partitions:
                            request_partitions[topic] = []
                        request_partitions[topic].append(p)

            # List(topic -> list(partition))
            topic_partitions = []

            for tp in request_partitions.items():
                topic_partitions.append(tp)

            response = self._send(coordinator,
                                  _OffsetFetchRequest(group, topic_partitions),
                                  _OffsetFetchResponse)

            if response:
                for offset in response.topics:
                    topic = offset[0]
                    offsets = offset[1]

                    if topic not in results[group]['topics']:
                        continue

                    for p_offset in offsets:
                        partition = p_offset[0]
                        offset_consumed = p_offset[1]
                        p_results = results[group]['topics'][topic][
                            'partitions'][partition]

                        if offset_consumed != -1:
                            p_results['offset_consumed'] = offset_consumed
                            p_results['lag'] = p_results[
                                'offset_last'] - offset_consumed
                        else:
                            p_results['offset_consumed'] = 0
                            p_results['lag'] = p_results[
                                'offset_last'] - p_results['offset_first']

                        results[group]['topics'][topic][
                            'consumer_lag'] += p_results['lag']

        return results

    def close(self):
        if self.client:
            self.client.close()
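
# A hedged usage sketch for KafkaConsumerLag; the bootstrap address is
# illustrative, and calling check() with no arguments discovers all active
# consumer groups and their topics:
#
#     lag_checker = KafkaConsumerLag(bootstrap_servers='localhost:9092')
#     try:
#         stats = lag_checker.check()
#         for group, info in stats.items():
#             print(group, info['state'], info['topics'])
#     finally:
#         lag_checker.close()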
Beispiel #11
0
class KafkaAdmin(object):
    """An class for administering the kafka cluster.

    The KafkaAdmin class will negotiate for the latest version of each message protocol format supported
    by both the kafka-python client library and the Kafka broker.  Usage of optional fields from protocol
    versions that are not supported by the broker will result in UnsupportedVersionError exceptions.

    Use of this class requires a minimum broker version >= 0.10.0.0.

    Keyword Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the consumer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client. Also
            submitted to GroupCoordinator for logging with respect to
            consumer group administration. Default: 'kafka-python-{version}'
        reconnect_backoff_ms (int): The amount of time in milliseconds to
            wait before attempting to reconnect to a given host.
            Default: 50.
        reconnect_backoff_max_ms (int): The maximum amount of time in
            milliseconds to wait when reconnecting to a broker that has
            repeatedly failed to connect. If provided, the backoff per host
            will increase exponentially for each consecutive connection
            failure, up to this maximum. To avoid connection storms, a
            randomization factor of 0.2 will be applied to the backoff
            resulting in a random range between 20% below and 20% above
            the computed value. Default: 1000.
        request_timeout_ms (int): Client request timeout in milliseconds.
            Default: 30000.
        connections_max_idle_ms: Close idle connections after the number of
            milliseconds specified by this config. The broker closes idle
            connections after connections.max.idle.ms, so this avoids hitting
            unexpected socket disconnected errors on the client.
            Default: 540000
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        max_in_flight_requests_per_connection (int): Requests are pipelined
            to kafka brokers up to this number of maximum requests per
            broker connection. Default: 5.
        receive_buffer_bytes (int): The size of the TCP receive buffer
            (SO_RCVBUF) to use when reading data. Default: None (relies on
            system defaults). Java client defaults to 32768.
        send_buffer_bytes (int): The size of the TCP send buffer
            (SO_SNDBUF) to use when sending data. Default: None (relies on
            system defaults). Java client defaults to 131072.
        socket_options (list): List of tuple-arguments to socket.setsockopt
            to apply to broker connection sockets. Default:
            [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
            socket connections. If provided, all other ssl_* configurations
            will be ignored. Default: None.
        ssl_check_hostname (bool): Flag to configure whether SSL handshake
            should verify that the certificate matches the broker's hostname.
            Default: True.
        ssl_cafile (str): Optional filename of CA file to use in certificate
            verification. Default: None.
        ssl_certfile (str): Optional filename of file in PEM format containing
            the client certificate, as well as any CA certificates needed to
            establish the certificate's authenticity. Default: None.
        ssl_keyfile (str): Optional filename containing the client private key.
            Default: None.
        ssl_password (str): Optional password to be used when loading the
            certificate chain. Default: None.
        ssl_crlfile (str): Optional filename containing the CRL to check for
            certificate expiration. By default, no CRL check is done. When
            providing a file, only the leaf certificate will be checked against
            this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
            Default: None.
        api_version (tuple): Specify which Kafka API version to use. If set
            to None, KafkaClient will attempt to infer the broker version by
            probing various APIs. Example: (0, 10, 2). Default: None
        api_version_auto_timeout_ms (int): number of milliseconds to throw a
            timeout exception from the constructor when checking the broker
            api version. Only applies if api_version is None
        selector (selectors.BaseSelector): Provide a specific selector
            implementation to use for I/O multiplexing.
            Default: selectors.DefaultSelector
        metrics (kafka.metrics.Metrics): Optionally provide a metrics
            instance for capturing network IO stats. Default: None.
        metric_group_prefix (str): Prefix for metric names. Default: ''
        sasl_mechanism (str): string picking sasl mechanism when security_protocol
            is SASL_PLAINTEXT or SASL_SSL. Currently only PLAIN is supported.
            Default: None
        sasl_plain_username (str): username for sasl PLAIN authentication.
            Default: None
        sasl_plain_password (str): password for sasl PLAIN authentication.
            Default: None
        sasl_kerberos_service_name (str): Service name to include in GSSAPI
            sasl mechanism handshake. Default: 'kafka'

    """
    DEFAULT_CONFIG = {
        # client configs
        'bootstrap_servers': 'localhost',
        'client_id': 'kafka-python-' + __version__,
        'request_timeout_ms': 30000,
        'connections_max_idle_ms': 9 * 60 * 1000,
        'reconnect_backoff_ms': 50,
        'reconnect_backoff_max_ms': 1000,
        'max_in_flight_requests_per_connection': 5,
        'receive_buffer_bytes': None,
        'send_buffer_bytes': None,
        'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
        'sock_chunk_bytes': 4096,  # undocumented experimental option
        'sock_chunk_buffer_count': 1000,  # undocumented experimental option
        'retry_backoff_ms': 100,
        'metadata_max_age_ms': 300000,
        'security_protocol': 'PLAINTEXT',
        'ssl_context': None,
        'ssl_check_hostname': True,
        'ssl_cafile': None,
        'ssl_certfile': None,
        'ssl_keyfile': None,
        'ssl_password': None,
        'ssl_crlfile': None,
        'api_version': None,
        'api_version_auto_timeout_ms': 2000,
        'selector': selectors.DefaultSelector,
        'sasl_mechanism': None,
        'sasl_plain_username': None,
        'sasl_plain_password': None,
        'sasl_kerberos_service_name': 'kafka',

        # metrics configs
        'metric_reporters': [],
        'metrics_num_samples': 2,
        'metrics_sample_window_ms': 30000,
    }

    def __init__(self, **configs):
        log.debug("Starting Kafka administration interface")
        extra_configs = set(configs).difference(self.DEFAULT_CONFIG)
        if extra_configs:
            raise KafkaConfigurationError("Unrecognized configs: %s" %
                                          extra_configs)

        self.config = copy.copy(self.DEFAULT_CONFIG)
        self.config.update(configs)

        # api_version was previously a str. accept old format for now
        if isinstance(self.config['api_version'], str):
            deprecated = self.config['api_version']
            if deprecated == 'auto':
                self.config['api_version'] = None
            else:
                self.config['api_version'] = tuple(
                    map(int, deprecated.split('.')))
            log.warning(
                'use api_version=%s [tuple] -- "%s" as str is deprecated',
                str(self.config['api_version']), deprecated)

        # Configure metrics
        metrics_tags = {'client-id': self.config['client_id']}
        metric_config = MetricConfig(
            samples=self.config['metrics_num_samples'],
            time_window_ms=self.config['metrics_sample_window_ms'],
            tags=metrics_tags)
        reporters = [
            reporter() for reporter in self.config['metric_reporters']
        ]
        self._metrics = Metrics(metric_config, reporters)

        self._client = KafkaClient(metrics=self._metrics,
                                   metric_group_prefix='admin',
                                   **self.config)

        # Get auto-discovered version from client if necessary
        if self.config['api_version'] is None:
            self.config['api_version'] = self._client.config['api_version']

        self._closed = False
        self._refresh_controller_id()
        log.debug('Kafka administration interface started')

    def close(self):
        """Close the administration connection to the kafka broker"""
        if not hasattr(self, '_closed') or self._closed:
            log.info('Kafka administration interface already closed')
            return

        self._metrics.close()
        self._client.close()
        self._closed = True
        log.debug('Kafka administration interface has closed')

    def _matching_api_version(self, operation):
        """Find matching api version, the lesser of either the latest api version the library supports, or
        the max version supported by the broker

        :param operation: An operation array from kafka.protocol
        :return: The max matching version number between client and broker
        """
        version = min(
            len(operation) - 1,
            self._client.get_api_versions()[operation[0].API_KEY][1])
        if version < self._client.get_api_versions()[operation[0].API_KEY][0]:
            # max library version is less than min broker version. Not sure any brokers
            # actually set a min version greater than 0 right now, though that may change in the future.
            raise UnsupportedVersionError(
                "Could not find matching protocol version for {}".format(
                    operation.__name__))
        return version
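
    # Worked example of the negotiation above (values illustrative): if the
    # library ships request versions v0..v2 (so len(operation) - 1 == 2) and
    # the broker advertises (min=0, max=1) for that API key, the chosen
    # version is min(2, 1) == 1; a broker minimum greater than the library
    # maximum raises UnsupportedVersionError instead.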

    def _validate_timeout(self, timeout_ms):
        """Validate the timeout is set or use the configuration default

        :param timeout_ms: The timeout provided by api call, in milliseconds
        :return: The timeout to use for the operation
        """
        return timeout_ms or self.config['request_timeout_ms']

    def _refresh_controller_id(self):
        """Determine the kafka cluster controller
        """
        response = self._send_request_to_node(self._client.least_loaded_node(),
                                              MetadataRequest[1]([]))
        self._controller_id = response.controller_id
        version = self._client.check_version(self._controller_id)
        if version < (0, 10, 0):
            raise UnsupportedVersionError(
                "Kafka Admin interface not supported for cluster controller version {} < 0.10.0.0"
                .format(version))

    def _send_request_to_node(self, node, request):
        """Send a kafka protocol message to a specific broker.  Will block until the message result is received.

        :param node: The broker id to which to send the message
        :param request: The message to send
        :return: The kafka protocol response for the message
        :exception: The exception if the message could not be sent
        """
        while not self._client.ready(node):
            # connection to broker not ready, poll until it is or send will fail with NodeNotReadyError
            self._client.poll()
        future = self._client.send(node, request)
        self._client.poll(future=future)
        if future.succeeded():
            return future.value
        else:
            raise future.exception  # pylint: disable-msg=raising-bad-type

    def _send(self, request):
        """Send a kafka protocol message to the cluster controller.  Will block until the message result is received.

        :param request: The message to send
        :return The kafka protocol response for the message
        :exception NodeNotReadyError: If the controller connection can't be established
        """
        remaining_tries = 2
        while remaining_tries > 0:
            remaining_tries -= 1
            try:
                return self._send_request_to_node(self._controller_id, request)
            except (NotControllerError, KafkaConnectionError):
                # controller may have changed; refresh it and retry
                self._refresh_controller_id()
        raise NodeNotReadyError(self._controller_id)

    @staticmethod
    def _convert_new_topic_request(new_topic):
        return (
            new_topic.name, new_topic.num_partitions,
            new_topic.replication_factor, [
                (partition_id, replicas) for partition_id, replicas in
                new_topic.replica_assignments.items()
            ],
            [(config_key, config_value)
             for config_key, config_value in new_topic.topic_configs.items()])

    def create_topics(self, new_topics, timeout_ms=None, validate_only=None):
        """Create new topics in the cluster.

        :param new_topics: Array of NewTopic objects
        :param timeout_ms: Milliseconds to wait for new topics to be created before broker returns
        :param validate_only: If True, don't actually create new topics.  Not supported by all versions.
        :return: Appropriate version of CreateTopicResponse class
        """
        version = self._matching_api_version(CreateTopicsRequest)
        timeout_ms = self._validate_timeout(timeout_ms)
        if version == 0:
            if validate_only:
                raise UnsupportedVersionError(
                    "validate_only not supported on cluster version {}".format(
                        self.config['api_version']))
            request = CreateTopicsRequest[version](create_topic_requests=[
                self._convert_new_topic_request(new_topic)
                for new_topic in new_topics
            ],
                                                   timeout=timeout_ms)
        elif version <= 2:
            validate_only = validate_only or False
            request = CreateTopicsRequest[version](create_topic_requests=[
                self._convert_new_topic_request(new_topic)
                for new_topic in new_topics
            ],
                                                   timeout=timeout_ms,
                                                   validate_only=validate_only)
        else:
            raise UnsupportedVersionError(
                "missing implementation of CreateTopics for library supported version {}"
                .format(version))
        return self._send(request)
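
    # A hedged usage sketch for create_topics(), assuming kafka-python's
    # NewTopic helper; the topic name and counts are illustrative:
    #
    #     from kafka.admin import NewTopic
    #     topic = NewTopic(name='my-topic', num_partitions=3,
    #                      replication_factor=1)
    #     response = admin.create_topics([topic], timeout_ms=10000)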

    def delete_topics(self, topics, timeout_ms=None):
        """Delete topics from the cluster

        :param topics: Array of topic name strings
        :param timeout_ms: Milliseconds to wait for topics to be deleted before broker returns
        :return: Appropriate version of DeleteTopicsResponse class
        """
        version = self._matching_api_version(DeleteTopicsRequest)
        timeout_ms = self._validate_timeout(timeout_ms)
        if version <= 1:
            request = DeleteTopicsRequest[version](topics=topics,
                                                   timeout=timeout_ms)
        else:
            raise UnsupportedVersionError(
                "missing implementation of DeleteTopics for library supported version {}"
                .format(version))
        return self._send(request)

    # list topics functionality is in ClusterMetadata

    # describe topics functionality is in ClusterMetadata

    # describe cluster functionality is in ClusterMetadata

    # describe_acls protocol not implemented

    # create_acls protocol not implemented

    # delete_acls protocol not implemented

    @staticmethod
    def _convert_describe_config_resource_request(config_resource):
        return (config_resource.resource_type, config_resource.name, [
            config_key
            for config_key, config_value in config_resource.configs.items()
        ] if config_resource.configs else None)

    def describe_configs(self, config_resources, include_synonyms=None):
        """Fetch configuration parameters for one or more kafka resources.

        :param config_resources: An array of ConfigResource objects.
            Any keys in ConfigResource.configs dict will be used to filter the result.  The configs dict should be None
            to get all values.  An empty dict will get zero values (as per kafka protocol).
        :param include_synonyms: If True, return synonyms in response.  Not supported by all versions.
        :return: Appropriate version of DescribeConfigsResponse class
        """
        version = self._matching_api_version(DescribeConfigsRequest)
        if version == 0:
            if include_synonyms:
                raise UnsupportedVersionError(
                    "include_synonyms not supported on cluster version {}".
                    format(self.config['api_version']))
            request = DescribeConfigsRequest[version](resources=[
                self._convert_describe_config_resource_request(config_resource)
                for config_resource in config_resources
            ])
        elif version <= 1:
            include_synonyms = include_synonyms or False
            request = DescribeConfigsRequest[version](
                resources=[
                    self._convert_describe_config_resource_request(
                        config_resource)
                    for config_resource in config_resources
                ],
                include_synonyms=include_synonyms)
        else:
            raise UnsupportedVersionError(
                "missing implementation of DescribeConfigs for library supported version {}"
                .format(version))
        return self._send(request)

    @staticmethod
    def _convert_alter_config_resource_request(config_resource):
        return (config_resource.resource_type, config_resource.name, [
            (config_key, config_value)
            for config_key, config_value in config_resource.configs.items()
        ])

    def alter_configs(self, config_resources):
        """Alter configuration parameters of one or more kafka resources.

        :param config_resources: An array of ConfigResource objects.
        :return: Appropriate version of AlterConfigsResponse class
        """
        version = self._matching_api_version(AlterConfigsRequest)
        if version == 0:
            request = AlterConfigsRequest[version](resources=[
                self._convert_alter_config_resource_request(config_resource)
                for config_resource in config_resources
            ])
        else:
            raise UnsupportedVersionError(
                "missing implementation of AlterConfigs for library supported version {}"
                .format(version))
        return self._send(request)

    # alter replica logs dir protocol not implemented

    # describe log dirs protocol not implemented

    @staticmethod
    def _convert_create_partitions_request(topic_name, new_partitions):
        return (topic_name, (new_partitions.total_count,
                             new_partitions.new_assignments))

    def create_partitions(self,
                          topic_partitions,
                          timeout_ms=None,
                          validate_only=None):
        """Create additional partitions for an existing topic.

        :param topic_partitions: A map of topic name strings to NewPartition objects
        :param timeout_ms: Milliseconds to wait for new partitions to be created before broker returns
        :param validate_only: If True, don't actually create new partitions.
        :return: Appropriate version of CreatePartitionsResponse class
        """
        version = self._matching_api_version(CreatePartitionsRequest)
        timeout_ms = self._validate_timeout(timeout_ms)
        validate_only = validate_only or False
        if version == 0:
            request = CreatePartitionsRequest[version](
                topic_partitions=[
                    self._convert_create_partitions_request(
                        topic_name, new_partitions)
                    for topic_name, new_partitions in topic_partitions.items()
                ],
                timeout=timeout_ms,
                validate_only=validate_only)
        else:
            raise UnsupportedVersionError(
                "missing implementation of CreatePartitions for library supported version {}"
                .format(version))
        return self._send(request)

    # delete records protocol not implemented

    # create delegation token protocol not implemented

    # renew delegation token protocol not implemented

    # expire delegation_token protocol not implemented

    # describe delegation_token protocol not implemented

    def describe_consumer_groups(self, group_ids):
        """Describe a set of consumer groups.

        :param group_ids: A list of consumer group id names
        :return: Appropriate version of DescribeGroupsResponse class
        """
        version = self._matching_api_version(DescribeGroupsRequest)
        if version <= 1:
            request = DescribeGroupsRequest[version](groups=group_ids)
        else:
            raise UnsupportedVersionError(
                "missing implementation of DescribeGroups for library supported version {}"
                .format(version))
        return self._send(request)

    def list_consumer_groups(self):
        """List all consumer groups known to the cluster.

        :return: Appropriate version of ListGroupsResponse class
        """
        version = self._matching_api_version(ListGroupsRequest)
        if version <= 1:
            request = ListGroupsRequest[version]()
        else:
            raise UnsupportedVersionError(
                "missing implementation of ListGroups for library supported version {}"
                .format(version))
        return self._send(request)
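
# A hedged usage sketch for this KafkaAdmin class; the bootstrap address is
# illustrative, and close() releases the underlying client and metrics:
#
#     admin = KafkaAdmin(bootstrap_servers='localhost:9092')
#     try:
#         print(admin.list_consumer_groups())
#     finally:
#         admin.close()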
Beispiel #12
0
class KafkaManager:
    """
    A class used to interact with Kafka and Zookeeper
    and easily retrieve useful information
    """

    MAX_RETRY = 10
    MAX_POLL_RETRIES = 3
    MAX_ZK_RETRIES = 5
    TOPIC_RESOURCE_ID = 2
    DEFAULT_TIMEOUT = 15000
    SUCCESS_CODE = 0
    ZK_REASSIGN_NODE = '/admin/reassign_partitions'
    ZK_TOPIC_PARTITION_NODE = '/brokers/topics/'
    ZK_TOPIC_CONFIGURATION_NODE = '/config/topics/'

    # Not used yet.
    ZK_TOPIC_DELETION_NODE = '/admin/delete_topics/'

    def __init__(self, module, **configs):
        self.module = module
        self.zk_client = None
        self.client = KafkaClient(**configs)
        self.refresh()

    def init_zk_client(self, **configs):
        """
        Zookeeper client initialization
        """
        self.zk_client = KazooClient(**configs)
        self.zk_client.start()

    def close_zk_client(self):
        """
        Closes Zookeeper client
        """
        self.zk_client.stop()

    def close(self):
        """
        Closes Kafka client
        """
        self.client.close()

    def refresh(self):
        """
        Refresh topics state
        """
        fut = self.client.cluster.request_update()
        self.client.poll(future=fut)
        if not fut.succeeded():
            self.close()
            self.module.fail_json(
                msg='Error while updating topic state from Kafka server: %s.' %
                fut.exception)

    def create_topic(self,
                     name,
                     partitions,
                     replica_factor,
                     replica_assignment=None,
                     config_entries=None,
                     timeout=None):
        """
        Creates a topic
        Usable for Kafka version >= 0.10.1
        """
        # Avoid mutable default arguments
        if replica_assignment is None:
            replica_assignment = []
        if config_entries is None:
            config_entries = []
        if timeout is None:
            timeout = self.DEFAULT_TIMEOUT
        request = CreateTopicsRequest_v0(create_topic_requests=[
            (name, partitions, replica_factor, replica_assignment,
             config_entries)
        ],
                                         timeout=timeout)
        response = self.send_request_and_get_response(request)

        for topic, error_code in response.topic_errors:
            if error_code != self.SUCCESS_CODE:
                self.close()
                self.module.fail_json(
                    msg='Error while creating topic %s. '
                    'Error key is %s, %s.' %
                    (topic, kafka.errors.for_code(error_code).message,
                     kafka.errors.for_code(error_code).description))

    def delete_topic(self, name, timeout=None):
        """
        Deletes a topic
        Usable for Kafka version >= 0.10.1
        The request is sent to the cluster controller broker
        """
        if timeout is None:
            timeout = self.DEFAULT_TIMEOUT
        request = DeleteTopicsRequest_v0(topics=[name], timeout=timeout)
        response = self.send_request_and_get_response(request)

        for topic, error_code in response.topic_error_codes:
            if error_code != self.SUCCESS_CODE:
                self.close()
                self.module.fail_json(
                    msg='Error while deleting topic %s. '
                    'Error key is: %s, %s. '
                    'Is option \'delete.topic.enable\' set to true on '
                    ' your Kafka server?' %
                    (topic, kafka.errors.for_code(error_code).message,
                     kafka.errors.for_code(error_code).description))

    @staticmethod
    def _convert_create_acls_resource_request_v0(acl_resource):
        if acl_resource.operation == ACLOperation.ANY:
            raise IllegalArgumentError("operation must not be ANY")
        if acl_resource.permission_type == ACLPermissionType.ANY:
            raise IllegalArgumentError("permission_type must not be ANY")

        return (acl_resource.resource_type, acl_resource.name,
                acl_resource.principal, acl_resource.host,
                acl_resource.operation, acl_resource.permission_type)

    @staticmethod
    def _convert_create_acls_resource_request_v1(acl_resource):
        if acl_resource.operation == ACLOperation.ANY:
            raise IllegalArgumentError("operation must not be ANY")
        if acl_resource.permission_type == ACLPermissionType.ANY:
            raise IllegalArgumentError("permission_type must not be ANY")

        return (acl_resource.resource_type, acl_resource.name,
                acl_resource.pattern_type, acl_resource.principal,
                acl_resource.host, acl_resource.operation,
                acl_resource.permission_type)

    @staticmethod
    def _convert_delete_acls_resource_request_v0(acl_resource):
        return (acl_resource.resource_type, acl_resource.name,
                acl_resource.principal, acl_resource.host,
                acl_resource.operation, acl_resource.permission_type)

    @staticmethod
    def _convert_delete_acls_resource_request_v1(acl_resource):
        return (acl_resource.resource_type, acl_resource.name,
                acl_resource.pattern_type, acl_resource.principal,
                acl_resource.host, acl_resource.operation,
                acl_resource.permission_type)

    def describe_acls(self, acl_resource, api_version):
        """Describe a set of ACLs
        """

        if api_version < parse_version('2.0.0'):
            request = DescribeAclsRequest_v0(
                resource_type=acl_resource.resource_type,
                resource_name=acl_resource.name,
                principal=acl_resource.principal,
                host=acl_resource.host,
                operation=acl_resource.operation,
                permission_type=acl_resource.permission_type)
        else:
            request = DescribeAclsRequest_v1(
                resource_type=acl_resource.resource_type,
                resource_name=acl_resource.name,
                resource_pattern_type_filter=acl_resource.pattern_type,
                principal=acl_resource.principal,
                host=acl_resource.host,
                operation=acl_resource.operation,
                permission_type=acl_resource.permission_type)

        response = self.send_request_and_get_response(request)

        if response.error_code != self.SUCCESS_CODE:
            self.close()
            self.module.fail_json(
                msg='Error while describing ACL %s. '
                'Error %s: %s.' %
                (acl_resource, response.error_code, response.error_message))

        return response.resources

    def create_acls(self, acl_resources, api_version):
        """Create a set of ACLs"""

        if api_version < parse_version('2.0.0'):
            request = CreateAclsRequest_v0(creations=[
                self._convert_create_acls_resource_request_v0(acl_resource)
                for acl_resource in acl_resources
            ])
        else:
            request = CreateAclsRequest_v1(creations=[
                self._convert_create_acls_resource_request_v1(acl_resource)
                for acl_resource in acl_resources
            ])
        response = self.send_request_and_get_response(request)

        for error_code, error_message in response.creation_responses:
            if error_code != self.SUCCESS_CODE:
                self.close()
                self.module.fail_json(
                    msg='Error while creating ACL %s. '
                    'Error %s: %s.' %
                    (acl_resources, error_code, error_message))

    def delete_acls(self, acl_resources, api_version):
        """Delete a set of ACLSs"""

        if api_version < parse_version('2.0.0'):
            request = DeleteAclsRequest_v0(filters=[
                self._convert_delete_acls_resource_request_v0(acl_resource)
                for acl_resource in acl_resources
            ])
        else:
            request = DeleteAclsRequest_v1(filters=[
                self._convert_delete_acls_resource_request_v1(acl_resource)
                for acl_resource in acl_resources
            ])

        response = self.send_request_and_get_response(request)

        for error_code, error_message, _ in response.filter_responses:
            if error_code != self.SUCCESS_CODE:
                self.close()
                self.module.fail_json(
                    msg='Error while deleting ACL %s. '
                    'Error %s: %s.' %
                    (acl_resources, error_code, error_message))
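
    # A hedged usage sketch for the ACL helpers above. ACLResource and the
    # enum members shown (TOPIC, LITERAL, READ, ALLOW) are assumptions based
    # on the attributes these converters read; `manager` is a constructed
    # KafkaManager and the values are illustrative:
    #
    #     acl = ACLResource(
    #         resource_type=ACLResourceType.TOPIC, name='my-topic',
    #         pattern_type=ACLPatternType.LITERAL, principal='User:alice',
    #         host='*', operation=ACLOperation.READ,
    #         permission_type=ACLPermissionType.ALLOW)
    #     manager.create_acls([acl], parse_version('2.0.1'))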

    def send_request_and_get_response(self, request):
        """
        Sends a Kafka protocol request and returns
        the associated response
        """
        try:
            node_id = self.get_controller()

        except UndefinedController:
            self.module.fail_json(
                msg='Cannot determine a controller for your current Kafka '
                'server. Is your Kafka server running and available on '
                '\'%s\' with security protocol \'%s\'?' %
                (self.client.config['bootstrap_servers'],
                 self.client.config['security_protocol']))

        except Exception as e:
            self.module.fail_json(
                msg='Cannot determine a controller for your current Kafka '
                'server. Is your Kafka server running and available on '
                '\'%s\' with security protocol \'%s\'? Are you using the '
                'library versions from given \'requirements.txt\'? '
                'Exception was: %s' %
                (self.client.config['bootstrap_servers'],
                 self.client.config['security_protocol'], e))

        if self.connection_check(node_id):
            future = self.client.send(node_id, request)
            self.client.poll(future=future)
            if future.succeeded():
                return future.value
            else:
                self.close()
                self.module.fail_json(
                    msg='Error while sending request %s to Kafka server: %s.' %
                    (request, future.exception))
        else:
            self.close()
            self.module.fail_json(
                msg='Connection is not ready, please check your client '
                'and server configurations.')

    def get_controller(self):
        """
        Returns the current controller
        """
        if self.client.cluster.controller is not None:
            node_id, _host, _port, _rack = self.client.cluster.controller
            return node_id
        else:
            raise UndefinedController(
                'Can\'t get a controller for this cluster.')

    def get_controller_id_for_topic(self, topic_name):
        """
        Returns current controller for topic
        """
        request = MetadataRequest_v1(topics=[topic_name])
        response = self.send_request_and_get_response(request)
        return response.controller_id

    def get_config_for_topic(self, topic_name, config_names):
        """
        Returns responses with configuration
        Usable with Kafka version >= 0.11.0
        """
        request = DescribeConfigsRequest_v0(resources=[(self.TOPIC_RESOURCE_ID,
                                                        topic_name,
                                                        config_names)])
        return self.send_request_and_get_response(request)

    def get_responses_from_client(self, connection_sleep=1):
        """
        Obtains responses from the server using poll()
        It may take some time to get the response, so we retry a few times
        """
        retries = 0
        if self.get_awaiting_request() > 0:
            while retries < self.MAX_POLL_RETRIES:
                resp = self.client.poll()
                if resp:
                    return resp
                time.sleep(connection_sleep)
                retries += 1
            self.close()
            self.module.fail_json(
                msg='Error while getting responses: no response to request '
                'was obtained, please check your client and server '
                'configurations.')
        else:
            self.close()
            self.module.fail_json(
                msg='No pending request, please check your client and server '
                'configurations.')

    def get_topics(self):
        """
        Returns the topics list
        """
        return self.client.cluster.topics()

    def get_total_partitions_for_topic(self, topic):
        """
        Returns the number of partitions for topic
        """
        return len(self.client.cluster.partitions_for_topic(topic))

    def get_partitions_for_topic(self, topic):
        """
        Returns all partitions for topic, with information
        TODO do not use private property anymore
        """
        return self.client.cluster._partitions[topic]

    def get_total_brokers(self):
        """
        Returns number of brokers available
        """
        return len(self.client.cluster.brokers())

    def get_brokers(self):
        """
        Returns all brokers
        """
        return self.client.cluster.brokers()

    def get_api_version(self):
        """
        Returns Kafka server version
        """
        major, minor, patch = self.client.config['api_version']
        return '%s.%s.%s' % (major, minor, patch)

    def get_awaiting_request(self):
        """
        Returns the number of requests currently in flight
        """
        return self.client.in_flight_request_count()

    def connection_check(self, node_id, connection_sleep=0.1):
        """
        Checks that connection with broker is OK and that it is possible to
        send requests
        Since the maybe_connect() function used in ready() is 'async', we
        need to manually call the poll() function to establish the connection
        to the node
        """
        retries = 0
        if not self.client.ready(node_id):
            while retries < self.MAX_RETRY:
                self.client.poll()
                if self.client.ready(node_id):
                    return True
                time.sleep(connection_sleep)
                retries += 1
            return False
        return True

    def is_topic_configuration_need_update(self, topic_name, topic_conf):
        """
        Checks whether topic's options need to be updated or not.
        Since the DescribeConfigsRequest does not give all current
        configuration entries for a topic, we need to use Zookeeper.
        Requires zk connection.
        """
        current_config, _zk_stats = self.zk_client.get(
            self.ZK_TOPIC_CONFIGURATION_NODE + topic_name)
        current_config = json.loads(current_config)['config']

        if len(topic_conf) != len(current_config.keys()):
            return True
        else:
            for conf_name, conf_value in topic_conf:
                if (conf_name not in current_config.keys()
                        or str(conf_value) != str(current_config[conf_name])):
                    return True

        return False

    def is_topic_partitions_need_update(self, topic_name, partitions):
        """
        Checks whether topic's partitions need to be updated or not.
        """
        total_partitions = self.get_total_partitions_for_topic(topic_name)
        need_update = False

        if partitions != total_partitions:
            if partitions > total_partitions:
                # increasing partition number
                need_update = True
            else:
                # decreasing partition number, which is not possible
                self.close()
                self.module.fail_json(
                    msg='Can\'t update \'%s\' topic partitions from %s to %s: '
                    'only increase is possible.' %
                    (topic_name, total_partitions, partitions))

        return need_update

    def is_topic_replication_need_update(self, topic_name, replica_factor):
        """
        Checks whether a topic replica needs to be updated or not.
        """
        need_update = False
        for _id, part in self.get_partitions_for_topic(topic_name).items():
            _topic, _partition, _leader, replicas, _isr, _error = part
            if len(replicas) != replica_factor:
                need_update = True

        return need_update

    def update_topic_partitions(self, topic_name, partitions):
        """
        Updates the topic partitions
        Usable for Kafka version >= 1.0.0
        The request must be sent to the current controller of the Kafka
        cluster.
        The request must specify the total number of partitions and the
        broker assignment for each new partition, without forgetting the
        replicas.
        See the NewPartitions class for explanations:
        apache/kafka/clients/admin/NewPartitions.java#L53
        """
        brokers = []
        for node_id, _, _, _ in self.get_brokers():
            brokers.append(int(node_id))
        brokers_iterator = itertools.cycle(brokers)
        topic, _, _, replicas, _, _ = (
            self.get_partitions_for_topic(topic_name)[0])
        total_replica = len(replicas)
        old_partition = self.get_total_partitions_for_topic(topic_name)
        assignments = []
        for _new_partition in range(partitions - old_partition):
            assignment = []
            for _replica in range(total_replica):
                assignment.append(next(brokers_iterator))
            assignments.append(assignment)

        request = CreatePartitionsRequest_v0(topic_partitions=[
            (topic_name, (partitions, assignments))
        ],
                                             timeout=self.DEFAULT_TIMEOUT,
                                             validate_only=False)
        response = self.send_request_and_get_response(request)
        for topic, error_code, _error_message in response.topic_errors:
            if error_code != self.SUCCESS_CODE:
                self.close()
                self.module.fail_json(
                    msg='Error while updating topic \'%s\' partitions. '
                    'Error key is %s, %s. Request was %s.' %
                    (topic, kafka.errors.for_code(error_code).message,
                     kafka.errors.for_code(error_code).description,
                     str(request)))
        self.refresh()

    def update_topic_configuration(self, topic_name, topic_conf):
        """
        Updates the topic configuration
        Usable for Kafka version >= 0.11.0
        The request must be sent to the current controller of the Kafka
        cluster.
        """
        request = AlterConfigsRequest_v0(resources=[(self.TOPIC_RESOURCE_ID,
                                                     topic_name, topic_conf)],
                                         validate_only=False)
        response = self.send_request_and_get_response(request)

        for error_code, _, _, resource_name in response.resources:
            if error_code != self.SUCCESS_CODE:
                self.close()
                self.module.fail_json(
                    msg='Error while updating topic \'%s\' configuration. '
                    'Error key is %s, %s' %
                    (resource_name, kafka.errors.for_code(error_code).message,
                     kafka.errors.for_code(error_code).description))
        self.refresh()

    def get_assignment_for_replica_factor_update(self, topic_name,
                                                 replica_factor):
        """
        Generates a json assignment based on the given replica_factor to
        update replicas for a topic.
        Uses all brokers available and distributes them as replicas using
        a round robin method.
        """
        all_replicas = []
        assign = {'partitions': [], 'version': 1}

        if replica_factor > self.get_total_brokers():
            self.close()
            self.close_zk_client()
            self.module.fail_json(
                msg='Error while updating topic \'%s\' replication factor: '
                'replication factor \'%s\' is greater than the number of '
                'available brokers \'%s\'.' %
                (topic_name, replica_factor, self.get_total_brokers()))
        else:
            for node_id, _, _, _ in self.get_brokers():
                all_replicas.append(node_id)
            brokers_iterator = itertools.cycle(all_replicas)
            for _, part in self.get_partitions_for_topic(topic_name).items():
                _, partition, _, _, _, _ = part
                assign_tmp = {
                    'topic': topic_name,
                    'partition': partition,
                    'replicas': []
                }
                for _i in range(replica_factor):
                    assign_tmp['replicas'].append(next(brokers_iterator))
                assign['partitions'].append(assign_tmp)

            return bytes(str(json.dumps(assign)).encode('ascii'))
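
    # For illustration: with brokers 1001-1003, a 2-partition topic and
    # replica_factor=2, the generated znode content would be
    # {"version": 1, "partitions": [
    #   {"topic": "my_topic", "partition": 0, "replicas": [1001, 1002]},
    #   {"topic": "my_topic", "partition": 1, "replicas": [1003, 1001]}]}.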

    def get_assignment_for_partition_update(self, topic_name, partitions):
        """
        Generates a json assignment based on the given number of partitions
        to update partitions for a topic.
        Uses all brokers available and distributes them among partitions
        using a round robin method.
        """
        all_brokers = []
        assign = {'partitions': {}, 'version': 1}

        _, _, _, replicas, _, _ = self.get_partitions_for_topic(topic_name)[0]
        total_replica = len(replicas)

        for node_id, _host, _port, _rack in self.get_brokers():
            all_brokers.append(node_id)
        brokers_iterator = itertools.cycle(all_brokers)

        for i in range(partitions):
            assign_tmp = []
            for _j in range(total_replica):
                assign_tmp.append(next(brokers_iterator))
            assign['partitions'][str(i)] = assign_tmp

        return bytes(str(json.dumps(assign)).encode('ascii'))

    def wait_for_znode_assignment(self, zk_sleep_time, zk_max_retries):
        """
        Wait for the reassignment znode to be consumed by Kafka.

        Raises `ReassignPartitionsTimeout` if `zk_max_retries` is reached.
        """
        retries = 0
        while (self.zk_client.exists(self.ZK_REASSIGN_NODE)
               and retries < zk_max_retries):
            retries += 1
            time.sleep(zk_sleep_time)

        if retries >= zk_max_retries:
            raise ReassignPartitionsTimeout(
                'The znode %s is still present after %s tries, giving up. '
                'Consider increasing your `zookeeper_max_retries` and/or '
                '`zookeeper_sleep_time` parameters and check your cluster.' %
                (self.ZK_REASSIGN_NODE, retries))

    def update_admin_assignment(self, json_assignment, zk_sleep_time,
                                zk_max_retries):
        """
        Updates the topic replica factor using a json assignment
        Cf core/src/main/scala/kafka/admin/ReassignPartitionsCommand.scala#L580
        1 - Send AlterReplicaLogDirsRequest to allow the broker to create a
            replica in the right log dir later, if the replica has not been
            created yet.
        2 - Create the reassignment znode so that the controller will send a
            LeaderAndIsrRequest to create the replica in the broker:
            zk.create("/admin/reassign_partitions", json_assignment)
            The znode content maps to the Scala ReplicaAssignment case class
            (topic: String, partition: Int, replicas: java.util.List[Int]).
        3 - Send AlterReplicaLogDirsRequest again to make sure the broker
            starts moving the replica to the specified log directory.
            It may take some time for the controller to create the replica
            in the broker, so retry if the replica has not been created yet.
        The node '/admin/reassign_partitions' may already exist for another
        topic; that is why we check for its existence and wait for its
        consumption if it is already present.
        Requires zk connection.
        """

        try:
            self.wait_for_znode_assignment(zk_sleep_time, zk_max_retries)
            self.zk_client.create(self.ZK_REASSIGN_NODE, json_assignment)
            self.wait_for_znode_assignment(zk_sleep_time, zk_max_retries)

        except ReassignPartitionsTimeout as e:
            self.close()
            self.close_zk_client()
            self.module.fail_json(msg=str(e))

        self.refresh()

    def update_topic_assignment(self, json_assignment, zknode):
        """
        Updates the topic partition assignment using a json assignment
        Used when Kafka version < 1.0.0
        Requires zk connection.
        """
        if not self.zk_client.exists(zknode):
            self.close()
            self.close_zk_client()
            self.module.fail_json(
                msg='Error while updating assignment: zk node %s missing. '
                'Is the topic name correct?' % (zknode))
        self.zk_client.set(zknode, json_assignment)
        self.refresh()


class KafkaManager:
    """
    A class used to interact with Kafka and Zookeeper
    and easily retrieve useful information
    """

    MAX_RETRY = 10
    MAX_POLL_RETRIES = 3
    MAX_ZK_RETRIES = 5
    TOPIC_RESOURCE_ID = 2
    DEFAULT_TIMEOUT = 15000
    SUCCESS_CODE = 0
    ZK_REASSIGN_NODE = '/admin/reassign_partitions'
    ZK_TOPIC_PARTITION_NODE = '/brokers/topics/'
    ZK_TOPIC_CONFIGURATION_NODE = '/config/topics/'

    # Not used yet.
    ZK_TOPIC_DELETION_NODE = '/admin/delete_topics/'

    def __init__(self, **configs):
        self.zk_client = None
        self.zk_configuration = None
        self.zookeeper_sleep_time = 5
        self.zookeeper_max_retries = 5
        self.kafka_sleep_time = 5
        self.kafka_max_retries = 5
        self.client = KafkaClient(**configs)
        self.refresh()

    def init_zk_client(self):
        """
        Zookeeper client initialization
        """
        if (self.zk_configuration is None
                or self.zk_configuration['hosts'] == ''):
            raise MissingConfiguration(
                '\'zookeeper\' parameter is needed when '
                'parameter \'state\' is \'present\' for resource '
                '\'topic\'.')
        try:
            self.zk_client = KazooClient(**self.zk_configuration)
            self.zk_client.start()
        except Exception as e:
            raise ZookeeperBroken(
                msg='Error while initializing Zookeeper client: '
                '%s. Is your Zookeeper server available and '
                'running on \'%s\'?' % (e, self.zk_configuration['hosts']))

    def close_zk_client(self):
        """
        Closes Zookeeper client
        """
        self.zk_client.stop()

    def close_kafka_client(self):
        """
        Closes Kafka client
        """
        self.client.close()

    def close(self):
        """
        Closes any available client
        """
        self.close_kafka_client()
        if self.zk_client is not None:
            self.close_zk_client()

    def refresh(self):
        """
        Refresh topics state
        """
        fut = self.client.cluster.request_update()
        self.client.poll(future=fut)
        if not fut.succeeded():
            raise UnableToRefreshState(
                'Error while updating topic state from Kafka server: %s.' %
                fut.exception)

    def create_topic(self,
                     name,
                     partitions,
                     replica_factor,
                     replica_assignment=None,
                     config_entries=None,
                     timeout=None):
        """
        Creates a topic
        Usable for Kafka version >= 0.10.1
        """
        if timeout is None:
            timeout = self.DEFAULT_TIMEOUT
        # avoid sharing mutable default arguments between calls
        if replica_assignment is None:
            replica_assignment = []
        if config_entries is None:
            config_entries = []
        request = CreateTopicsRequest_v0(create_topic_requests=[
            (name, partitions, replica_factor, replica_assignment,
             config_entries)
        ],
                                         timeout=timeout)
        response = self.send_request_and_get_response(request)

        for topic, error_code in response.topic_errors:
            if error_code != self.SUCCESS_CODE:
                raise KafkaManagerError(
                    'Error while creating topic %s. '
                    'Error key is %s, %s.' %
                    (topic, kafka.errors.for_code(error_code).message,
                     kafka.errors.for_code(error_code).description))
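
    # Example usage (hypothetical topic name):
    #   manager.create_topic('demo_topic', partitions=3, replica_factor=1)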

    def delete_topic(self, name, timeout=None):
        """
        Deletes a topic
        Usable for Kafka version >= 0.10.1
        Need to know which broker is controller for topic
        """
        if timeout is None:
            timeout = self.DEFAULT_TIMEOUT
        request = DeleteTopicsRequest_v0(topics=[name], timeout=timeout)
        response = self.send_request_and_get_response(request)

        for topic, error_code in response.topic_error_codes:
            if error_code != self.SUCCESS_CODE:
                raise KafkaManagerError(
                    'Error while deleting topic %s. Error key is: %s, %s. '
                    'Is option \'delete.topic.enable\' set to true on '
                    'your Kafka server?' %
                    (topic, kafka.errors.for_code(error_code).message,
                     kafka.errors.for_code(error_code).description))

    @staticmethod
    def _convert_create_acls_resource_request_v0(acl_resource):
        if acl_resource.operation == ACLOperation.ANY:
            raise IllegalArgumentError("operation must not be ANY")
        if acl_resource.permission_type == ACLPermissionType.ANY:
            raise IllegalArgumentError("permission_type must not be ANY")

        return (acl_resource.resource_type, acl_resource.name,
                acl_resource.principal, acl_resource.host,
                acl_resource.operation, acl_resource.permission_type)

    @staticmethod
    def _convert_create_acls_resource_request_v1(acl_resource):
        if acl_resource.operation == ACLOperation.ANY:
            raise IllegalArgumentError("operation must not be ANY")
        if acl_resource.permission_type == ACLPermissionType.ANY:
            raise IllegalArgumentError("permission_type must not be ANY")

        return (acl_resource.resource_type, acl_resource.name,
                acl_resource.pattern_type, acl_resource.principal,
                acl_resource.host, acl_resource.operation,
                acl_resource.permission_type)

    @staticmethod
    def _convert_delete_acls_resource_request_v0(acl_resource):
        return (acl_resource.resource_type, acl_resource.name,
                acl_resource.principal, acl_resource.host,
                acl_resource.operation, acl_resource.permission_type)

    @staticmethod
    def _convert_delete_acls_resource_request_v1(acl_resource):
        return (acl_resource.resource_type, acl_resource.name,
                acl_resource.pattern_type, acl_resource.principal,
                acl_resource.host, acl_resource.operation,
                acl_resource.permission_type)

    def describe_acls(self, acl_resource, api_version):
        """Describe a set of ACLs
        """

        if api_version < parse_version('2.0.0'):
            request = DescribeAclsRequest_v0(
                resource_type=acl_resource.resource_type,
                resource_name=acl_resource.name,
                principal=acl_resource.principal,
                host=acl_resource.host,
                operation=acl_resource.operation,
                permission_type=acl_resource.permission_type)
        else:
            request = DescribeAclsRequest_v1(
                resource_type=acl_resource.resource_type,
                resource_name=acl_resource.name,
                resource_pattern_type_filter=acl_resource.pattern_type,
                principal=acl_resource.principal,
                host=acl_resource.host,
                operation=acl_resource.operation,
                permission_type=acl_resource.permission_type)

        response = self.send_request_and_get_response(request)

        if response.error_code != self.SUCCESS_CODE:
            raise KafkaManagerError(
                'Error while describing ACL %s. Error %s: %s.' %
                (acl_resource, response.error_code, response.error_message))

        return response.resources

    def create_acls(self, acl_resources, api_version):
        """Create a set of ACLs"""

        if api_version < parse_version('2.0.0'):
            request = CreateAclsRequest_v0(creations=[
                self._convert_create_acls_resource_request_v0(acl_resource)
                for acl_resource in acl_resources
            ])
        else:
            request = CreateAclsRequest_v1(creations=[
                self._convert_create_acls_resource_request_v1(acl_resource)
                for acl_resource in acl_resources
            ])
        response = self.send_request_and_get_response(request)

        for error_code, error_message in response.creation_responses:
            if error_code != self.SUCCESS_CODE:
                raise KafkaManagerError(
                    'Error while creating ACL %s. Error %s: %s.' %
                    (acl_resources, error_code, error_message))

    def delete_acls(self, acl_resources, api_version):
        """Delete a set of ACLSs"""

        if api_version < parse_version('2.0.0'):
            request = DeleteAclsRequest_v0(filters=[
                self._convert_delete_acls_resource_request_v0(acl_resource)
                for acl_resource in acl_resources
            ])
        else:
            request = DeleteAclsRequest_v1(filters=[
                self._convert_delete_acls_resource_request_v1(acl_resource)
                for acl_resource in acl_resources
            ])

        response = self.send_request_and_get_response(request)

        for error_code, error_message, _ in response.filter_responses:
            if error_code != self.SUCCESS_CODE:
                raise KafkaManagerError(
                    'Error while deleting ACL %s. Error %s: %s.' %
                    (acl_resources, error_code, error_message))

    def send_request_and_get_response(self, request, node_id=None):
        """
        Sends a Kafka protocol request and returns
        the associated response
        """
        if node_id is None:
            try:
                node_id = self.get_controller()

            except UndefinedController:
                raise

            except Exception as e:
                raise KafkaManagerError(
                    'Cannot determine a controller for your current Kafka '
                    'server. Is your Kafka server running and available on '
                    '\'%s\' with security protocol \'%s\'? Are you using the '
                    'library versions from given \'requirements.txt\'? '
                    'Exception was: %s' %
                    (self.client.config['bootstrap_servers'],
                     self.client.config['security_protocol'], e))

        if self.connection_check(node_id):
            future = self.client.send(node_id, request)
            self.client.poll(future=future)
            if future.succeeded():
                return future.value
            else:
                raise KafkaManagerError(
                    'Error while sending request %s to Kafka server: %s.' %
                    (request, future.exception))
        else:
            raise KafkaManagerError(
                'Connection is not ready, please check your client '
                'and server configurations.')
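
    # Example usage (hedged sketch): fetch cluster metadata through the
    # current controller:
    #   metadata = manager.send_request_and_get_response(
    #       MetadataRequest_v1(topics=[]))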

    def get_controller(self):
        """
        Returns the current controller
        """
        if self.client.cluster.controller is not None:
            node_id, _host, _port, _rack = self.client.cluster.controller
            return node_id
        else:
            raise UndefinedController(
                'Cannot determine a controller for your current Kafka '
                'server. Is your Kafka server running and available on '
                '\'%s\' with security protocol \'%s\'?' %
                (self.client.config['bootstrap_servers'],
                 self.client.config['security_protocol']))

    def get_config_for_topic(self, topic_name, config_names):
        """
        Returns a dict of the topic's dynamic configuration entries
        Usable with Kafka version >= 0.11.0
        """
        current_config = {}
        if parse_version(self.get_api_version()) < parse_version('1.1.0'):
            request = DescribeConfigsRequest_v0(
                resources=[(self.TOPIC_RESOURCE_ID, topic_name, config_names)])
            kafka_config = self.send_request_and_get_response(request)
            for _error_code, _, _, _, config_entries in kafka_config.resources:
                for (config_name, config_value, _, is_default,
                     _) in config_entries:
                    if not is_default:
                        current_config[config_name] = config_value
        else:
            request = DescribeConfigsRequest_v1(
                resources=[(self.TOPIC_RESOURCE_ID, topic_name, config_names)])
            kafka_config = self.send_request_and_get_response(request)
            for _error_code, _, _, _, config_entries in kafka_config.resources:
                for (config_name, config_value, _, config_source, _,
                     _) in config_entries:
                    # config_source == 1 means a dynamic topic config
                    if config_source == 1:
                        current_config[config_name] = config_value
        return current_config
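
    # For illustration, the returned dict could look like
    # {'retention.ms': '86400000', 'cleanup.policy': 'compact'}.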

    def get_topics(self):
        """
        Returns the topics list
        """
        return self.client.cluster.topics()

    def get_total_partitions_for_topic(self, topic):
        """
        Returns the number of partitions for topic
        """
        return len(self.client.cluster.partitions_for_topic(topic))

    def get_partitions_for_topic(self, topic):
        """
        Returns all partitions for topic, with information
        TODO do not use private property anymore
        """
        return self.client.cluster._partitions[topic]

    def get_total_brokers(self):
        """
        Returns number of brokers available
        """
        return len(self.client.cluster.brokers())

    def get_brokers(self):
        """
        Returns all brokers
        """
        return self.client.cluster.brokers()

    def get_api_version(self):
        """
        Returns Kafka server version
        """
        major, minor, patch = self.client.config['api_version']
        return '%s.%s.%s' % (major, minor, patch)

    def connection_check(self, node_id, connection_sleep=0.1):
        """
        Checks that connection with broker is OK and that it is possible to
        send requests
        Since the maybe_connect() function used in ready() is 'async', we
        need to manually call the poll() function to establish the connection
        to the node
        """
        retries = 0
        if not self.client.ready(node_id):
            while retries < self.MAX_RETRY:
                self.client.poll()
                if self.client.ready(node_id):
                    return True
                time.sleep(connection_sleep)
                retries += 1
            return False
        return True

    def is_topic_configuration_need_update(self, topic_name, topic_conf):
        """
        Checks whether topic's options need to be updated or not.
        Compares the given configuration against the dynamic topic
        configuration currently reported by Kafka via get_config_for_topic().
        """
        current_config = self.get_config_for_topic(topic_name, None)

        if len(topic_conf) != len(current_config.keys()):
            return True
        else:
            for conf_name, conf_value in topic_conf:
                if (conf_name not in current_config.keys()
                        or str(conf_value) != str(current_config[conf_name])):
                    return True

        return False

    def is_topic_partitions_need_update(self, topic_name, partitions):
        """
        Checks whether topic's partitions need to be updated or not.
        """
        total_partitions = self.get_total_partitions_for_topic(topic_name)
        need_update = False

        if partitions != total_partitions:
            if partitions > total_partitions:
                # increasing partition number
                need_update = True
            else:
                # decreasing partition number, which is not possible
                raise KafkaManagerError(
                    'Can\'t update \'%s\' topic partitions from %s to %s: '
                    'only increase is possible.' %
                    (topic_name, total_partitions, partitions))

        return need_update

    def is_topic_replication_need_update(self, topic_name, replica_factor):
        """
        Checks whether a topic replica needs to be updated or not.
        """
        need_update = False
        for _id, part in self.get_partitions_for_topic(topic_name).items():
            _topic, _partition, _leader, replicas, _isr, _error = part
            if len(replicas) != replica_factor:
                need_update = True

        return need_update

    def update_topic_partitions(self, topic_name, partitions):
        """
        Updates the topic partitions
        Usable for Kafka version >= 1.0.0
        The request must be sent to the current controller of the Kafka
        cluster.
        The request must specify the total number of partitions and the
        broker assignment for each new partition, without forgetting the
        replicas.
        See the NewPartitions class for explanations:
        apache/kafka/clients/admin/NewPartitions.java#L53
        """
        brokers = []
        for node_id, _, _, _ in self.get_brokers():
            brokers.append(int(node_id))
        brokers_iterator = itertools.cycle(brokers)
        topic, _, _, replicas, _, _ = (
            self.get_partitions_for_topic(topic_name)[0])
        total_replica = len(replicas)
        old_partition = self.get_total_partitions_for_topic(topic_name)
        assignments = []
        for _new_partition in range(partitions - old_partition):
            assignment = []
            for _replica in range(total_replica):
                assignment.append(next(brokers_iterator))
            assignments.append(assignment)

        request = CreatePartitionsRequest_v0(topic_partitions=[
            (topic_name, (partitions, assignments))
        ],
                                             timeout=self.DEFAULT_TIMEOUT,
                                             validate_only=False)
        response = self.send_request_and_get_response(request)
        for topic, error_code, _error_message in response.topic_errors:
            if error_code != self.SUCCESS_CODE:
                raise KafkaManagerError(
                    'Error while updating topic \'%s\' partitions. '
                    'Error key is %s, %s. Request was %s.' %
                    (topic, kafka.errors.for_code(error_code).message,
                     kafka.errors.for_code(error_code).description, request))
        self.refresh()

    def update_topic_configuration(self, topic_name, topic_conf):
        """
        Updates the topic configuration
        Usable for Kafka version >= 0.11.0
        The request must be sent to the current controller of the Kafka
        cluster.
        """
        request = AlterConfigsRequest_v0(resources=[(self.TOPIC_RESOURCE_ID,
                                                     topic_name, topic_conf)],
                                         validate_only=False)
        response = self.send_request_and_get_response(request)

        for error_code, _, _, resource_name in response.resources:
            if error_code != self.SUCCESS_CODE:
                raise KafkaManagerError(
                    'Error while updating topic \'%s\' configuration. '
                    'Error key is %s, %s' %
                    (resource_name, kafka.errors.for_code(error_code).message,
                     kafka.errors.for_code(error_code).description))
        self.refresh()

    def get_assignment_for_replica_factor_update(self, topic_name,
                                                 replica_factor):
        """
        Generates a json assignment based on the given replica_factor to
        update replicas for a topic.
        Uses all brokers available and distributes them as replicas using
        a round robin method.
        """
        all_replicas = []
        partitions = []

        if replica_factor > self.get_total_brokers():
            raise KafkaManagerError(
                'Error while updating topic \'%s\' replication factor: '
                'replication factor \'%s\' is greater than the number of '
                'available brokers \'%s\'.' %
                (topic_name, replica_factor, self.get_total_brokers()))
        else:
            for node_id, _, _, _ in self.get_brokers():
                all_replicas.append(node_id)
            brokers_iterator = itertools.cycle(all_replicas)
            for _, part in self.get_partitions_for_topic(topic_name).items():
                _, partition, _, _, _, _ = part
                replicas = []
                for _i in range(replica_factor):
                    replicas.append(next(brokers_iterator))
                assign_tmp = (partition, replicas, {})
                partitions.append(assign_tmp)

            return [(topic_name, partitions, {})]
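
    # For illustration: with brokers 1001-1003, a 2-partition topic and
    # replica_factor=2, the returned value (AlterPartitionReassignments
    # layout) would be
    # [('my_topic', [(0, [1001, 1002], {}), (1, [1003, 1001], {})], {})].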

    def get_assignment_for_replica_factor_update_with_zk(
            self, topic_name, replica_factor):
        """
        Generates a json assignment based on the given replica_factor to
        update replicas for a topic.
        Uses all brokers available and distributes them as replicas using
        a round robin method.
        """
        all_replicas = []
        assign = {'partitions': [], 'version': 1}

        if replica_factor > self.get_total_brokers():
            raise KafkaManagerError(
                'Error while updating topic \'%s\' replication factor: '
                'replication factor \'%s\' is greater than the number of '
                'available brokers \'%s\'.' %
                (topic_name, replica_factor, self.get_total_brokers()))
        else:
            for node_id, _, _, _ in self.get_brokers():
                all_replicas.append(node_id)
            brokers_iterator = itertools.cycle(all_replicas)
            for _, part in self.get_partitions_for_topic(topic_name).items():
                _, partition, _, _, _, _ = part
                assign_tmp = {
                    'topic': topic_name,
                    'partition': partition,
                    'replicas': []
                }
                for _i in range(replica_factor):
                    assign_tmp['replicas'].append(next(brokers_iterator))
                assign['partitions'].append(assign_tmp)

            return bytes(str(json.dumps(assign)).encode('ascii'))

    def get_assignment_for_partition_update(self, topic_name, partitions):
        """
        Generates a json assignment based on the given number of partitions
        to update partitions for a topic.
        Uses all brokers available and distributes them among partitions
        using a round robin method.
        """
        all_brokers = []
        assign = {'partitions': {}, 'version': 1}

        _, _, _, replicas, _, _ = self.get_partitions_for_topic(topic_name)[0]
        total_replica = len(replicas)

        for node_id, _host, _port, _rack in self.get_brokers():
            all_brokers.append(node_id)
        brokers_iterator = itertools.cycle(all_brokers)

        for i in range(partitions):
            assign_tmp = []
            for _j in range(total_replica):
                assign_tmp.append(next(brokers_iterator))
            assign['partitions'][str(i)] = assign_tmp

        return bytes(str(json.dumps(assign)).encode('ascii'))
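
    # For illustration: with brokers 1001-1003, partitions=2 and an existing
    # replication factor of 2, the generated assignment would be
    # {"version": 1, "partitions": {"0": [1001, 1002], "1": [1003, 1001]}}.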

    def wait_for_partition_assignment(self):
        """
        Wait until all pending partition reassignments are done.
        """
        retries = 0
        while retries < self.kafka_max_retries:
            request = ListPartitionReassignmentsRequest_v0(timeout_ms=60000,
                                                           topics=None,
                                                           tags={})
            response = self.send_request_and_get_response(request)
            if len(response.topics) == 0:
                break
            retries += 1
            time.sleep(self.kafka_sleep_time)

        if retries >= self.kafka_max_retries:
            raise ReassignPartitionsTimeout(
                'Reassignment is still in progress after %s tries, '
                'giving up. Consider increasing your `kafka_max_retries` '
                'and/or `kafka_sleep_time` parameters and check your '
                'cluster.' % retries)

    def wait_for_znode_assignment(self):
        """
        Wait for the reassignment znode to be consumed by Kafka.

        Raises `ReassignPartitionsTimeout` if `zk_max_retries` is reached.
        """
        retries = 0
        while (self.zk_client.exists(self.ZK_REASSIGN_NODE)
               and retries < self.zookeeper_max_retries):
            retries += 1
            time.sleep(self.zookeeper_sleep_time)

        if retries >= self.zookeeper_max_retries:
            raise ReassignPartitionsTimeout(
                'The znode %s is still present after %s tries, giving up. '
                'Consider increasing your `zookeeper_max_retries` and/or '
                '`zookeeper_sleep_time` parameters and check your cluster.' %
                (self.ZK_REASSIGN_NODE, retries))

    def update_admin_assignment(self, name, replica_factor):
        """
        Updates the topic replica factor using a json assignment
        Cf core/src/main/scala/kafka/admin/ReassignPartitionsCommand.scala#L580
        1 - Send AlterReplicaLogDirsRequest to allow the broker to create a
            replica in the right log dir later, if the replica has not been
            created yet.
        2 - Create the reassignment znode so that the controller will send a
            LeaderAndIsrRequest to create the replica in the broker:
            zk.create("/admin/reassign_partitions", json_assignment)
            The znode content maps to the Scala ReplicaAssignment case class
            (topic: String, partition: Int, replicas: java.util.List[Int]).
        3 - Send AlterReplicaLogDirsRequest again to make sure the broker
            starts moving the replica to the specified log directory.
            It may take some time for the controller to create the replica
            in the broker, so retry if the replica has not been created yet.
        The node '/admin/reassign_partitions' may already exist for another
        topic; that is why we check for its existence and wait for its
        consumption if it is already present.
        Requires zk connection.
        """
        if (parse_version(self.get_api_version()) >= parse_version('2.4.0')):
            assign = self.get_assignment_for_replica_factor_update(
                name, replica_factor)
            request = AlterPartitionReassignmentsRequest_v0(timeout_ms=60000,
                                                            topics=assign,
                                                            tags={})
            self.wait_for_partition_assignment()
            self.send_request_and_get_response(request)
            self.wait_for_partition_assignment()
        elif self.zk_configuration is not None:
            try:
                json_assignment = (
                    self.get_assignment_for_replica_factor_update_with_zk(
                        name, replica_factor))
                self.init_zk_client()
                self.wait_for_znode_assignment()
                self.zk_client.create(self.ZK_REASSIGN_NODE, json_assignment)
                self.wait_for_znode_assignment()
            finally:
                self.close_zk_client()
        else:
            raise KafkaManagerError(
                'Zookeeper is mandatory for partition assignment when '
                'using Kafka < 2.4.0.')
        self.refresh()

    def update_topic_assignment(self, json_assignment, zknode):
        """
        Updates the topic partition assignment using a json assignment
        Used when Kafka version < 1.0.0
        Requires zk connection.
        """
        try:
            self.init_zk_client()
            if not self.zk_client.exists(zknode):
                raise KafkaManagerError(
                    'Error while updating assignment: zk node %s missing. '
                    'Is the topic name correct?' % (zknode))
            self.zk_client.set(zknode, json_assignment)
            self.refresh()
        finally:
            self.close_zk_client()

    @staticmethod
    def generate_consumer_groups_for_broker(broker, response):
        """
        From a `broker` and `response`, generate a dict of consumer groups
        """
        consumer_groups = {}
        for err, gid, gstate, prot_type, prot, _members in response.groups:
            members = {}
            for mid, cid, chost, mdata, assign in _members:
                mdata = ProtocolMetadata.decode(mdata)
                assign = MemberAssignment.decode(assign)
                assignment = {}
                for t, p in assign.assignment:
                    assignment[t] = p
                members[mid] = {
                    'client_id': cid,
                    'client_host': chost,
                    'member_metadata': {
                        'version': mdata.version,
                        'subscription': mdata.subscription,
                        'user_data': mdata.user_data.decode('utf-8')
                    },
                    'member_assignment': {
                        'version': assign.version,
                        'assignment': assignment,
                        'user_data': assign.user_data.decode('utf-8')
                    }
                }
            group = {
                'error_code': err,
                'group_state': gstate,
                'members': members,
                'protocol_type': prot_type,
                'protocol': prot,
                'coordinator': {
                    'host': broker.host,
                    'nodeId': broker.nodeId,
                    'port': broker.port,
                    'rack': broker.rack
                }
            }
            consumer_groups[gid] = group
        return consumer_groups

    def get_consumer_groups_resource(self):
        """
Return a dict object containing information about consumer groups,
following this structure:
{
    "AWESOME_consumer_group_1607465801": {
        "coordinator": {
            "host": "172.17.0.9",
            "nodeId": 1001,
            "port": 9092,
            "rack": null
        },
        "error_code": 0,
        "group_state": "Empty",
        "members": {},
        "protocol": "",
        "protocol_type": "consumer"
    },
    "AWESOME_consumer_group_1607466258": {
        "coordinator": {
            "host": "172.17.0.10",
            "nodeId": 1002,
            "port": 9092,
            "rack": null
        },
        "error_code": 0,
        "group_state": "Stable",
        "members": {
            "kafka-python-2.0.1-e5500fee-8df9-4f37-bcd7-788522a1c382": {
                "client_host": "/172.17.0.1",
                "client_id": "kafka-python-2.0.1",
                "member_assignment": {
                    "assignment": {
                        "test_1607465755": [
                            0
                        ]
                    },
                    "user_data": "",
                    "version": 0
                },
                "member_metadata": {
                    "subscription": [
                        "test_1607465755"
                    ],
                    "user_data": "",
                    "version": 0
                }
            }
        },
        "protocol": "range",
        "protocol_type": "consumer"
    }
}
        """
        consumer_groups = {}
        for broker in self.get_brokers():
            request = ListGroupsRequest_v2()
            response = self.send_request_and_get_response(
                request, node_id=broker.nodeId)
            if response.error_code != self.SUCCESS_CODE:
                raise KafkaManagerError(
                    'Error while listing consumer groups of %s. '
                    'Error key is %s, %s.' %
                    (broker.nodeId, kafka.errors.for_code(
                        response.error_code).message,
                     kafka.errors.for_code(response.error_code).description))
            if response.groups:
                request = DescribeGroupsRequest_v0(groups=tuple(
                    [group for group, protocol in response.groups]))
                response = self.send_request_and_get_response(
                    request, node_id=broker.nodeId)
                consumer_groups.update(
                    self.generate_consumer_groups_for_broker(broker, response))

        return consumer_groups

    def get_brokers_resource(self):
        """
Return a dict object containing information about brokers,
following this structure:
{
    "1001": {
        "host": "172.17.0.9",
        "nodeId": 1001,
        "port": 9092,
        "rack": null
    },
    "1002": {
        "host": "172.17.0.10",
        "nodeId": 1002,
        "port": 9092,
        "rack": null
    }
}
        """
        brokers = {}
        for broker in self.get_brokers():
            brokers[broker.nodeId] = broker._asdict()
        return brokers

    def get_topics_resource(self):
        """
Return a dict object containing information about topics and partitions,
following this structure:
{
    "test_1600378061": {
        "0": {
            "isr": [
                1002
            ],
            "leader": 1002,
            "replicas": [
                1002
            ]
        }
    }
}
        """
        topics = {}
        for topic in self.get_topics():
            topics[topic] = {}
            partitions = self.get_partitions_for_topic(topic)
            for partition, metadata in partitions.items():
                _, _, leader, replicas, isr, _ = metadata
                topics[topic][partition] = {
                    'leader': leader,
                    'replicas': replicas,
                    'isr': isr
                }
        return topics

    @property
    def resource_to_func(self):
        return {
            'topic': self.get_topics_resource,
            'broker': self.get_brokers_resource,
            'consumer_group': self.get_consumer_groups_resource
        }

    def get_resource(self, resource):
        if resource not in self.resource_to_func:
            raise ValueError('Unexpected resource "%s"' % resource)

        return self.resource_to_func[resource]()
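
    # Example usage (hedged sketch): manager.get_resource('topic') dispatches
    # to get_topics_resource() and returns the dict documented above.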

    def ensure_topic(self, name, options, partitions, replica_factor):
        changed = False
        warn = None

        if self.is_topic_configuration_need_update(name, options):
            self.update_topic_configuration(name, options)
            changed = True

        if partitions > 0 and replica_factor > 0:
            # partitions and replica_factor are set
            if self.is_topic_replication_need_update(name, replica_factor):
                self.update_admin_assignment(name, replica_factor)
                changed = True

            if self.is_topic_partitions_need_update(name, partitions):
                cur_version = parse_version(self.get_api_version())
                if cur_version < parse_version('1.0.0'):
                    json_assignment = (
                        self.get_assignment_for_partition_update(
                            name, partitions))
                    zknode = '/brokers/topics/%s' % name
                    self.update_topic_assignment(json_assignment, zknode)
                else:
                    self.update_topic_partitions(name, partitions)
                changed = True
        else:
            # 0 or "default" (-1)
            warn = ("Current values of 'partitions' (%s) and "
                    "'replica_factor' (%s) does not let this lib to "
                    "perform any action related to partitions and "
                    "replication. SKIPPING." % (partitions, replica_factor))

        return changed, warn
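
A minimal usage sketch for the class above (assuming a reachable broker on
'localhost:9092' and an existing topic 'my_topic'; names and values are
illustrative only):

manager = KafkaManager(bootstrap_servers='localhost:9092',
                       api_version=(2, 4, 0))
try:
    changed, warn = manager.ensure_topic(
        name='my_topic',
        options=[('retention.ms', '86400000')],
        partitions=3,
        replica_factor=2)
    if warn is not None:
        print(warn)
    print('changed: %s' % changed)
finally:
    manager.close()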
Beispiel #14
class AdminClient(object):
    DEFAULT_CONFIG = {
        'bootstrap_servers': 'localhost',
    }
    DELETE_TIMEOUT = 1000
    CREATE_TIMEOUT = 1000
    ALTER_TIMEOUT = 1000

    def __init__(self, **configs):
        # Only check for extra config keys in top-level class
        extra_configs = set(configs).difference(self.DEFAULT_CONFIG)
        if extra_configs:
            raise KafkaConfigurationError("Unrecognized configs: %s" %
                                          extra_configs)

        self.config = copy.copy(self.DEFAULT_CONFIG)
        self.config.update(configs)

        self._client = KafkaClient(**self.config)

    def _get_controller_id(self):
        """Get the cluster controller
        """
        node_id = self._client.least_loaded_node()
        m = MetadataRequest[1](topics=[])
        future = self._client.send(node_id, m)
        self._client.poll(future=future)
        response = future.value
        return response.controller_id

    def brokers(self):
        """ Get all brokers """
        return self._client.cluster.brokers()

    def topics(self, exclude_internal_topics=True):
        """Get all topics the user is authorized to view.

        Returns:
            list: [Topic]
        """
        cluster = self._client.cluster
        if self._client._metadata_refresh_in_progress and self._client._topics:
            future = cluster.request_update()
            self._client.poll(future=future)
        stash = cluster.need_all_topic_metadata
        cluster.need_all_topic_metadata = True
        future = cluster.request_update()
        self._client.poll(future=future)
        cluster.need_all_topic_metadata = stash

        _topics = []
        # FIXME: this should be part of ClusterMetadata class
        for topic, partitions in cluster._partitions.items():
            if exclude_internal_topics and topic in cluster.internal_topics:
                continue
            _topics.append(Topic(topic, partitions.values()))

        return _topics

    def describe_topic(self, topic):
        """ Get all details about a topic, like current offsets for its partitions

        Returns: Topic or None
        """
        cluster = self._client.cluster
        if topic not in cluster.topics():
            # Refresh metadata
            self.topics()

        if topic in cluster.topics():
            topic = Topic(topic, cluster._partitions[topic].values())
        else:
            return None
        # Describe offsets
        for partition in topic.partitions:
            offsets = self.partition_offset(partition)
            partition.set_offset(offsets)

        return topic

    def partition_offset(self, partition):
        """
        Get the latest offset for a given topic partition
        """
        partition_timestamp = (partition.partition, -1)
        request = OffsetRequest[1](replica_id=-1,
                                   topics=[
                                       (partition.topic, [partition_timestamp
                                                          ]),
                                   ])
        future = self._client.send(partition.leader, request)
        self._client.poll(future=future)
        response = future.value
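        # OffsetResponse_v1 entries look like
        # (topic, [(partition, error_code, timestamp, offset)]),
        # so topic[-1][0][-1] picks the offset of the first partition.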
        topic = response.topics[0]
        return topic[-1][0][-1]

    def consumer_groups(self):
        """Get all consumer groups known to the cluster

        Returns:
            list: [Group]
        """
        groups = {}
        for broker in self._client.cluster.brokers():
            request = ListGroupsRequest[0]()
            future = self._client.send(broker.nodeId, request)
            self._client.poll(future=future)
            response = future.value
            if response:
                for g in response.groups:
                    group = g[0]
                    groups[group] = Group(group, broker.nodeId)
            else:
                log.error("No response for ListGroupsRequest")
        return list(groups.values())

    def describe_consumer_group(self, group):
        """
        Describe a consumer group

        Returns: Group
        """
        # consumer_groups() returns Group objects; index them by name
        # (this assumes the Group helper exposes the group name as `.name`)
        groups = {g.name: g for g in self.consumer_groups()}
        if group not in groups:
            return None
        group = groups[group]
        # v0 keeps the response layout consumed by set_metadata() below
        request = DescribeGroupsRequest[0](groups=[group.name])
        future = self._client.send(group.coordinator_id, request)
        self._client.poll(future=future)
        response = future.value
        group.set_metadata(response.groups[0])
        return group

    def consumer_offset_info(self, consumer):
        """ Fetch and configure the consumed offset information for a given consumer group
        """
        all_topics = self.topics()
        topics_request = [(topic.name, [p.partition for p in topic.partitions])
                          for topic in all_topics]

        o = OffsetFetchRequest[1](consumer_group=consumer.name,
                                  topics=topics_request)
        future = self._client.send(consumer.coordinator_id, o)
        self._client.poll(future=future)
        response = future.value
        for r in response.topics:
            topic_name, offsets = r
            if any((o[1] != -1) for o in offsets):
                topic_info = self.describe_topic(topic_name)
                for o in offsets:
                    if o[1] == -1:
                        continue
                    partition = o[0]
                    offset = o[1]
                    consumer.set_offset(topic_name, partition, offset,
                                        topic_info.get_offset(partition))
        return consumer

    def create_topic(self, name, partitions, replication_factor):
        """ Create a new topic
        """
        node_id = self._get_controller_id()
        cc = CreateTopicsRequest[0](
            create_topic_requests=[
                (name, partitions, replication_factor, [], []),
            ],
            timeout=self.CREATE_TIMEOUT,
        )
        future = self._client.send(node_id, cc)
        self._client.poll(future=future)
        response = future.value
        error = (response.topic_error_codes[0][1]
                 if response.topic_error_codes else None)
        if error == 0:
            return True
        else:
            log.error('controller: {} create topic error: {}'.format(
                node_id, error))
            return False

    def delete_topic(self, name):
        """ Delete a topic """
        node_id = self._get_controller_id()
        d = DeleteTopicsRequest[0](topics=[name], timeout=self.DELETE_TIMEOUT)
        future = self._client.send(node_id, d)
        self._client.poll(future=future)
        response = future.value
        error = (response.topic_error_codes[0][1]
                 if response.topic_error_codes else None)
        if error == 0:
            return True
        else:
            log.error('controller: {} delete topic error: {}'.format(
                node_id, error))
            return False

    def alter_topic(self, topic_name, partitions):
        """ Add partitions """
        node_id = self._get_controller_id()
        a = CreatePartitionsRequest[0](
            topic_partitions=[(topic_name, (partitions, None))],
            timeout=self.ALTER_TIMEOUT,
            validate_only=False,
        )
        future = self._client.send(node_id, a)
        self._client.poll(future=future)
        response = future.value
        error = (response.topic_errors[0][1]
                 if response.topic_errors else None)
        if error == 0:
            return True
        else:
            log.error('controller: {} alter topic error: {}'.format(
                node_id, error))
            return False
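The three controller-bound methods above (create_topic, delete_topic, alter_topic) repeat the same error-extraction pattern. A minimal sketch of a shared helper; the name _first_topic_error is illustrative and not part of the original class:

    def _first_topic_error(self, pairs):
        """Return the error code of the first (topic, error_code, ...) tuple,
        or None when the response carried no per-topic results."""
        return pairs[0][1] if pairs else None

With it, create_topic and delete_topic reduce to error = self._first_topic_error(response.topic_error_codes), and alter_topic to error = self._first_topic_error(response.topic_errors).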
Beispiel #15
0
class KafkaManager(object):
    """
    Easier access to Kafka information
    """

    TOPIC_RESOURCE_ID = 2
    MAX_POLL_RETRIES = 3
    MAX_RETRY = 10
    SUCCESS_CODE = 0

    def __init__(self, **configs):
        self.client = KafkaClient(**configs)
        self.refresh()

    def refresh(self):
        """
        Refresh topics state
        """
        fut = self.client.cluster.request_update()
        self.client.poll(future=fut)
        if not fut.succeeded():
            self.close()
            raise RuntimeError(
                'Error while updating topic state from Kafka server: %s.' %
                fut.exception)

    def close(self):
        """
        Closes the client. Must be called once
        the client is not used anymore.
        """
        self.client.close()

    def get_controller(self):
        """
        Return the current controller for cluster.
        """
        node_id, _host, _port, _rack = self.client.cluster.controller
        return node_id

    def get_topics(self):
        """
        Returns the topics list
        """
        return self.client.cluster.topics()

    def get_total_partitions_for_topic(self, topic):
        """
        Returns the number of partitions for topic
        """
        return len(self.client.cluster.partitions_for_topic(topic))

    def get_partitions_metadata_for_topic(self, topic):
        """
        Returns set of partition for topic
        """
        return self.client.cluster._partitions[topic]

    def get_config_for_topic(self, topic_name, config_name):
        """
        Returns value for config_name topic option
        """
        # The protocol expects an array of config names per resource.
        request = DescribeConfigsRequestV0(
            resources=[(self.TOPIC_RESOURCE_ID, topic_name, [config_name])])
        response = self.send_request_and_get_response(request)
        for err_code, err_message, _, _, config_entries in response.resources:
            if err_code != self.SUCCESS_CODE:
                raise Exception(err_message)
            for _, value, _, _, _ in config_entries:
                return value

    @staticmethod
    def _map_to_quota_resources(entries):
        return [{
            'entity': [{
                'entity_type': entity['entity_type'],
                'entity_name': entity['entity_name']
            } for entity in entry['entity']],
            'quotas':
            {quota['name']: quota['value']
             for quota in entry['values']}
        } for entry in entries]

    def describe_quotas(self):
        request = DescribeClientQuotasRequest_v0(components=[])
        response = self.send_request_and_get_response(request)
        if response.error_code != 0:
            raise Exception(
                'DescribeClientQuotas failed with error_code %s' %
                response.error_code)
        return self._map_to_quota_resources(response.to_object()['entries'])

    def describe_acls(self, acl_resource):
        """Describe a set of ACLs
        """

        request = DescribeAclsRequest_v0(
            resource_type=acl_resource['resource_type'],
            resource_name=acl_resource['name'],
            principal=acl_resource['principal'],
            host=acl_resource['host'],
            operation=acl_resource['operation'],
            permission_type=acl_resource['permission_type'])

        response = self.send_request_and_get_response(request)

        if response.error_code == self.SUCCESS_CODE:
            return response.resources

        return None

    def connection_check(self, node_id, connection_sleep=1):
        """
        Checks that connection with broker is OK and that it is possible
        to send requests
        Since the _maybe_connect() function used in ready() is 'async',
        we need to manually call poll() several times until the
        connection is established.
        """
        retries = 0
        if not self.client.ready(node_id):
            while retries < self.MAX_RETRY:
                self.client.poll()
                if self.client.ready(node_id):
                    return True
                time.sleep(connection_sleep)
                retries += 1
            return False
        return True

    def send_request_and_get_response(self, request):
        """
        Send requet and get associated response
        """
        try:
            node_id = self.get_controller()
        except Exception:
            raise
        if self.connection_check(node_id):
            future = self.client.send(node_id, request)
            self.client.poll(future=future)
            if future.succeeded():
                return future.value
            else:
                raise future.exception

        return None
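A short usage sketch for KafkaManager; the bootstrap address, topic name, and config key are assumptions for illustration:

manager = KafkaManager(bootstrap_servers='localhost:9092')
try:
    print(manager.get_topics())
    print(manager.get_config_for_topic('my-topic', 'retention.ms'))
finally:
    manager.close()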
Beispiel #16
0
class KafkaConsumerGroups:
    kafka_brokers = None
    client = None
    timeout = None
    security_protocol = None
    sasl_mechanism = None
    sasl_plain_username = None
    sasl_plain_password = None
    ssl_certfile = None
    ssl_keyfile = None
    ssl_context = None

    def __init__(self,
                 kafka_brokers,
                 security_protocol,
                 sasl_mechanism,
                 sasl_plain_username,
                 sasl_plain_password,
                 ssl_context,
                 timeout=5000):
        self.kafka_brokers = kafka_brokers
        self.security_protocol = security_protocol
        self.sasl_mechanism = sasl_mechanism
        self.sasl_plain_username = sasl_plain_username
        self.sasl_plain_password = sasl_plain_password
        self.ssl_context = ssl_context
        self.timeout = timeout
        self.client = KafkaClient(bootstrap_servers=kafka_brokers,
                                  security_protocol=security_protocol,
                                  sasl_mechanism=sasl_mechanism,
                                  sasl_plain_username=sasl_plain_username,
                                  sasl_plain_password=sasl_plain_password,
                                  ssl_context=ssl_context,
                                  timeout=timeout)
        self.lag_topics_found = []
        self.lag_total = 0

    def list(self):
        # ListGroupsRequest v0 carries no fields.
        list_groups_request = admin.ListGroupsRequest_v0()
        kafka_broker_ids = [
            broker.nodeId for broker in self.client.cluster.brokers()
        ]
        consumers_grp = {}
        for broker_id in kafka_broker_ids:
            current_of_tries = 0
            max_of_tries = 5
            data_from_node = False

            while not data_from_node and current_of_tries <= max_of_tries:
                future = self.client.send(broker_id, list_groups_request)
                self.client.poll(timeout_ms=self.timeout, future=future)
                if future.value is not None:
                    result = future.value.groups
                    for i in result:
                        consumers_grp.update({i[0]: broker_id})
                    data_from_node = True
                else:
                    current_of_tries += 1
                    time.sleep(0.5)

        return consumers_grp

    def get_members(self, node_id, group_name):
        describe_groups_request = admin.DescribeGroupsRequest_v0(
            groups=[group_name])
        future = self.client.send(node_id, describe_groups_request)
        self.client.poll(timeout_ms=self.timeout, future=future)

        (error_code, group_id, state, protocol_type, protocol,
         members) = future.value.groups[0]

        if error_code != 0:
            print(
                "Kafka API - RET admin.DescribeGroupsRequest, error_code={}, group_id={}, state={}, protocol_type={}, protocol={}, members_count={}"
                .format(error_code, group_id, state, protocol_type, protocol,
                        len(members)))
            exit(1)

        lmembers = []
        for member in members:
            (member_id, client_id, client_host, member_metadata,
             member_assignment) = member
            lmembers.append({
                'member_id': member_id,
                'client_id': client_id,
                'client_host': client_host
            })

        return lmembers

    def describe(self, node_id, group_name):
        describe_groups_request = admin.DescribeGroupsRequest_v0(
            groups=[group_name])
        future = self.client.send(node_id, describe_groups_request)
        self.client.poll(timeout_ms=self.timeout, future=future)

        (error_code, group_id, state, protocol_type, protocol,
         members) = future.value.groups[0]

        if error_code != 0:
            print(
                "Kafka API - RET admin.DescribeGroupsRequest, error_code={}, group_id={}, state={}, protocol_type={}, protocol={}, members_count={}"
                .format(error_code, group_id, state, protocol_type, protocol,
                        len(members)))
            exit(1)

        metadata_consumer_group = {
            'id': group_name,
            'state': state,
            'topics': [],
            'lag': 0,
            'members': []
        }

        if len(members) != 0:
            for member in members:
                (member_id, client_id, client_host, member_metadata,
                 member_assignment) = member
                member_topics_assignment = []
                for (topic, partitions) in MemberAssignment.decode(
                        member_assignment).assignment:
                    member_topics_assignment.append(topic)

                metadata_consumer_group['members'].append({
                    'member_id': member_id,
                    'client_id': client_id,
                    'client_host': client_host,
                    'topic': member_topics_assignment
                })

                metadata_consumer_group['topics'] += member_topics_assignment

            # Compute the lag once over the full topic list instead of once
            # per member, which would double-count already-counted topics.
            (lag_total, topics_found) = self.get_lag_by_topic_list(
                group_name, topics=metadata_consumer_group['topics'])
            metadata_consumer_group['lag'] += lag_total
        else:
            all_topics = self.client.cluster.topics()

            # Exclude the internal offsets topic from the lag computation.
            all_topics.discard('__consumer_offsets')
            (lag_total,
             topics_found) = self.get_lag_by_topic_list(group_name,
                                                        topics=all_topics)

            metadata_consumer_group[
                'lag'] = metadata_consumer_group['lag'] + lag_total
            metadata_consumer_group['topics'] = topics_found

        return metadata_consumer_group

    def get_lag_by_topic_list(self, group_name, topics):
        self.lag_topics_found = []
        self.lag_total = 0

        topics = list(topics)
        no_threads = 16

        batches = [
            topics[i:i + no_threads] for i in range(0, len(topics), no_threads)
        ]
        for batch_topics in batches:
            threads = []
            for topic in batch_topics:
                t = threading.Thread(target=self.get_lag_by_topic,
                                     args=(
                                         group_name,
                                         topic,
                                     ))
                threads.append(t)
                t.start()

            for t in threads:
                # Thread.isAlive() was removed in Python 3.9; join() is a
                # no-op for threads that have already finished.
                t.join()

        return self.lag_total, list(set(self.lag_topics_found))
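Note that the worker threads spawned above all mutate shared state: list.append on self.lag_topics_found is atomic under CPython's GIL, but the += on self.lag_total in get_lag_by_topic below is a read-modify-write and can race. A hedged hardening sketch (the _lag_lock attribute is an addition, not in the original class):

    # In __init__:
    #     self._lag_lock = threading.Lock()
    # In get_lag_by_topic, guard the shared-state updates:
    with self._lag_lock:
        self.lag_topics_found.append(topic)
        self.lag_total += (last_offset - committed)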

    def get_lag_by_topic(self, group_name, topic):
        consumer = KafkaConsumer(bootstrap_servers=self.kafka_brokers,
                                 group_id=group_name,
                                 security_protocol=self.security_protocol,
                                 sasl_mechanism=self.sasl_mechanism,
                                 sasl_plain_username=self.sasl_plain_username,
                                 sasl_plain_password=self.sasl_plain_password,
                                 ssl_context=self.ssl_context)
        # partitions_for_topic() returns None for unknown topics.
        partitions_per_topic = consumer.partitions_for_topic(topic) or set()

        for partition in partitions_per_topic:
            tp = TopicPartition(topic, partition)
            consumer.assign([tp])
            committed = consumer.committed(tp)
            consumer.seek_to_end(tp)
            last_offset = consumer.position(tp)
            # Count the partition only when it has both a committed offset
            # and a non-zero end offset.
            if (committed is not None and int(committed)
                    and last_offset is not None and int(last_offset)):
                self.lag_topics_found.append(topic)
                self.lag_total += (last_offset - committed)

        consumer.close(autocommit=False)
        return
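A usage sketch for KafkaConsumerGroups against an unauthenticated PLAINTEXT cluster; the broker address is an assumption:

kcg = KafkaConsumerGroups(kafka_brokers='localhost:9092',
                          security_protocol='PLAINTEXT',
                          sasl_mechanism=None,
                          sasl_plain_username=None,
                          sasl_plain_password=None,
                          ssl_context=None)
groups = kcg.list()    # {group_name: coordinating_broker_id}
for name, broker_id in groups.items():
    print(name, kcg.describe(broker_id, name)['lag'])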
Beispiel #17
0
class OffsetsFetcherAsync(object):

    DEFAULT_CONFIG = {
        'session_timeout_ms': 30000,
        'heartbeat_interval_ms': 3000,
        'retry_backoff_ms': 100,
        'api_version': (0, 9),
        'metric_group_prefix': ''
    }

    def __init__(self, **configs):
        self.config = copy.copy(self.DEFAULT_CONFIG)
        self.config.update(configs)
        self._client = KafkaClient(**self.config)
        self._coordinator_id = None
        self.group_id = configs['group_id']
        self.topic = configs['topic']

    def _ensure_coordinator_known(self):
        """Block until the coordinator for this group is known
        (and we have an active connection -- java client uses unsent queue).
        """
        while self._coordinator_unknown():

            # Prior to 0.8.2 there was no group coordinator
            # so we will just pick a node at random and treat
            # it as the "coordinator"
            if self.config['api_version'] < (0, 8, 2):
                self._coordinator_id = self._client.least_loaded_node()
                self._client.ready(self._coordinator_id)
                continue

            future = self._send_group_coordinator_request()
            self._client.poll(future=future)

            if future.failed():
                if isinstance(future.exception,
                              Errors.GroupCoordinatorNotAvailableError):
                    continue
                elif future.retriable():
                    metadata_update = self._client.cluster.request_update()
                    self._client.poll(future=metadata_update)
                else:
                    raise future.exception  # pylint: disable-msg=raising-bad-type

    def _coordinator_unknown(self):
        """Check if we know who the coordinator is and have an active connection

        Side-effect: reset _coordinator_id to None if connection failed

        Returns:
            bool: True if the coordinator is unknown
        """
        if self._coordinator_id is None:
            return True

        if self._client.is_disconnected(self._coordinator_id):
            self._coordinator_dead()
            return True

        return False

    def _coordinator_dead(self, error=None):
        """Mark the current coordinator as dead."""
        if self._coordinator_id is not None:
            log.warning("Marking the coordinator dead (node %s) for group %s: %s.",
                        self._coordinator_id, self.group_id, error)
            self._coordinator_id = None

    def _send_group_coordinator_request(self):
        """Discover the current coordinator for the group.

        Returns:
            Future: resolves to the node id of the coordinator
        """
        node_id = self._client.least_loaded_node()
        if node_id is None:
            return Future().failure(Errors.NoBrokersAvailable())

        log.debug("Sending group coordinator request for group %s to broker %s",
                  self.group_id, node_id)
        request = GroupCoordinatorRequest[0](self.group_id)
        future = Future()
        _f = self._client.send(node_id, request)
        _f.add_callback(self._handle_group_coordinator_response, future)
        _f.add_errback(self._failed_request, node_id, request, future)
        return future

    def _handle_group_coordinator_response(self, future, response):
        log.debug("Received group coordinator response %s", response)
        if not self._coordinator_unknown():
            # We already found the coordinator, so ignore the request
            log.debug("Coordinator already known -- ignoring metadata response")
            future.success(self._coordinator_id)
            return

        error_type = Errors.for_code(response.error_code)
        if error_type is Errors.NoError:
            ok = self._client.cluster.add_group_coordinator(self.group_id, response)
            if not ok:
                # This could happen if coordinator metadata is different
                # than broker metadata
                future.failure(Errors.IllegalStateError())
                return

            self._coordinator_id = response.coordinator_id
            log.info("Discovered coordinator %s for group %s",
                     self._coordinator_id, self.group_id)
            self._client.ready(self._coordinator_id)
            future.success(self._coordinator_id)
        elif error_type is Errors.GroupCoordinatorNotAvailableError:
            log.debug("Group Coordinator Not Available; retry")
            future.failure(error_type())
        elif error_type is Errors.GroupAuthorizationFailedError:
            error = error_type(self.group_id)
            log.error("Group Coordinator Request failed: %s", error)
            future.failure(error)
        else:
            error = error_type()
            log.error("Unrecognized failure in Group Coordinator Request: %s",
                      error)
            future.failure(error)

    def _failed_request(self, node_id, request, future, error):
        log.error('Error sending %s to node %s [%s]',
                  request.__class__.__name__, node_id, error)
        # Marking coordinator dead
        # unless the error is caused by internal client pipelining
        if not isinstance(error, (Errors.NodeNotReadyError,
                                  Errors.TooManyInFlightRequests)):
            self._coordinator_dead()
        future.failure(error)

    def offsets(self, partitions, timestamp):
        """Fetch a single offset before the given timestamp for the set of partitions.

        Blocks until offset is obtained, or a non-retriable exception is raised

        Arguments:
            partitions (iterable of TopicPartition): the partitions for which
                to fetch offsets.
            timestamp (int): timestamp for fetching offset. -1 for the latest
                available, -2 for the earliest available. Otherwise timestamp
                is treated as epoch milliseconds.

        Returns:
            dict: TopicPartition and message offsets
        """
        retries = 3
        while retries > 0:
            offsets = {}
            for future in self._send_offset_request(partitions, timestamp):
                self._client.poll(future=future)

                if future.succeeded():
                    for tp, offset in future.value:
                        offsets[tp] = offset
                    continue

                if not future.retriable():
                    raise future.exception  # pylint: disable-msg=raising-bad-type

                if future.exception.invalid_metadata:
                    refresh_future = self._client.cluster.request_update()
                    self._client.poll(future=refresh_future)
                    log.warning("Got exception %s and kept the loop", future.exception)
            if offsets:
                return offsets
            retries -= 1
            log.warning("Retrying the offsets fetch loop (%d retries left)", retries)
        log.error("Unsuccessful offsets retrieval")
        return {}
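As a usage sketch of the timestamp convention described above (assuming fetcher is an instance of this class and partitions is a list of TopicPartition):

latest = fetcher.offsets(partitions, -1)     # latest available offsets
earliest = fetcher.offsets(partitions, -2)   # earliest available offsets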

    def _send_offset_request(self, partitions, timestamp):
        """Fetch a single offset before the given timestamp for the partition.

        Arguments:
            partitions (iterable of TopicPartition): partitions for which to
                fetch offsets
            timestamp (int): timestamp for fetching offset

        Returns:
            list of Future: resolves to the corresponding offset
        """
        # All partitions are assumed to belong to the same topic here.
        topic = partitions[0].topic
        nodes_per_partitions = {}
        for partition in partitions:
            node_id = self._client.cluster.leader_for_partition(partition)
            if node_id is None:
                log.debug("Partition %s is unknown for fetching offset,"
                          " wait for metadata refresh", partition)
                return [Future().failure(Errors.StaleMetadata(partition))]
            elif node_id == -1:
                log.debug("Leader for partition %s unavailable for fetching offset,"
                          " wait for metadata refresh", partition)
                return [Future().failure(Errors.LeaderNotAvailableError(partition))]
            nodes_per_partitions.setdefault(node_id, []).append(partition)

        # Client returns a future that only fails on network issues
        # so create a separate future and attach a callback to update it
        # based on response error codes

        futures = []
        for node_id, node_partitions in six.iteritems(nodes_per_partitions):
            request = OffsetRequest[0](
                -1, [(topic, [(partition.partition, timestamp, 1)
                              for partition in node_partitions])]
            )
            future_request = Future()
            _f = self._client.send(node_id, request)
            _f.add_callback(self._handle_offset_response, node_partitions,
                            future_request)

            def errback(e, future_request=future_request):
                # Bind the per-iteration future via a default argument so
                # each errback fails its own future (avoids the late-binding
                # closure bug).
                log.error("Offset request errback error %s", e)
                future_request.failure(e)
            _f.add_errback(errback)
            futures.append(future_request)

        return futures

    def _handle_offset_response(self, partitions, future, response):
        """Callback for the response of the list offset call above.

        Arguments:
            partitions (list of TopicPartition): the partitions that were fetched
            future (Future): the future to update based on response
            response (OffsetResponse): response from the server

        Raises:
            AssertionError: if response does not match partition
        """
        assert len(response.topics) == 1, (
            'OffsetResponse should only be for a single topic')
        topic, partition_info = response.topics[0]
        partition_ids = set([part.partition for part in partitions])
        result = []
        for pi in partition_info:
            part, error_code, offsets = pi
            assert topic == partitions[0].topic and part in partition_ids, (
                'OffsetResponse partition does not match OffsetRequest partition')
            error_type = Errors.for_code(error_code)
            if error_type is Errors.NoError:
                assert len(offsets) == 1, 'Expected OffsetResponse with one offset'
                log.debug("Fetched offset %s for partition %d", offsets[0], part)
                result.append((TopicPartition(topic, part), offsets[0]))
            elif error_type in (Errors.NotLeaderForPartitionError,
                                Errors.UnknownTopicOrPartitionError):
                log.debug("Attempt to fetch offsets for partition %s failed due"
                          " to obsolete leadership information, retrying.",
                          str(partitions))
                future.failure(error_type(partitions))
                # Return so the already-failed future is not completed twice.
                return
            else:
                log.warning("Attempt to fetch offsets for partition %s failed due to:"
                            " %s", partitions, error_type)
                future.failure(error_type(partitions))
                return
        future.success(result)

    def fetch_committed_offsets(self, partitions):
        """Fetch the current committed offsets for specified partitions

        Arguments:
            partitions (list of TopicPartition): partitions to fetch

        Returns:
            dict: {TopicPartition: OffsetAndMetadata}
        """
        if not partitions:
            return {}

        while True:
            self._ensure_coordinator_known()

            # contact coordinator to fetch committed offsets
            future = self._send_offset_fetch_request(partitions)
            self._client.poll(future=future)

            if future.succeeded():
                return future.value

            if not future.retriable():
                raise future.exception  # pylint: disable-msg=raising-bad-type

            time.sleep(self.config['retry_backoff_ms'] / 1000.0)

    def _send_offset_fetch_request(self, partitions):
        """Fetch the committed offsets for a set of partitions.

        This is a non-blocking call. The returned future can be polled to get
        the actual offsets returned from the broker.

        Arguments:
            partitions (list of TopicPartition): the partitions to fetch

        Returns:
            Future: resolves to dict of offsets: {TopicPartition: int}
        """
        assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
        assert all(map(lambda k: isinstance(k, TopicPartition), partitions))
        if not partitions:
            return Future().success({})

        elif self._coordinator_unknown():
            return Future().failure(Errors.GroupCoordinatorNotAvailableError)

        node_id = self._coordinator_id

        # Verify node is ready
        if not self._client.ready(node_id):
            log.debug("Node %s not ready -- failing offset fetch request",
                      node_id)
            return Future().failure(Errors.NodeNotReadyError)

        log.debug("Group %s fetching committed offsets for partitions: %s",
                  self.group_id, partitions)
        # construct the request
        topic_partitions = collections.defaultdict(set)
        for tp in partitions:
            topic_partitions[tp.topic].add(tp.partition)

        if self.config['api_version'] >= (0, 8, 2):
            request = OffsetFetchRequest[1](
                self.group_id,
                list(topic_partitions.items())
            )
        else:
            request = OffsetFetchRequest[0](
                self.group_id,
                list(topic_partitions.items())
            )

        # send the request with a callback
        future = Future()
        _f = self._client.send(node_id, request)
        _f.add_callback(self._handle_offset_fetch_response, future)
        _f.add_errback(self._failed_request, node_id, request, future)
        return future

    def _handle_offset_fetch_response(self, future, response):
        offsets = {}
        for topic, partitions in response.topics:
            for partition, offset, metadata, error_code in partitions:
                tp = TopicPartition(topic, partition)
                error_type = Errors.for_code(error_code)
                if error_type is not Errors.NoError:
                    error = error_type()
                    log.debug("Group %s failed to fetch offset for partition"
                              " %s: %s", self.group_id, tp, error)
                    if error_type is Errors.GroupLoadInProgressError:
                        # just retry
                        future.failure(error)
                    elif error_type is Errors.NotCoordinatorForGroupError:
                        # re-discover the coordinator and retry
                        self._coordinator_dead()
                        future.failure(error)
                    elif error_type in (Errors.UnknownMemberIdError,
                                        Errors.IllegalGenerationError):
                        future.failure(error)
                    elif error_type is Errors.UnknownTopicOrPartitionError:
                        log.warning("OffsetFetchRequest -- unknown topic %s"
                                    " (have you committed any offsets yet?)",
                                    topic)
                        continue
                    else:
                        log.error("Unknown error fetching offsets for %s: %s",
                                  tp, error)
                        future.failure(error)
                    return
                elif offset >= 0:
                    # record the position with the offset
                    # (-1 indicates no committed offset to fetch)
                    offsets[tp] = OffsetAndMetadata(offset, metadata)
                else:
                    log.debug("Group %s has no committed offset for partition"
                              " %s", self.group_id, tp)
        future.success(offsets)

    def get(self):
        topic_partitions = self._client.cluster.partitions_for_topic(self.topic)
        if not topic_partitions:
            future = self._client.cluster.request_update()
            log.info("No partitions available, performing metadata update.")
            self._client.poll(future=future)
            return {}
        partitions = [TopicPartition(self.topic, partition_id) for partition_id in topic_partitions]
        offsets = self.offsets(partitions, -1)
        committed = self.fetch_committed_offsets(partitions)
        lags = {}
        for tp, offset in six.iteritems(offsets):
            commit_offset = committed[tp] if tp in committed else 0
            numerical = commit_offset if isinstance(commit_offset, int) else commit_offset.offset
            lag = offset - numerical
            pid = tp.partition if isinstance(tp, TopicPartition) else tp
            log.debug("Lag for %s (%s): %s, %s, %s", self.topic, pid, offset, commit_offset, lag)
            lags[pid] = lag
        return lags
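A usage sketch for OffsetsFetcherAsync; the broker address, group, and topic names are assumptions (group_id and topic are required keys because __init__ reads them from configs):

fetcher = OffsetsFetcherAsync(bootstrap_servers='localhost:9092',
                              group_id='my-group',
                              topic='my-topic')
lags = fetcher.get()    # {partition_id: lag}
for partition_id, lag in lags.items():
    print(partition_id, lag)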
Beispiel #18
0
class KafkaAdminClient(object):
    """A class for administering the Kafka cluster.

    Warning:
        This is an unstable interface that was recently added and is subject to
        change without warning. In particular, many methods currently return
        raw protocol tuples. In future releases, we plan to make these into
        nicer, more pythonic objects. Unfortunately, this will likely break
        those interfaces.

    The KafkaAdminClient class will negotiate for the latest version of each message
    protocol format supported by both the kafka-python client library and the
    Kafka broker. Usage of optional fields from protocol versions that are not
    supported by the broker will result in IncompatibleBrokerVersion exceptions.

    Use of this class requires a minimum broker version >= 0.10.0.0.

    Keyword Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the consumer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client. Also
            submitted to GroupCoordinator for logging with respect to
            consumer group administration. Default: 'kafka-python-{version}'
        reconnect_backoff_ms (int): The amount of time in milliseconds to
            wait before attempting to reconnect to a given host.
            Default: 50.
        reconnect_backoff_max_ms (int): The maximum amount of time in
            milliseconds to wait when reconnecting to a broker that has
            repeatedly failed to connect. If provided, the backoff per host
            will increase exponentially for each consecutive connection
            failure, up to this maximum. To avoid connection storms, a
            randomization factor of 0.2 will be applied to the backoff
            resulting in a random range between 20% below and 20% above
            the computed value. Default: 1000.
        request_timeout_ms (int): Client request timeout in milliseconds.
            Default: 30000.
        connections_max_idle_ms: Close idle connections after the number of
            milliseconds specified by this config. The broker closes idle
            connections after connections.max.idle.ms, so this avoids hitting
            unexpected socket disconnected errors on the client.
            Default: 540000
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        max_in_flight_requests_per_connection (int): Requests are pipelined
            to kafka brokers up to this number of maximum requests per
            broker connection. Default: 5.
        receive_buffer_bytes (int): The size of the TCP receive buffer
            (SO_RCVBUF) to use when reading data. Default: None (relies on
            system defaults). Java client defaults to 32768.
        send_buffer_bytes (int): The size of the TCP send buffer
            (SO_SNDBUF) to use when sending data. Default: None (relies on
            system defaults). Java client defaults to 131072.
        socket_options (list): List of tuple-arguments to socket.setsockopt
            to apply to broker connection sockets. Default:
            [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
            Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
            socket connections. If provided, all other ssl_* configurations
            will be ignored. Default: None.
        ssl_check_hostname (bool): Flag to configure whether SSL handshake
            should verify that the certificate matches the broker's hostname.
            Default: True.
        ssl_cafile (str): Optional filename of CA file to use in certificate
            verification. Default: None.
        ssl_certfile (str): Optional filename of file in PEM format containing
            the client certificate, as well as any CA certificates needed to
            establish the certificate's authenticity. Default: None.
        ssl_keyfile (str): Optional filename containing the client private key.
            Default: None.
        ssl_password (str): Optional password to be used when loading the
            certificate chain. Default: None.
        ssl_crlfile (str): Optional filename containing the CRL to check for
            certificate expiration. By default, no CRL check is done. When
            providing a file, only the leaf certificate will be checked against
            this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
            Default: None.
        api_version (tuple): Specify which Kafka API version to use. If set
            to None, KafkaClient will attempt to infer the broker version by
            probing various APIs. Example: (0, 10, 2). Default: None
        api_version_auto_timeout_ms (int): number of milliseconds to wait
            before throwing a timeout exception from the constructor when
            checking the broker api version. Only applies if api_version
            is None.
        selector (selectors.BaseSelector): Provide a specific selector
            implementation to use for I/O multiplexing.
            Default: selectors.DefaultSelector
        metrics (kafka.metrics.Metrics): Optionally provide a metrics
            instance for capturing network IO stats. Default: None.
        metric_group_prefix (str): Prefix for metric names. Default: ''
        sasl_mechanism (str): Authentication mechanism when security_protocol
            is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
            PLAIN, GSSAPI, OAUTHBEARER.
        sasl_plain_username (str): username for sasl PLAIN authentication.
            Required if sasl_mechanism is PLAIN.
        sasl_plain_password (str): password for sasl PLAIN authentication.
            Required if sasl_mechanism is PLAIN.
        sasl_kerberos_service_name (str): Service name to include in GSSAPI
            sasl mechanism handshake. Default: 'kafka'
        sasl_oauth_token_provider (AbstractTokenProvider): OAuthBearer token provider
            instance. (See kafka.oauth.abstract). Default: None

    """
    DEFAULT_CONFIG = {
        # client configs
        'bootstrap_servers': 'localhost',
        'client_id': 'kafka-python-' + __version__,
        'request_timeout_ms': 30000,
        'connections_max_idle_ms': 9 * 60 * 1000,
        'reconnect_backoff_ms': 50,
        'reconnect_backoff_max_ms': 1000,
        'max_in_flight_requests_per_connection': 5,
        'receive_buffer_bytes': None,
        'send_buffer_bytes': None,
        'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
        'sock_chunk_bytes': 4096,  # undocumented experimental option
        'sock_chunk_buffer_count': 1000,  # undocumented experimental option
        'retry_backoff_ms': 100,
        'metadata_max_age_ms': 300000,
        'security_protocol': 'PLAINTEXT',
        'ssl_context': None,
        'ssl_check_hostname': True,
        'ssl_cafile': None,
        'ssl_certfile': None,
        'ssl_keyfile': None,
        'ssl_password': None,
        'ssl_crlfile': None,
        'api_version': None,
        'api_version_auto_timeout_ms': 2000,
        'selector': selectors.DefaultSelector,
        'sasl_mechanism': None,
        'sasl_plain_username': None,
        'sasl_plain_password': None,
        'sasl_kerberos_service_name': 'kafka',
        'sasl_oauth_token_provider': None,

        # metrics configs
        'metric_reporters': [],
        'metrics_num_samples': 2,
        'metrics_sample_window_ms': 30000,
    }

    def __init__(self, **configs):
        log.debug("Starting KafkaAdminClient with configuration: %s", configs)
        extra_configs = set(configs).difference(self.DEFAULT_CONFIG)
        if extra_configs:
            raise KafkaConfigurationError("Unrecognized configs: {}".format(extra_configs))

        self.config = copy.copy(self.DEFAULT_CONFIG)
        self.config.update(configs)

        # Configure metrics
        metrics_tags = {'client-id': self.config['client_id']}
        metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                     time_window_ms=self.config['metrics_sample_window_ms'],
                                     tags=metrics_tags)
        reporters = [reporter() for reporter in self.config['metric_reporters']]
        self._metrics = Metrics(metric_config, reporters)

        self._client = KafkaClient(metrics=self._metrics,
                                   metric_group_prefix='admin',
                                   **self.config)

        # Get auto-discovered version from client if necessary
        if self.config['api_version'] is None:
            self.config['api_version'] = self._client.config['api_version']

        self._closed = False
        self._refresh_controller_id()
        log.debug("KafkaAdminClient started.")

    def close(self):
        """Close the KafkaAdminClient connection to the Kafka broker."""
        if not hasattr(self, '_closed') or self._closed:
            log.info("KafkaAdminClient already closed.")
            return

        self._metrics.close()
        self._client.close()
        self._closed = True
        log.debug("KafkaAdminClient is now closed.")

    def _matching_api_version(self, operation):
        """Find the latest version of the protocol operation supported by both
        this library and the broker.

        This resolves to the lesser of either the latest api version this
        library supports, or the max version supported by the broker.

        :param operation: A list of protocol operation versions from kafka.protocol.
        :return: The max matching version number between client and broker.
        """
        version = min(len(operation) - 1,
                      self._client.get_api_versions()[operation[0].API_KEY][1])
        if version < self._client.get_api_versions()[operation[0].API_KEY][0]:
            # max library version is less than min broker version. Currently,
            # no Kafka versions specify a min msg version. Maybe in the future?
            raise IncompatibleBrokerVersion(
                "No version of the '{}' Kafka protocol is supported by both the client and broker."
                .format(operation[0].__name__))
        return version
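A worked example of the negotiation arithmetic, with illustrative numbers only:

# Library ships v0..v5 of a request; broker advertises (min=1, max=8).
library_max = 5                          # len(operation) - 1
broker_min, broker_max = 1, 8
version = min(library_max, broker_max)   # -> 5
assert version >= broker_min             # else IncompatibleBrokerVersion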

    def _validate_timeout(self, timeout_ms):
        """Validate the timeout is set or use the configuration default.

        :param timeout_ms: The timeout provided by api call, in milliseconds.
        :return: The timeout to use for the operation.
        """
        return timeout_ms or self.config['request_timeout_ms']

    def _refresh_controller_id(self):
        """Determine the Kafka cluster controller."""
        version = self._matching_api_version(MetadataRequest)
        if 1 <= version <= 6:
            request = MetadataRequest[version]()
            response = self._send_request_to_node(self._client.least_loaded_node(), request)
            controller_id = response.controller_id
            # verify the controller is new enough to support our requests
            controller_version = self._client.check_version(controller_id)
            if controller_version < (0, 10, 0):
                raise IncompatibleBrokerVersion(
                    "The controller appears to be running Kafka {}. KafkaAdminClient requires brokers >= 0.10.0.0."
                    .format(controller_version))
            self._controller_id = controller_id
        else:
            raise UnrecognizedBrokerVersion(
                "Kafka Admin interface cannot determine the controller using MetadataRequest_v{}."
                .format(version))

    def _find_group_coordinator_id(self, group_id):
        """Find the broker node_id of the coordinator of the given group.

        Sends a FindCoordinatorRequest message to the cluster. Will block until
        the FindCoordinatorResponse is received. Any errors are immediately
        raised.

        :param group_id: The consumer group ID. This is typically the group
            name as a string.
        :return: The node_id of the broker that is the coordinator.
        """
        # Note: Java may change how this is implemented in KAFKA-6791.
        #
        # TODO add support for dynamically picking version of
        # GroupCoordinatorRequest which was renamed to FindCoordinatorRequest.
        # When I experimented with this, GroupCoordinatorResponse_v1 didn't
        # match GroupCoordinatorResponse_v0 and I couldn't figure out why.
        gc_request = GroupCoordinatorRequest[0](group_id)
        gc_response = self._send_request_to_node(self._client.least_loaded_node(), gc_request)
        # use the extra error checking in add_group_coordinator() rather than
        # immediately returning the group coordinator.
        success = self._client.cluster.add_group_coordinator(group_id, gc_response)
        if not success:
            error_type = Errors.for_code(gc_response.error_code)
            assert error_type is not Errors.NoError
            # Note: When error_type.retriable, Java will retry... see
            # KafkaAdminClient's handleFindCoordinatorError method
            raise error_type(
                "Could not identify group coordinator for group_id '{}' from response '{}'."
                .format(group_id, gc_response))
        group_coordinator = self._client.cluster.coordinator_for_group(group_id)
        # will be None if the coordinator was never populated, which should never happen here
        assert group_coordinator is not None
        # will be -1 if add_group_coordinator() failed... but by this point the
        # error should have been raised.
        assert group_coordinator != -1
        return group_coordinator

    def _send_request_to_node(self, node_id, request):
        """Send a Kafka protocol message to a specific broker.

        Will block until the message result is received.

        :param node_id: The broker id to which to send the message.
        :param request: The message to send.
        :return: The Kafka protocol response for the message.
        :exception: The exception if the message could not be sent.
        """
        while not self._client.ready(node_id):
            # poll until the connection to broker is ready, otherwise send()
            # will fail with NodeNotReadyError
            self._client.poll()
        future = self._client.send(node_id, request)
        self._client.poll(future=future)
        if future.succeeded():
            return future.value
        else:
            raise future.exception  # pylint: disable-msg=raising-bad-type

    def _send_request_to_controller(self, request):
        """Send a Kafka protocol message to the cluster controller.

        Will block until the message result is received.

        :param request: The message to send.
        :return: The Kafka protocol response for the message.
        """
        tries = 2  # in case our cached self._controller_id is outdated
        while tries:
            tries -= 1
            response = self._send_request_to_node(self._controller_id, request)
            # In Java, the error fieldname is inconsistent:
            #  - CreateTopicsResponse / CreatePartitionsResponse uses topic_errors
            #  - DeleteTopicsResponse uses topic_error_codes
            # So this is a little brittle in that it assumes all responses have
            # one of these attributes and that they always unpack into
            # (topic, error_code) tuples.
            topic_error_tuples = (response.topic_errors if hasattr(response, 'topic_errors')
                else response.topic_error_codes)
            # Also small py2/py3 compatibility -- py3 can ignore extra values
            # during unpack via: for x, y, *rest in list_of_values. py2 cannot.
            # So for now we have to map across the list and explicitly drop any
            # extra values (usually the error_message)
            for topic, error_code in map(lambda e: e[:2], topic_error_tuples):
                error_type = Errors.for_code(error_code)
                if tries and error_type is NotControllerError:
                    # No need to inspect the rest of the errors for
                    # non-retriable errors because NotControllerError should
                    # either be thrown for all errors or no errors.
                    self._refresh_controller_id()
                    break
                elif error_type is not Errors.NoError:
                    raise error_type(
                        "Request '{}' failed with response '{}'."
                        .format(request, response))
            else:
                return response
        raise RuntimeError("This should never happen, please file a bug with full stacktrace if encountered")

    @staticmethod
    def _convert_new_topic_request(new_topic):
        return (
            new_topic.name,
            new_topic.num_partitions,
            new_topic.replication_factor,
            [
                (partition_id, replicas) for partition_id, replicas in new_topic.replica_assignments.items()
            ],
            [
                (config_key, config_value) for config_key, config_value in new_topic.topic_configs.items()
            ]
        )

    def create_topics(self, new_topics, timeout_ms=None, validate_only=False):
        """Create new topics in the cluster.

        :param new_topics: A list of NewTopic objects.
        :param timeout_ms: Milliseconds to wait for new topics to be created
            before the broker returns.
        :param validate_only: If True, don't actually create new topics.
            Not supported by all versions. Default: False
        :return: Appropriate version of CreateTopicsResponse class.
        """
        version = self._matching_api_version(CreateTopicsRequest)
        timeout_ms = self._validate_timeout(timeout_ms)
        if version == 0:
            if validate_only:
                raise IncompatibleBrokerVersion(
                    "validate_only requires CreateTopicsRequest >= v1, which is not supported by Kafka {}."
                    .format(self.config['api_version']))
            request = CreateTopicsRequest[version](
                create_topic_requests=[self._convert_new_topic_request(new_topic) for new_topic in new_topics],
                timeout=timeout_ms
            )
        elif version <= 2:
            request = CreateTopicsRequest[version](
                create_topic_requests=[self._convert_new_topic_request(new_topic) for new_topic in new_topics],
                timeout=timeout_ms,
                validate_only=validate_only
            )
        else:
            raise NotImplementedError(
                "Support for CreateTopics v{} has not yet been added to KafkaAdminClient."
                .format(version))
        # TODO convert structs to a more pythonic interface
        # TODO raise exceptions if errors
        return self._send_request_to_controller(request)
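A usage sketch for create_topics, assuming kafka-python's NewTopic helper from kafka.admin; the topic name and sizing are assumptions:

from kafka.admin import NewTopic

admin = KafkaAdminClient(bootstrap_servers='localhost:9092')
admin.create_topics([NewTopic(name='my-topic',
                              num_partitions=3,
                              replication_factor=1)])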

    def delete_topics(self, topics, timeout_ms=None):
        """Delete topics from the cluster.

        :param topics: A list of topic name strings.
        :param timeout_ms: Milliseconds to wait for topics to be deleted
            before the broker returns.
        :return: Appropriate version of DeleteTopicsResponse class.
        """
        version = self._matching_api_version(DeleteTopicsRequest)
        timeout_ms = self._validate_timeout(timeout_ms)
        if version <= 1:
            request = DeleteTopicsRequest[version](
                topics=topics,
                timeout=timeout_ms
            )
            response = self._send_request_to_controller(request)
        else:
            raise NotImplementedError(
                "Support for DeleteTopics v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return response

    # list topics functionality is in ClusterMetadata
    # Note: if implemented here, send the request to the least_loaded_node()

    # describe topics functionality is in ClusterMetadata
    # Note: if implemented here, send the request to the controller

    # describe cluster functionality is in ClusterMetadata
    # Note: if implemented here, send the request to the least_loaded_node()

    # describe_acls protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # create_acls protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # delete_acls protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    @staticmethod
    def _convert_describe_config_resource_request(config_resource):
        return (
            config_resource.resource_type,
            config_resource.name,
            [
                config_key for config_key, config_value in config_resource.configs.items()
            ] if config_resource.configs else None
        )

    def describe_configs(self, config_resources, include_synonyms=False):
        """Fetch configuration parameters for one or more Kafka resources.

        :param config_resources: A list of ConfigResource objects.
            Any keys in ConfigResource.configs dict will be used to filter the
            result. Setting the configs dict to None will get all values. An
            empty dict will get zero values (as per Kafka protocol).
        :param include_synonyms: If True, return synonyms in response. Not
            supported by all versions. Default: False.
        :return: Appropriate version of DescribeConfigsResponse class.
        """
        version = self._matching_api_version(DescribeConfigsRequest)
        if version == 0:
            if include_synonyms:
                raise IncompatibleBrokerVersion(
                    "include_synonyms requires DescribeConfigsRequest >= v1, which is not supported by Kafka {}."
                    .format(self.config['api_version']))
            request = DescribeConfigsRequest[version](
                resources=[self._convert_describe_config_resource_request(config_resource) for config_resource in config_resources]
            )
        elif version == 1:
            request = DescribeConfigsRequest[version](
                resources=[self._convert_describe_config_resource_request(config_resource) for config_resource in config_resources],
                include_synonyms=include_synonyms
            )
        else:
            raise NotImplementedError(
                "Support for DescribeConfigs v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return self._send_request_to_node(self._client.least_loaded_node(), request)
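A usage sketch for describe_configs, assuming kafka-python's ConfigResource and ConfigResourceType helpers; the topic name is an assumption, and leaving configs unset fetches all values as noted in the docstring above:

from kafka.admin import ConfigResource, ConfigResourceType

resource = ConfigResource(ConfigResourceType.TOPIC, 'my-topic')
response = admin.describe_configs([resource])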

    @staticmethod
    def _convert_alter_config_resource_request(config_resource):
        return (
            config_resource.resource_type,
            config_resource.name,
            [
                (config_key, config_value) for config_key, config_value in config_resource.configs.items()
            ]
        )

    def alter_configs(self, config_resources):
        """Alter configuration parameters of one or more Kafka resources.

        Warning:
            This is currently broken for BROKER resources because those must be
            sent to that specific broker, versus this always picks the
            least-loaded node. See the comment in the source code for details.
            We would happily accept a PR fixing this.

        :param config_resources: A list of ConfigResource objects.
        :return: Appropriate version of AlterConfigsResponse class.
        """
        version = self._matching_api_version(AlterConfigsRequest)
        if version == 0:
            request = AlterConfigsRequest[version](
                resources=[self._convert_alter_config_resource_request(config_resource) for config_resource in config_resources]
            )
        else:
            raise NotImplementedError(
                "Support for AlterConfigs v{} has not yet been added to KafkaAdminClient."
                .format(version))
        # TODO the Java client has the note:
        # // We must make a separate AlterConfigs request for every BROKER resource we want to alter
        # // and send the request to that specific broker. Other resources are grouped together into
        # // a single request that may be sent to any broker.
        #
        # So this is currently broken as it always sends to the least_loaded_node()
        return self._send_request_to_node(self._client.least_loaded_node(), request)

    # alter replica logs dir protocol not yet implemented
    # Note: have to lookup the broker with the replica assignment and send the request to that broker

    # describe log dirs protocol not yet implemented
    # Note: have to lookup the broker with the replica assignment and send the request to that broker

    @staticmethod
    def _convert_create_partitions_request(topic_name, new_partitions):
        return (
            topic_name,
            (
                new_partitions.total_count,
                new_partitions.new_assignments
            )
        )

    def create_partitions(self, topic_partitions, timeout_ms=None, validate_only=False):
        """Create additional partitions for an existing topic.

        :param topic_partitions: A map of topic name strings to NewPartitions objects.
        :param timeout_ms: Milliseconds to wait for new partitions to be
            created before the broker returns.
        :param validate_only: If True, don't actually create new partitions.
            Default: False
        :return: Appropriate version of CreatePartitionsResponse class.
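
        Example (a minimal usage sketch; the broker address, topic name, and
        partition count are illustrative assumptions)::

            from kafka.admin import KafkaAdminClient, NewPartitions

            admin_client = KafkaAdminClient(bootstrap_servers='localhost:9092')
            # Grow the topic to 6 partitions; the cluster assigns the replicas
            response = admin_client.create_partitions(
                {'my-topic': NewPartitions(total_count=6)})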
        """
        version = self._matching_api_version(CreatePartitionsRequest)
        timeout_ms = self._validate_timeout(timeout_ms)
        if version == 0:
            request = CreatePartitionsRequest[version](
                topic_partitions=[self._convert_create_partitions_request(topic_name, new_partitions) for topic_name, new_partitions in topic_partitions.items()],
                timeout=timeout_ms,
                validate_only=validate_only
            )
        else:
            raise NotImplementedError(
                "Support for CreatePartitions v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return self._send_request_to_controller(request)

    # delete records protocol not yet implemented
    # Note: send the request to the partition leaders

    # create delegation token protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # renew delegation token protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # expire delegation_token protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # describe delegation_token protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    def describe_consumer_groups(self, group_ids, group_coordinator_id=None):
        """Describe a set of consumer groups.

        Any errors are immediately raised.

        :param group_ids: A list of consumer group IDs. These are typically the
            group names as strings.
        :param group_coordinator_id: The node_id of the groups' coordinator
            broker. If set to None, it will query the cluster for each group to
            find that group's coordinator. Explicitly specifying this can be
            useful for avoiding extra network round trips if you already know
            the group coordinator. This is only useful when all the group_ids
            have the same coordinator, otherwise it will error. Default: None.
        :return: A list of group descriptions. For now the group descriptions
            are the raw results from the DescribeGroupsResponse. Long-term, we
            plan to change this to return namedtuples as well as decoding the
            partition assignments.
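
        Example (a minimal usage sketch; the broker address and group name
        are illustrative assumptions)::

            from kafka.admin import KafkaAdminClient

            admin_client = KafkaAdminClient(bootstrap_servers='localhost:9092')
            # For now each entry is the raw DescribeGroupsResponse group struct
            for group in admin_client.describe_consumer_groups(['my-group']):
                print(group)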
        """
        group_descriptions = []
        version = self._matching_api_version(DescribeGroupsRequest)
        for group_id in group_ids:
            if group_coordinator_id is not None:
                this_groups_coordinator_id = group_coordinator_id
            else:
                this_groups_coordinator_id = self._find_group_coordinator_id(group_id)
            if version <= 1:
                # Note: KAFKA-6788 A potential optimization is to group the
                # request per coordinator and send one request with a list of
                # all consumer groups. Java still hasn't implemented this
                # because the error checking is hard to get right when some
                # groups error and others don't.
                request = DescribeGroupsRequest[version](groups=(group_id,))
                response = self._send_request_to_node(this_groups_coordinator_id, request)
                assert len(response.groups) == 1
                # TODO need to implement converting the response tuple into
                # a more accessible interface like a namedtuple and then stop
                # hardcoding tuple indices here. Several Java examples,
                # including KafkaAdminClient.java
                group_description = response.groups[0]
                error_code = group_description[0]
                error_type = Errors.for_code(error_code)
                # Java has the note: KAFKA-6789, we can retry based on the error code
                if error_type is not Errors.NoError:
                    raise error_type(
                        "Request '{}' failed with response '{}'."
                        .format(request, response))
                # TODO Java checks the group protocol type, and if consumer
                # (ConsumerProtocol.PROTOCOL_TYPE) or empty string, it decodes
                # the members' partition assignments... that hasn't yet been
                # implemented here so just return the raw struct results
                group_descriptions.append(group_description)
            else:
                raise NotImplementedError(
                    "Support for DescribeGroups v{} has not yet been added to KafkaAdminClient."
                    .format(version))
        return group_descriptions

    def list_consumer_groups(self, broker_ids=None):
        """List all consumer groups known to the cluster.

        This returns a list of Consumer Group tuples. The tuples are
        composed of the consumer group name and the consumer group protocol
        type.

        Only consumer groups that store their offsets in Kafka are returned.
        The protocol type will be an empty string for groups created using
        Kafka < 0.9 APIs because, although they store their offsets in Kafka,
        they don't use Kafka for group coordination. For groups created using
        Kafka >= 0.9, the protocol type will typically be "consumer".

        As soon as any error is encountered, it is immediately raised.

        :param broker_ids: A list of broker node_ids to query for consumer
            groups. If set to None, will query all brokers in the cluster.
            Explicitly specifying broker(s) can be useful for determining which
            consumer groups are coordinated by those broker(s). Default: None
        :return list: A list of (group_name, protocol_type) tuples, one per
            consumer group.
        :exception GroupCoordinatorNotAvailableError: The coordinator is not
            available, so cannot process requests.
        :exception GroupLoadInProgressError: The coordinator is loading and
            hence can't process requests.
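
        Example (a minimal usage sketch; the broker address is an
        illustrative assumption)::

            from kafka.admin import KafkaAdminClient

            admin_client = KafkaAdminClient(bootstrap_servers='localhost:9092')
            for group_name, protocol_type in admin_client.list_consumer_groups():
                print(group_name, protocol_type)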
        """
        # While we return a list, internally use a set to prevent duplicates
        # because if a group coordinator fails after being queried, and its
        # consumer groups move to new brokers that haven't yet been queried,
        # then the same group could be returned by multiple brokers.
        consumer_groups = set()
        if broker_ids is None:
            broker_ids = [broker.nodeId for broker in self._client.cluster.brokers()]
        version = self._matching_api_version(ListGroupsRequest)
        if version <= 2:
            request = ListGroupsRequest[version]()
            for broker_id in broker_ids:
                response = self._send_request_to_node(broker_id, request)
                error_type = Errors.for_code(response.error_code)
                if error_type is not Errors.NoError:
                    raise error_type(
                        "Request '{}' failed with response '{}'."
                        .format(request, response))
                consumer_groups.update(response.groups)
        else:
            raise NotImplementedError(
                "Support for ListGroups v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return list(consumer_groups)

    def list_consumer_group_offsets(self, group_id, group_coordinator_id=None,
                                    partitions=None):
        """Fetch Consumer Group Offsets.

        Note:
            This does not verify that the group_id or partitions actually
            exist in the cluster.

        As soon as any error is encountered, it is immediately raised.

        :param group_id: The consumer group id name for which to fetch offsets.
        :param group_coordinator_id: The node_id of the group's coordinator
            broker. If set to None, will query the cluster to find the group
            coordinator. Explicitly specifying this can be useful for avoiding
            an extra network round trip if you already know the group
            coordinator. Default: None.
        :param partitions: A list of TopicPartitions for which to fetch
            offsets. On brokers >= 0.10.2, this can be set to None to fetch all
            known offsets for the consumer group. Default: None.
        :return dictionary: A dictionary with TopicPartition keys and
            OffsetAndMetadata values. Partitions that are not specified and for
            which the group_id does not have a recorded offset are omitted. An
            offset value of `-1` indicates the group_id has no offset for that
            TopicPartition. A `-1` can only happen for partitions that are
            explicitly specified.
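
        Example (a minimal usage sketch; the broker address, group name,
        topic, and partition number are illustrative assumptions)::

            from kafka import TopicPartition
            from kafka.admin import KafkaAdminClient

            admin_client = KafkaAdminClient(bootstrap_servers='localhost:9092')
            offsets = admin_client.list_consumer_group_offsets(
                'my-group',
                partitions=[TopicPartition('my-topic', 0)])
            for tp, offset_meta in offsets.items():
                print(tp.topic, tp.partition, offset_meta.offset)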
        """
        group_offsets_listing = {}
        if group_coordinator_id is None:
            group_coordinator_id = self._find_group_coordinator_id(group_id)
        version = self._matching_api_version(OffsetFetchRequest)
        if version <= 3:
            if partitions is None:
                if version <= 1:
                    raise ValueError(
                        "OffsetFetchRequest_v{} requires specifying the "
                        "partitions for which to fetch offsets. Omitting the "
                        "partitions is only supported on brokers >= 0.10.2. "
                        "For details, see KIP-88.".format(version))
                topics_partitions = None
            else:
                # transform from [TopicPartition("t1", 1), TopicPartition("t1", 2)] to [("t1", [1, 2])]
                topics_partitions_dict = defaultdict(set)
                for topic, partition in partitions:
                    topics_partitions_dict[topic].add(partition)
                topics_partitions = list(six.iteritems(topics_partitions_dict))
            request = OffsetFetchRequest[version](group_id, topics_partitions)
            response = self._send_request_to_node(group_coordinator_id, request)
            if version > 1:  # OffsetFetchResponse_v1 lacks a top-level error_code
                error_type = Errors.for_code(response.error_code)
                if error_type is not Errors.NoError:
                    # optionally we could retry if error_type.retriable
                    raise error_type(
                        "Request '{}' failed with response '{}'."
                        .format(request, response))
            # transform response into a dictionary with TopicPartition keys and
            # OffsetAndMetadata values--this is what the Java AdminClient returns
            # (rename the loop variable so it doesn't shadow the `partitions` arg)
            for topic, partition_data in response.topics:
                for partition, offset, metadata, error_code in partition_data:
                    error_type = Errors.for_code(error_code)
                    if error_type is not Errors.NoError:
                        raise error_type(
                            "Unable to fetch offsets for group_id {}, topic {}, partition {}"
                            .format(group_id, topic, partition))
                    group_offsets_listing[TopicPartition(topic, partition)] = OffsetAndMetadata(offset, metadata)
        else:
            raise NotImplementedError(
                "Support for OffsetFetch v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return group_offsets_listing