Example #1
def checkConsumerGroupName(self):
    # list_consumer_groups() returns (group_id, protocol_type) tuples,
    # so compare against the expected pair directly.
    __kc = KafkaAdminClient(bootstrap_servers=self.BOOTSTRAP_SERVERS)
    cgnTuple = (self.GROUP, "consumer")
    for i in __kc.list_consumer_groups():
        if cgnTuple == i:
            return True
    return False
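In kafka-python, list_consumer_groups() returns (group_id, protocol_type) tuples, so the same membership check can also be written against the group ids alone. A minimal standalone sketch (the broker address and group name below are placeholders, not taken from the example):

from kafka import KafkaAdminClient

def consumer_group_exists(bootstrap_servers, group_name):
    # Hypothetical helper; mirrors the check above using only the group ids.
    admin = KafkaAdminClient(bootstrap_servers=bootstrap_servers)
    try:
        return group_name in (group_id for group_id, _ in admin.list_consumer_groups())
    finally:
        admin.close()

# consumer_group_exists("localhost:9092", "my-consumer-group")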
Example #2
File: kafka.py  Project: yutiansut/feast
def check_consumer_exist(bootstrap_servers, topic_name):
    admin = KafkaAdminClient(bootstrap_servers=bootstrap_servers)
    # Describe only the Spark structured-streaming consumer groups.
    consumer_groups = admin.describe_consumer_groups(group_ids=[
        group_id for group_id, _ in admin.list_consumer_groups()
        if group_id.startswith("spark-kafka-source")
    ])
    # Collect every topic that any group member is subscribed to.
    subscriptions = {
        subscription
        for group in consumer_groups for member in group.members
        if not isinstance(member.member_metadata, bytes)
        for subscription in member.member_metadata.subscription
    }
    return topic_name in subscriptions
Example #3
    def groups():

        # Optional substring filter passed as ?filter=<text>
        filter = flask.request.args.get('filter')
        if filter is None:
            filter = ""

        admin_client = KafkaAdminClient(bootstrap_servers=brokerArray)
        # list_consumer_groups() returns (group_id, protocol_type) tuples;
        # describe_consumer_groups() expects plain group-id strings.
        consumer_groups = admin_client.list_consumer_groups()
        describeGroups = admin_client.describe_consumer_groups(
            [group_id for group_id, _ in consumer_groups])
        log(describeGroups)

        returnGroups = []

        for n in consumer_groups:
            if filter in n[0]:
                returnGroups.append({"name": n[0]})
        return json.dumps(returnGroups)
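The handler above is shown without its surrounding Flask wiring. A minimal sketch of how such a route might be registered (the app object, brokerArray value, and /groups path are assumptions, not taken from the original project):

import json

import flask
from kafka import KafkaAdminClient

app = flask.Flask(__name__)
brokerArray = ["localhost:9092"]  # placeholder broker list

@app.route("/groups")
def groups():
    # Same idea as the example above: list groups and filter by substring.
    name_filter = flask.request.args.get('filter') or ""
    admin_client = KafkaAdminClient(bootstrap_servers=brokerArray)
    consumer_groups = admin_client.list_consumer_groups()
    return json.dumps([{"name": group_id}
                       for group_id, _ in consumer_groups
                       if name_filter in group_id])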
Example #4
class KafkaUtils(object):
    def __init__(self, bootstrap_servers: list, topic: str, group_id: str):
        self.producer = KafkaProducer(bootstrap_servers=bootstrap_servers,
                                      api_version=(5, 5, 1),
                                      request_timeout_ms=1000)
        self.consumer = KafkaConsumer(bootstrap_servers=bootstrap_servers)
        self.admin_client = KafkaAdminClient(
            bootstrap_servers=bootstrap_servers)
        self.bootstrap_servers = bootstrap_servers
        self.topic = topic
        self.group_id = group_id

    def has_consumer_group(self) -> bool:
        # list_consumer_groups() returns (group_id, protocol_type) tuples.
        for group in self.admin_client.list_consumer_groups():
            if group[0] == self.group_id:
                return True
        return False

    def wait_until_consumer_group(self):
        do_until_true_with_timeout(self.has_consumer_group)

    def consume_messages_and_close(self):
        tmp_consumer = KafkaConsumer(self.topic,
                                     bootstrap_servers=self.bootstrap_servers,
                                     auto_offset_reset='earliest',
                                     group_id=self.group_id,
                                     consumer_timeout_ms=5000,
                                     enable_auto_commit=True)
        for msg in tmp_consumer:
            log.info(f"Found message [ {msg.value} ]")
        tmp_consumer.close()

    def ensure_topic_created(self):
        try:
            self.admin_client.create_topics([NewTopic(self.topic, 2, 1)])
        except TopicAlreadyExistsError:
            pass

    def _produce_record_sync(self, key: str, value: str):
        future = self.producer.send(self.topic, str.encode(value),
                                    str.encode(key))
        try:
            # Block until the broker acknowledges the record.
            future.get(timeout=5)
            self.producer.flush(5)
        except KafkaError as e:
            log.warning(f"Could not produce Kafka record! {e}")
            raise e

    def produce_element_with_delay(self, delay_ms: int):
        key = uuid()
        log.info(
            f"Producing element with key [ {key} ] and delay [ {delay_ms} ]")
        self._produce_record_sync(key, str(delay_ms))

    def _get_topic_partitions(self) -> list[TopicPartition]:
        return [
            TopicPartition(self.topic, partition)
            for partition in self.consumer.partitions_for_topic(self.topic)
        ]

    def get_latest_offsets(self) -> dict[int, int]:
        return convert_to_ordered_dict({
            topic_partition.partition: offset
            for (topic_partition, offset) in self.consumer.end_offsets(
                self._get_topic_partitions()).items()
        })

    def get_latest_offset_for_partition(self, partition: int) -> int:
        latest_offsets = self.get_latest_offsets()
        return latest_offsets.get(partition, -1)

    def get_offsets(self) -> dict[int, int]:
        return convert_to_ordered_dict({
            topic_partition.partition: offset_meta.offset
            for (topic_partition,
                 offset_meta) in self.admin_client.list_consumer_group_offsets(
                     self.group_id).items()
        })

    def get_offset_difference(self) -> OffsetDifference:
        return OffsetDifference(self.get_offsets(), self.get_latest_offsets())

    def wait_for_offset_catchup(self, timeout_seconds: int = 60):
        end_time = time.time() + timeout_seconds
        while time.time() < end_time:
            try:
                self.assert_group_up_to_date()
                return
            except Exception as e:
                log.info(e)
            time.sleep(1)
        raise Exception("Timed out!")

    def assert_group_up_to_date(self):
        assert self.get_offset_difference().is_up_to_date()

    def ensure_not_up_to_date_for_n_seconds(self, seconds: int):
        end_time = time.time() + seconds
        while time.time() < end_time:
            offset_difference = self.get_offset_difference()
            log.info("Offset difference: " + str(offset_difference))
            if offset_difference.is_up_to_date():
                raise Exception("Offsets are up to date!")
            time.sleep(2)
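The KafkaUtils class above depends on several names that are not shown in the snippet (its imports plus log, uuid, do_until_true_with_timeout, convert_to_ordered_dict and OffsetDifference). None of these helpers are part of kafka-python; the following is a minimal sketch of what they might look like, purely to make the example self-contained:

# Imports the class itself appears to need.
import logging
import time
import uuid as _uuid
from collections import OrderedDict

from kafka import KafkaProducer, KafkaConsumer, KafkaAdminClient, TopicPartition
from kafka.admin import NewTopic
from kafka.errors import KafkaError, TopicAlreadyExistsError

log = logging.getLogger(__name__)


def uuid():
    # Hypothetical helper: random key for produced records.
    return str(_uuid.uuid4())


def convert_to_ordered_dict(mapping):
    # Hypothetical helper: partition -> offset mapping in sorted key order.
    return OrderedDict(sorted(mapping.items()))


def do_until_true_with_timeout(predicate, timeout_seconds=60, poll_interval=1.0):
    # Hypothetical helper: poll the predicate until it returns True or time runs out.
    end_time = time.time() + timeout_seconds
    while time.time() < end_time:
        if predicate():
            return
        time.sleep(poll_interval)
    raise TimeoutError("Condition not met within timeout")


class OffsetDifference:
    # Hypothetical helper: compares committed group offsets with latest topic offsets.
    def __init__(self, committed, latest):
        self.committed = committed
        self.latest = latest

    def is_up_to_date(self):
        return all(self.committed.get(partition, 0) >= offset
                   for partition, offset in self.latest.items())

    def __str__(self):
        return f"committed={self.committed} latest={self.latest}"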
Example #5
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from kafka import KafkaAdminClient

servers = ['192.168.5.110:9092']

adminClient = KafkaAdminClient(bootstrap_servers=servers)

# Delete the 'test' topic, then print the consumer groups known to the cluster.
adminClient.delete_topics(['test'])

print(adminClient.list_consumer_groups())

adminClient.close()
Example #6
"""
Description:
    Test for Kafka features:
        1. Kafka Topic and Topic key
        2. Kafka Partitions
"""

from kafka import KafkaConsumer, TopicPartition, KafkaAdminClient

__consumer = KafkaConsumer(bootstrap_servers="localhost:9092")
con_topics = __consumer.topics()
part_of_topic = __consumer.partitions_for_topic(topic="logging_test")
print(f"All topics: {con_topics}")
print(f"All topics partition: {part_of_topic}")
# __test_topic = TopicPartition(topic="test", partition=1)

__admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
list_topics = __admin_client.list_topics()
list_consumer_groups = __admin_client.list_consumer_groups()
print(f"list_topics: {list_topics}")
print(f"list_consumer_groups: {list_consumer_groups}")
Example #7
class ClientAdmin:
    """
    封装kafka-python KafkaAdminClient
    """
    Num_Partitions = 3
    Replication_Factor = 3

    def __init__(self):
        pass

    def __enter__(self):
        self.cfg = Config().cfg
        self.admin_client = KafkaAdminClient(
            bootstrap_servers=self.cfg["serList"],
            # api_version=self.cfg["apiVersion"],
            api_version_auto_timeout_ms=self.cfg["autoVersionTimeout"],
            security_protocol=self.cfg["protocol"],
            sasl_mechanism=self.cfg["mechanism"],
            sasl_kerberos_service_name=self.cfg["kerverosSerName"])
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.admin_client.close()

    @staticmethod
    def new_topic(topic_name: str):
        """
        generate new topic object
        :return:
        """
        return NewTopic(name=topic_name,
                        num_partitions=ClientAdmin.Num_Partitions,
                        replication_factor=ClientAdmin.Replication_Factor,
                        replica_assignments=None,
                        topic_configs=None)

    def create_topic(self, topic_name: str):
        """
        在集群中创建新的topic(topic配置采用默认模式)
        :param topic_name:
        :return:
        """
        topic_list = [self.new_topic(topic_name)]
        try:
            response = self.admin_client.create_topics(
                topic_list, timeout_ms=TIME_OUT_ADMIN)
        except TopicAlreadyExistsError:
            log.tag_error(
                KafkaInfo.KafkaAdmin,
                "Topic [%s] already exists! Create failed!" % topic_name)
            raise ActionError(KafkaErr.TopicExist)
        return response

    def delete_topic(self, topic_name: str):
        """
        删除集群中的topic
        :param topic_name:
        :return:
        """
        topic_list = [topic_name]
        try:
            self.admin_client.delete_topics(topic_list,
                                            timeout_ms=TIME_OUT_ADMIN)
        except UnknownTopicOrPartitionError:
            log.tag_error(
                KafkaInfo.KafkaAdmin,
                "Topic [%s] does not exist! Nothing to delete" % topic_name)
            raise ActionError(KafkaErr.TopicNotExist)

    def create_partition(self):
        """
        Create additional partitions for an existing topic.
        :return:
        """

    def list_consumer_groups(self):
        """
        List the consumer groups in the cluster.
        :return:
        """
        return self.admin_client.list_consumer_groups()
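create_partition above is left as a stub. kafka-python exposes KafkaAdminClient.create_partitions for this; the following is a minimal sketch of how the stub could be filled in, written as a standalone function (the target total of 6 partitions is an arbitrary assumption):

from kafka import KafkaAdminClient
from kafka.admin import NewPartitions


def create_partition(admin_client, topic_name, total_count=6):
    # create_partitions() takes a mapping of topic name -> NewPartitions,
    # where total_count is the desired total number of partitions for the
    # topic (not the number of partitions to add).
    admin_client.create_partitions({topic_name: NewPartitions(total_count=total_count)})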