Example #1
    def alter_config_for_topic(
        self, topic: Topic, dry_run=False, respect_existing_config=False
    ):
        """
        Alter the configuration of a single topic.
        :topic a Topic instance
        :respect_existing_config merge existing conig into new
        :dry_run perform dry-run only
        """

        new_config = {}
        # Fetch the existing config first (the "old config" at this stage)
        existing_config = {
            entry.name: entry.value
            for entry in self.describe_topic(topic.name).values()
        }
        if respect_existing_config:
            new_config.update(existing_config)
        # And update with changed values to get real new config
        new_config.update(topic.configs)
        new_config = self._sanitize_topic_config(new_config)
        # get a config delta
        config_delta = self._get_config_diff(existing_config, new_config)

        resource = ConfigResource(restype=Type.TOPIC, name=topic.name)
        for key, value in new_config.items():
            resource.set_config(key, value)
        fs = self.adminclient.alter_configs(
            [resource], request_timeout=30, validate_only=dry_run
        )

        # Check the per-resource results
        configs_altered = []
        configs_failed = {}
        for res, future in fs.items():
            try:
                future.result()
                configs_altered.append(res)
            except Exception as e:
                configs_failed[res] = str(e)
        if topic.name not in self.dry_run_plan:
            self._update_plan(
                topic.name,
                {
                    "topic": topic,
                    "reason": None,
                    "config_delta": config_delta,
                },
            )
        self._update_plan(
            topic.name,
            {"configs_altered": configs_altered, "configs_failed": configs_failed},
        )
        return (configs_altered, configs_failed, config_delta)
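
A minimal usage sketch for the method above; `Topic` and `manager` are illustrative stand-ins (not from the source) for the project's topic descriptor and the object that owns alter_config_for_topic:

# Hypothetical usage; these names are assumptions, not the project's API
topic = Topic(name="orders", configs={"retention.ms": "86400000"})
altered, failed, delta = manager.alter_config_for_topic(
    topic, dry_run=True, respect_existing_config=True
)
print(f"altered={altered} failed={failed} delta={delta}")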
Example #2
    def _configure_message_type(self, admin_id: str, message_type: str,
                                **kwargs):
        """
        Sets expiration time for individual message types.

        Parameters:
        message_type    This is essentially equivalent to the
                        queue/topic name, e.g. "Alert"
        Kwargs:
        expire_time_ms  This should be the expire time of message_type
                        in milliseconds.
        data_limit_bytes This should be the max size of log files
                         for individual message_type.
        """
        admin = self._clients['admin'][admin_id]
        Log.debug(f"New configuration for message "\
            f"type {message_type} with admin id {admin_id}")
        # Check whether the message_type exists
        message_type_list = self.list_message_types(admin_id)
        if message_type not in message_type_list:
            raise MessageBusError(errno.ENOENT, "Unknown Message type:"+\
                " not listed in %s", message_type_list)
        topic_resource = ConfigResource('topic', message_type)
        for tuned_retry in range(self._max_config_retry_count):
            for key, val in kwargs.items():
                if key not in self._config_prop_map:
                    raise MessageBusError(errno.EINVAL,\
                        "Invalid configuration %s for message_type %s.", key,\
                        message_type)
                topic_resource.set_config(self._config_prop_map[key], val)
            tuned_params = admin.alter_configs([topic_resource])
            # alter_configs() futures resolve to None on success and raise
            # on failure, so a non-None result signals an unexpected outcome
            if list(tuned_params.values())[0].result() is not None:
                if tuned_retry > 1:
                    Log.error(f"MessageBusError: {errors.ERR_OP_FAILED} "\
                        f"Updating message type expire time by "\
                        f"alter_configs() for resource {topic_resource} "\
                        f"failed using admin {admin} for message type "\
                        f"{message_type}")
                    raise MessageBusError(errors.ERR_OP_FAILED,\
                        "Updating message type expire time by "+\
                        "alter_configs() for resource %s failed using admin" +\
                        "%s for message type %s", topic_resource, admin,\
                        message_type)
                continue
            else:
                break
        Log.debug("Successfully updated message type with new"+\
            " configuration.")
        return 0
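
A hedged usage sketch: assuming `broker` is an initialized instance with an admin client registered under "admin1" (both assumptions), the kwargs map onto Kafka topic configs via _config_prop_map (expire_time_ms -> retention.ms, data_limit_bytes -> segment.bytes):

# Illustrative call; the admin id and the values are assumptions
broker._configure_message_type(
    "admin1", "Alert",
    expire_time_ms=3600000,      # keep messages for one hour
    data_limit_bytes=10485760,   # 10 MiB segment size
)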
Example #3
def kafka_alter_configs(a, args):
    """ Alter configs atomically, replacing non-specified
    configuration properties with their default values.
    """

    resources = []
    for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
        resource = ConfigResource(restype, resname)
        resources.append(resource)
        for k, v in [conf.split('=') for conf in configs.split(',')]:
            resource.set_config(k, v)

    fs = a.alter_configs(resources)

    # Wait for operation to finish.
    for res, f in fs.items():
        try:
            f.result()  # empty, but raises exception on failure
            print("{} configuration successfully altered".format(res))
        except Exception:
            raise
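
The helper consumes flat (resource-type, resource-name, config-list) triples. A minimal invocation sketch with a confluent-kafka AdminClient (the broker address is an assumption):

from confluent_kafka.admin import AdminClient

a = AdminClient({'bootstrap.servers': 'localhost:9092'})  # assumed address
# One triple: alter two configs on topic "mytopic"
kafka_alter_configs(a, ['topic', 'mytopic',
                        'retention.ms=86400000,cleanup.policy=delete'])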
Example #5
class KafkaMessageBroker(MessageBroker):
    """ Kafka Server based message broker implementation """

    name = 'kafka'

    # Retention period in Milliseconds
    _default_msg_retention_period = 604800000
    _min_msg_retention_period = 1

    # Maximum retry count
    _max_config_retry_count = 3
    _max_purge_retry_count = 5

    # Polling timeout
    _default_timeout = 0.5

    def __init__(self, broker_conf: dict):
        """ Initialize Kafka based Configurations """
        super().__init__(broker_conf)

        kafka_conf = {'bootstrap.servers': self._servers}
        self._admin = AdminClient(kafka_conf)

        self._clients = {'producer': {}, 'consumer': {}}

    def init_client(self, client_type: str, **client_conf: dict):
        """ Obtain Kafka based Producer/Consumer """
        """ Validate and return if client already exists """
        if client_type not in self._clients.keys():
            raise MessageBusError(errno.EINVAL, "Invalid client type %s", \
                client_type)

        if client_conf['client_id'] in self._clients[client_type].keys():
            if self._clients[client_type][client_conf['client_id']] != {}:
                return

        kafka_conf = {}
        kafka_conf['bootstrap.servers'] = self._servers
        kafka_conf['client.id'] = client_conf['client_id']

        if client_type == 'producer':
            producer = Producer(**kafka_conf)
            self._clients[client_type][client_conf['client_id']] = producer

            self._resource = ConfigResource('topic',
                                            client_conf['message_type'])
            conf = self._admin.describe_configs([self._resource])
            default_configs = list(conf.values())[0].result()
            for params in ['retention.ms']:
                if params not in default_configs:
                    raise MessageBusError(
                        errno.EINVAL, "Missing required \
                        config parameter %s", params)

            # Save the current retention; fall back to the default if it is
            # still set to the 1 ms purge value
            self._saved_retention = int(default_configs['retention.ms']\
                .__dict__['value'])
            if self._saved_retention == self._min_msg_retention_period:
                self._saved_retention = self._default_msg_retention_period

        else:
            for entry in ['offset', 'consumer_group', 'message_type', \
                'auto_ack', 'client_id']:
                if entry not in client_conf.keys():
                    raise MessageBusError(
                        errno.EINVAL, "Missing conf entry \
                        %s", entry)

            kafka_conf['enable.auto.commit'] = client_conf['auto_ack']
            kafka_conf['auto.offset.reset'] = client_conf['offset']
            kafka_conf['group.id'] = client_conf['consumer_group']

            consumer = Consumer(**kafka_conf)
            # subscribe() expects a list of topics
            consumer.subscribe([client_conf['message_type']])
            self._clients[client_type][client_conf['client_id']] = consumer

    def send(self, producer_id: str, message_type: str, method: str, \
        messages: list, timeout=0.1):
        """ Sends list of messages to Kafka cluster(s) """
        producer = self._clients['producer'][producer_id]
        if producer is None:
            raise MessageBusError(
                errno.EINVAL, "Producer %s is not \
                initialized", producer_id)

        for message in messages:
            producer.produce(message_type, bytes(message, 'utf-8'))
            if method == 'sync':
                producer.flush()
            else:
                producer.poll(timeout=timeout)

    def get_log_size(self, message_type: str):
        """ Gets size of log across all the partitions """
        total_size = 0
        cmd = "/opt/kafka/bin/kafka-log-dirs.sh --describe --bootstrap-server "\
            + self._servers + " --topic-list " + message_type
        try:
            cmd_proc = SimpleProcess(cmd)
            run_result = cmd_proc.run()
            decoded_string = run_result[0].decode('utf-8')
            output_json = json.loads(re.search(r'({.+})', decoded_string).\
                group(0))
            for brokers in output_json['brokers']:
                partition = brokers['logDirs'][0]['partitions']
                for each_partition in partition:
                    total_size += each_partition['size']
            return total_size
        except Exception as e:
            raise MessageBusError(
                errno.EINVAL, "Unable to fetch log size for \
                message type %s. %s", message_type, e)

    def delete(self, message_type: str):
        """ Deletes all the messages from Kafka cluster(s) """
        # Temporarily drop retention.ms to the minimum so the broker purges
        # the log, then poll below until the log size reaches zero
        for tuned_retry in range(self._max_config_retry_count):
            self._resource.set_config('retention.ms', \
                self._min_msg_retention_period)
            tuned_params = self._admin.alter_configs([self._resource])
            if list(tuned_params.values())[0].result() is not None:
                if tuned_retry > 1:
                    raise MessageBusError(
                        errno.EINVAL, "Unable to change \
                        retention for %s", message_type)
                continue
            else:
                break

        for retry_count in range(1, (self._max_purge_retry_count + 2)):
            if retry_count > self._max_purge_retry_count:
                raise MessageBusError(
                    errno.EINVAL, "Unable to delete \
                    messages for %s", message_type)
            time.sleep(0.1 * retry_count)
            log_size = self.get_log_size(message_type)
            if log_size == 0:
                break

        # Restore the previously saved retention period
        for default_retry in range(self._max_config_retry_count):
            self._resource.set_config('retention.ms', self._saved_retention)
            default_params = self._admin.alter_configs([self._resource])
            if list(default_params.values())[0].result() is not None:
                if default_retry > 1:
                    raise MessageBusError(
                        errno.EINVAL, "Unknown configuration \
                        for %s", message_type)
                continue
            else:
                break

    def receive(self, consumer_id: str, timeout: float = None) -> list:
        """
        Receives list of messages from Kafka Message Server

        Parameters:
        consumer_id     Consumer ID for which messages are to be retrieved
        timeout         Time in seconds to wait for the message. Timeout of 0
                        will lead to blocking indefinitely for the message
        """
        consumer = self._clients['consumer'][consumer_id]
        if consumer is None:
            raise MessageBusError(
                errno.EINVAL, "Consumer %s is not \
                initialized", consumer_id)

        if timeout is None:
            timeout = self._default_timeout

        try:
            while True:
                msg = consumer.poll(timeout=timeout)
                if msg is None:
                    # if blocking (timeout=0), NoneType messages are ignored
                    if timeout > 0:
                        return None
                elif msg.error():
                    raise MessageBusError(errno.ECONN, "Cant receive. %s", \
                        msg.error())
                else:
                    return msg.value()
        except KeyboardInterrupt:
            raise MessageBusError(errno.EINVAL, "Receive interrupted for \
                consumer %s", consumer_id)

    def ack(self, consumer_id: str):
        """ To manually commit offset """
        consumer = self._clients['consumer'][consumer_id]
        if consumer is None:
            raise MessageBusError(
                errno.EINVAL, "Consumer %s is not \
                initialized", consumer_id)
        # 'async' is a reserved keyword since Python 3.7; confluent-kafka's
        # Consumer.commit() takes 'asynchronous' instead
        consumer.commit(asynchronous=False)
class KafkaMessageBroker(MessageBroker):
    """ Kafka Server based message broker implementation """

    name = 'kafka'

    def __init__(self, broker_conf: dict):
        """ Initialize Kafka based Configurations """
        super().__init__(broker_conf)

        kafka_conf = {'bootstrap.servers': self._servers}
        self._admin = AdminClient(kafka_conf)

        self._clients = {'producer': {}, 'consumer': {}}

    def init_client(self, client_type: str, **client_conf: dict):
        """ Obtain Kafka based Producer/Consumer """
        """ Validate and return if client already exists """
        if client_type not in self._clients.keys():
            raise MessageBusError(errno.EINVAL, "Invalid client type %s", \
                client_type)

        if client_conf['client_id'] in self._clients[client_type].keys():
            if self._clients[client_type][client_conf['client_id']] != {}:
                return

        kafka_conf = {}
        kafka_conf['bootstrap.servers'] = self._servers
        kafka_conf['client.id'] = client_conf['client_id']

        if client_type == 'producer':
            producer = Producer(**kafka_conf)
            self._clients[client_type][client_conf['client_id']] = producer

            self._resource = ConfigResource('topic',
                                            client_conf['message_type'])
            conf = self._admin.describe_configs([self._resource])
            default_configs = list(conf.values())[0].result()
            for params in ['retention.ms']:
                if params not in default_configs:
                    raise MessageBusError(
                        errno.EINVAL, "Missing required \
                        config parameter %s", params)

            self._saved_retention = int(default_configs['retention.ms'] \
                                       .__dict__['value'])

        else:
            for entry in ['offset', 'consumer_group', 'message_type', \
                'auto_ack', 'client_id']:
                if entry not in client_conf.keys():
                    raise MessageBusError(
                        errno.EINVAL, "Missing conf entry \
                        %s", entry)

            kafka_conf['enable.auto.commit'] = client_conf['auto_ack']
            kafka_conf['auto.offset.reset'] = client_conf['offset']
            kafka_conf['group.id'] = client_conf['consumer_group']

            consumer = Consumer(**kafka_conf)
            # subscribe() expects a list of topics
            consumer.subscribe([client_conf['message_type']])
            self._clients[client_type][client_conf['client_id']] = consumer

    def send(self, producer_id: str, message_type: str, method: str, \
        messages: list, timeout=0.1):
        """ Sends list of messages to Kafka cluster(s) """
        producer = self._clients['producer'][producer_id]
        if producer is None:
            raise MessageBusError(
                errno.EINVAL, "Producer %s is not \
                initialized", producer_id)

        for message in messages:
            producer.produce(message_type, bytes(message, 'utf-8'))
            if method == 'sync':
                producer.flush()
            else:
                producer.poll(timeout=timeout)

    def delete(self, message_type: str):
        """ Deletes all the messages from Kafka cluster(s) """
        # Temporarily drop retention.ms to 1 ms so the broker purges the log
        for i in range(3):
            self._resource.set_config('retention.ms', 1)
            tuned_params = self._admin.alter_configs([self._resource])
            if list(tuned_params.values())[0].result() is not None:
                if i > 1:
                    raise MessageBusError(
                        errno.EINVAL, "Unable to delete \
                    messages for %s", message_type)
                continue
            else:
                break

        # Restore the previously saved retention period
        for i in range(3):
            self._resource.set_config('retention.ms', self._saved_retention)
            default_params = self._admin.alter_configs([self._resource])
            if list(default_params.values())[0].result() is not None:
                if i > 1:
                    raise MessageBusError(
                        errno.EINVAL, "Unknown configuration \
                        for %s", message_type)
                continue
            else:
                break

    def receive(self, consumer_id: str, timeout=0.5) -> list:
        """ Receives list of messages from Kafka cluster(s) """
        consumer = self._clients['consumer'][consumer_id]
        if consumer is None:
            raise MessageBusError(
                errno.EINVAL, "Consumer %s is not \
                initialized", consumer_id)

        try:
            msg = consumer.poll(timeout=timeout)
            if msg is None:
                # poll() timed out without a message
                return None
            if msg.error():
                raise MessageBusError(errno.ECONN, "Can't receive. %s", \
                    msg.error())
            return msg.value()
        except KeyboardInterrupt:
            raise MessageBusError(errno.EINVAL, "Receive interrupted for \
                consumer %s", consumer_id)

    def ack(self, consumer_id: str):
        """ To manually commit offset """
        consumer = self._clients['consumer'][consumer_id]
        if consumer is None:
            raise MessageBusError(
                errno.EINVAL, "Consumer %s is not \
                initialized", consumer_id)
        consumer.commit(asynchronous=False)  # 'async' is reserved in Python 3.7+
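
A hedged end-to-end sketch of driving this broker. The conf shape, client ids, and payloads are assumptions (the MessageBroker base class is expected to derive self._servers from broker_conf):

broker = KafkaMessageBroker(broker_conf)  # broker_conf shape is assumed
broker.init_client('producer', client_id='p1', message_type='Alert')
broker.send('p1', 'Alert', 'sync', ['{"event": "disk_full"}'])

broker.init_client('consumer', client_id='c1', message_type='Alert',
                   consumer_group='g1', offset='earliest', auto_ack=False)
print(broker.receive('c1'))
broker.ack('c1')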
Example #7
class KafkaMessageBroker(MessageBroker):
    """ Kafka Server based message broker implementation """

    name = 'kafka'

    # Retention period in Milliseconds
    _default_msg_retention_period = 604800000
    _min_msg_retention_period = 1

    # Maximum retry count
    _max_config_retry_count = 3
    _max_purge_retry_count = 5
    _max_list_message_type_count = 15

    # Polling timeout
    _default_timeout = 0.5

    def __init__(self, broker_conf: dict):
        """ Initialize Kafka based Configurations """
        super().__init__(broker_conf)

        self._clients = {'admin': {}, 'producer': {}, 'consumer': {}}

    def init_client(self, client_type: str, **client_conf: dict):
        """ Obtain Kafka based Producer/Consumer """

        """ Validate and return if client already exists """
        if client_type not in self._clients.keys():
            raise MessageBusError(errno.EINVAL, "Invalid client type %s", \
                client_type)

        if client_conf['client_id'] in self._clients[client_type].keys():
            if self._clients[client_type][client_conf['client_id']] != {}:
                return

        kafka_conf = {}
        kafka_conf['bootstrap.servers'] = self._servers
        kafka_conf['client.id'] = client_conf['client_id']

        # Create an admin client when explicitly requested, or implicitly
        # for the first non-consumer client when none exists yet
        if client_type == 'admin' or self._clients['admin'] == {}:
            if client_type != 'consumer':
                self.admin = AdminClient(kafka_conf)
                self._clients['admin'][client_conf['client_id']] = self.admin

        if client_type == 'producer':
            producer = Producer(**kafka_conf)
            self._clients[client_type][client_conf['client_id']] = producer

            self._resource = ConfigResource('topic', client_conf['message_type'])
            conf = self.admin.describe_configs([self._resource])
            default_configs = list(conf.values())[0].result()
            for params in ['retention.ms']:
                if params not in default_configs:
                    raise MessageBusError(errno.EINVAL, "Missing required \
                        config parameter %s", params)

            # Save the current retention; fall back to the default if it is
            # still set to the 1 ms purge value
            self._saved_retention = int(default_configs['retention.ms']\
                .__dict__['value'])
            if self._saved_retention == self._min_msg_retention_period:
                self._saved_retention = self._default_msg_retention_period

        elif client_type == 'consumer':
            for entry in ['offset', 'consumer_group', 'message_types', \
                'auto_ack', 'client_id']:
                if entry not in client_conf.keys():
                    raise MessageBusError(errno.EINVAL, "Missing conf entry \
                        %s", entry)

            kafka_conf['enable.auto.commit'] = client_conf['auto_ack']
            kafka_conf['auto.offset.reset'] = client_conf['offset']
            kafka_conf['group.id'] = client_conf['consumer_group']

            consumer = Consumer(**kafka_conf)
            consumer.subscribe(client_conf['message_types'])
            self._clients[client_type][client_conf['client_id']] = consumer

    def _task_status(self, tasks: dict):
        """ Check if the task is completed successfully """
        for message_type, f in tasks.items():
            try:
                f.result()  # The result itself is None
            except Exception as e:
                raise MessageBusError(errno.EINVAL, "Admin operation fails for \
                    %s. %s", message_type, e)

    def _get_metadata(self, admin: object):
        """ To get the metadata information of message type """
        try:
            message_type_metadata = admin.list_topics().__dict__
            return message_type_metadata['topics']
        except Exception as e:
            raise MessageBusError(errno.EINVAL, "Unable to list message type. \
                %s", e)

    def list_message_types(self, admin_id: str) -> list:
        """
        Returns a list of existing message types.

        Parameters:
        admin_id        A String that represents Admin client ID.

        Return Value:
        Returns list of message types e.g. ["topic1", "topic2", ...]
        """
        admin = self._clients['admin'][admin_id]
        return list(self._get_metadata(admin).keys())

    def register_message_type(self, admin_id: str, message_types: list, \
        partitions: int):
        """
        Creates a list of message types.

        Parameters:
        admin_id        A String that represents Admin client ID.
        message_types   This is essentially equivalent to the list of
                        queue/topic names, e.g. ["Alert"]
        partitions      Integer that represents number of partitions to be
                        created.
        """
        admin = self._clients['admin'][admin_id]
        new_message_type = [NewTopic(each_message_type, \
            num_partitions=partitions) for each_message_type in message_types]
        created_message_types = admin.create_topics(new_message_type)
        self._task_status(created_message_types)

        for each_message_type in message_types:
            for list_retry in range(1, self._max_list_message_type_count+2):
                if each_message_type not in \
                    list(self._get_metadata(admin).keys()):
                    if list_retry > self._max_list_message_type_count:
                        raise MessageBusError(errno.EINVAL, "Maximum retries \
                            exceeded for creating %s.", each_message_type)
                    time.sleep(list_retry*1)
                    continue
                else:
                    break

    def deregister_message_type(self, admin_id: str, message_types: list):
        """
        Deletes a list of message types.

        Parameters:
        admin_id        A String that represents Admin client ID.
        message_types   This is essentially equivalent to the list of
                        queue/topic names, e.g. ["Alert"]
        """
        admin = self._clients['admin'][admin_id]
        deleted_message_types = admin.delete_topics(message_types)
        self._task_status(deleted_message_types)

        for each_message_type in message_types:
            for list_retry in range(1, self._max_list_message_type_count+2):
                if each_message_type in list(self._get_metadata(admin).keys()):
                    if list_retry > self._max_list_message_type_count:
                        raise MessageBusError(errno.EINVAL, "Maximum retries \
                            exceeded for deleting %s.", each_message_type)
                    time.sleep(list_retry*1)
                    continue
                else:
                    break

    def add_concurrency(self, admin_id: str, message_type: str, \
        concurrency_count: int):
        """
        Increases the partitions for a message type.

        Parameters:
        admin_id            A String that represents Admin client ID.
        message_type        This is essentially equivalent to queue/topic name,
                            e.g. "Alert"
        concurrency_count   Integer that represents number of partitions to be
                            increased.

        Note:  Number of partitions for a message type can only be increased,
               never decreased
        """
        admin = self._clients['admin'][admin_id]
        new_partition = [NewPartitions(message_type, \
            new_total_count=concurrency_count)]
        partitions = admin.create_partitions(new_partition)
        self._task_status(partitions)

        # Waiting for few seconds to complete the partition addition process
        for list_retry in range(1, self._max_list_message_type_count+2):
            if concurrency_count != len(self._get_metadata(admin)\
                [message_type].__dict__['partitions']):
                if list_retry > self._max_list_message_type_count:
                    raise MessageBusError(errno.EINVAL, "Maximum retries \
                        exceeded to increase concurrency for %s.", \
                        message_type)
                time.sleep(list_retry*1)
                continue
            else:
                break

    def send(self, producer_id: str, message_type: str, method: str, \
        messages: list, timeout=0.1):
        """
        Sends list of messages to Kafka cluster(s)

        Parameters:
        producer_id     A String that represents Producer client ID.
        message_type    This is essentially equivalent to the
                        queue/topic name, e.g. "Alert"
        method          Can be set to "sync" or "async"(default).
        messages        A list of messages sent to Kafka Message Server
        """
        producer = self._clients['producer'][producer_id]
        if producer is None:
            raise MessageBusError(errno.EINVAL, "Producer %s is not \
                initialized", producer_id)

        for message in messages:
            producer.produce(message_type, bytes(message, 'utf-8'))
            if method == 'sync':
                producer.flush()
            else:
                producer.poll(timeout=timeout)

    def get_log_size(self, message_type: str):
        """ Gets size of log across all the partitions """
        total_size = 0
        cmd = "/opt/kafka/bin/kafka-log-dirs.sh --describe --bootstrap-server "\
            + self._servers + " --topic-list " + message_type
        try:
            cmd_proc = SimpleProcess(cmd)
            run_result = cmd_proc.run()
            decoded_string = run_result[0].decode('utf-8')
            output_json = json.loads(re.search(r'({.+})', decoded_string).\
                group(0))
            for brokers in output_json['brokers']:
                partition = brokers['logDirs'][0]['partitions']
                for each_partition in partition:
                    total_size += each_partition['size']
            return total_size
        except Exception as e:
            raise MessageBusError(errno.EINVAL, "Unable to fetch log size for \
                message type %s. %s" , message_type, e)

    def delete(self, admin_id: str, message_type: str):
        """
        Deletes all the messages from Kafka cluster(s)

        Parameters:
        message_type    This is essentially equivalent to the
                        queue/topic name, e.g. "Alert"
        """
        admin = self._clients['admin'][admin_id]

        for tuned_retry in range(self._max_config_retry_count):
            self._resource.set_config('retention.ms', \
                self._min_msg_retention_period)
            tuned_params = admin.alter_configs([self._resource])
            if list(tuned_params.values())[0].result() is not None:
                if tuned_retry > 1:
                    raise MessageBusError(errno.EINVAL, "Unable to change \
                        retention for %s", message_type)
                continue
            else:
                break

        for retry_count in range(1, (self._max_purge_retry_count + 2)):
            if retry_count > self._max_purge_retry_count:
                raise MessageBusError(errno.EINVAL, "Unable to delete \
                    messages for %s", message_type)
            time.sleep(0.1*retry_count)
            log_size = self.get_log_size(message_type)
            if log_size == 0:
                break

        for default_retry in range(self._max_config_retry_count):
            self._resource.set_config('retention.ms', self._saved_retention)
            default_params = admin.alter_configs([self._resource])
            if list(default_params.values())[0].result() is not None:
                if default_retry > 1:
                    raise MessageBusError(errno.EINVAL, "Unknown configuration \
                        for %s", message_type)
                continue
            else:
                break

    def get_unread_count(self, consumer_group: str):
        """
        Gets the count of unread messages from the Kafka message server

        Parameters:
        consumer_group  A String that represents Consumer Group ID.
        """
        table = []

        try:
            cmd = "/opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "\
                + self._servers + " --describe --group " + consumer_group
            cmd_proc = SimpleProcess(cmd)
            res_op, res_err, res_rc = cmd_proc.run()
            if res_rc != 0:
                raise MessageBusError(errno.EINVAL, "Unable to get the message \
                    count. %s", res_err)
            decoded_string = res_op.decode('utf-8')
            if decoded_string == '':
                raise MessageBusError(errno.EINVAL, "No active consumers in \
                    the consumer group, %s", consumer_group)
            elif 'Error' in decoded_string:
                raise MessageBusError(errno.EINVAL, "Unable to get the message \
                    count. %s", decoded_string)
            else:
                split_rows = decoded_string.split('\n')
                rows = [row.split(' ') for row in split_rows if row != '']
                for each_row in rows:
                    new_row = [item for item in each_row if item != '']
                    table.append(new_row)
                index = table[0].index('LAG')
                unread_count = [int(lag[index]) for lag in table if lag[index] \
                    != 'LAG' and lag[index] != '-']

                if len(unread_count) == 0:
                    raise MessageBusError(errno.EINVAL, "No active consumers \
                        in the consumer group, %s", consumer_group)
            return sum(unread_count)

        except Exception as e:
            raise MessageBusError(errno.EINVAL, "Unable to get the message \
                count. %s", e)

    def receive(self, consumer_id: str, timeout: float = None) -> list:
        """
        Receives list of messages from Kafka Message Server

        Parameters:
        consumer_id     Consumer ID for which messages are to be retrieved
        timeout         Time in seconds to wait for the message. Timeout of 0
                        will lead to blocking indefinitely for the message
        """
        consumer = self._clients['consumer'][consumer_id]
        if consumer is None:
            raise MessageBusError(errno.EINVAL, "Consumer %s is not \
                initialized", consumer_id)

        if timeout is None:
            timeout = self._default_timeout

        try:
            while True:
                msg = consumer.poll(timeout=timeout)
                if msg is None:
                    # if blocking (timeout=0), NoneType messages are ignored
                    if timeout > 0:
                        return None
                elif msg.error():
                    raise MessageBusError(errno.ECONN, "Cant receive. %s", \
                        msg.error())
                else:
                    return msg.value()
        except KeyboardInterrupt:
            raise MessageBusError(errno.EINVAL, "Receive interrupted for \
                consumer %s", consumer_id)

    def ack(self, consumer_id: str):
        """ To manually commit offset """
        consumer = self._clients['consumer'][consumer_id]
        if consumer is None:
            raise MessageBusError(errno.EINVAL, "Consumer %s is not \
                initialized", consumer_id)
        consumer.commit(asynchronous=False)  # 'async' is reserved in Python 3.7+
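
A hedged sketch of the admin-side calls above. The ids and values are assumptions; note that delete() relies on self._resource, which is only set once a producer for that message_type has been initialized:

broker.init_client('admin', client_id='admin1')
broker.register_message_type('admin1', ['Alert'], partitions=1)
print(broker.list_message_types('admin1'))   # e.g. ['Alert', ...]
broker.add_concurrency('admin1', 'Alert', concurrency_count=4)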
Example #8
def kafka_delta_alter_configs(a, args):
    """
    The AlterConfigs Kafka API requires all configuration to be passed,
    any left out configuration properties will revert to their default settings.

    This example shows how to just modify the supplied configuration entries
    by first reading the configuration from the broker, updating the supplied
    configuration with the broker configuration (without overwriting), and
    then writing it all back.

    The asynchronous nature of futures is also showcased, which makes this
    example more complex than a synchronous version would need to be.
    """

    # Convert supplied config to resources.
    # We can reuse the same resources both for describe_configs and
    # alter_configs.
    resources = []
    for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
        resource = ConfigResource(restype, resname)
        resources.append(resource)
        for k, v in [conf.split('=') for conf in configs.split(',')]:
            resource.set_config(k, v)

    # Set up a locked counter and an Event (for signaling) to track when the
    # second level of futures is done. This is a somewhat contrived example,
    # since no other asynchronous mechanism is in use, so we need something
    # to wait on to signal completion.

    class WaitZero(object):
        def __init__(self, waitcnt):
            self.cnt = waitcnt
            self.lock = threading.Lock()
            self.event = threading.Event()

        def decr(self):
            """ Decrement cnt by 1"""
            with self.lock:
                assert self.cnt > 0
                self.cnt -= 1
            self.event.set()

        def wait(self):
            """ Wait until cnt reaches 0 """
            self.lock.acquire()
            while self.cnt > 0:
                self.lock.release()
                self.event.wait()
                self.event.clear()
                self.lock.acquire()
            self.lock.release()

        def __len__(self):
            with self.lock:
                return self.cnt

    wait_zero = WaitZero(len(resources))

    # Read existing configuration from cluster
    fs = a.describe_configs(resources)

    def delta_alter_configs_done(fut, resource):
        e = fut.exception()
        if e is not None:
            print("Config update for {} failed: {}".format(resource, e))
        else:
            print("Config for {} updated".format(resource))
        wait_zero.decr()

    def delta_alter_configs(resource, remote_config):
        print(
            "Updating {} supplied config entries {} with {} config entries read from cluster"
            .format(len(resource), resource, len(remote_config)))
        # Only set configuration that is not default
        for k, entry in [(k, v) for k, v in remote_config.items()
                         if not v.is_default]:
            resource.set_config(k, entry.value, overwrite=False)

        fs = a.alter_configs([resource])
        fs[resource].add_done_callback(
            lambda fut: delta_alter_configs_done(fut, resource))

    # For each resource's future set up a completion callback
    # that in turn calls alter_configs() on that single resource.
    # This is inefficient since the resources can usually go in
    # one single alter_configs() call, but we're also showcasing
    # the futures here.
    for res, f in fs.items():
        f.add_done_callback(lambda fut, resource=res: delta_alter_configs(
            resource, fut.result()))

    # Wait for done callbacks to be triggered and operations to complete.
    print("Waiting for {} resource updates to finish".format(len(wait_zero)))
    wait_zero.wait()
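
As the docstring notes, the future callbacks are what make this example long. A synchronous sketch of the same delta-alter idea, using only calls that already appear above:

def delta_alter_configs_sync(a, resources):
    # Merge non-default remote entries into each resource without
    # overwriting the supplied ones, then write everything back.
    for resource, f in a.describe_configs(resources).items():
        remote_config = f.result()
        for k, entry in remote_config.items():
            if not entry.is_default:
                resource.set_config(k, entry.value, overwrite=False)
    for resource, f in a.alter_configs(resources).items():
        f.result()  # empty on success, raises on failure
        print("Config for {} updated".format(resource))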
Example #9
class KafkaMessageBroker(MessageBroker):
    """Kafka Server based message broker implementation."""

    name = 'kafka'

    # Retention period in Milliseconds
    _default_msg_retention_period = 604800000
    _min_msg_retention_period = 1

    # Maximum retry count
    _max_config_retry_count = 3
    _max_purge_retry_count = 5
    _max_list_message_type_count = 15
    _config_prop_map = {
        'expire_time_ms': 'retention.ms',
        'data_limit_bytes': 'segment.bytes',
        'file_delete_ms': 'file.delete.delay.ms',
    }

    def __init__(self, broker_conf: dict):
        """Initialize Kafka based Configurations."""
        super().__init__(broker_conf)
        Log.debug(f"KafkaMessageBroker: initialized with broker "\
            f"configurations broker_conf: {broker_conf}")
        self._clients = {'admin': {}, 'producer': {}, 'consumer': {}}

        # Polling timeout
        self._recv_message_timeout =\
            broker_conf['message_bus']['receive_timeout']
        # Socket timeout
        self._controller_socket_timeout =\
            broker_conf['message_bus']['socket_timeout']
        # Message timeout
        self._send_message_timeout =\
            broker_conf['message_bus']['send_timeout']
        # Admin API timeout
        self._admin_api_timeout =\
            broker_conf['message_bus']['admin_api_timeout']
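
    # broker_conf is expected to carry a 'message_bus' section with the
    # four timeout keys read above; the values below are illustrative
    # assumptions, not defaults from the source:
    #   {'message_bus': {'receive_timeout': 0.5, 'socket_timeout': 15000,
    #                    'send_timeout': 10000, 'admin_api_timeout': 5}}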

    def init_client(self, client_type: str, **client_conf: dict):
        """Obtain Kafka based Producer/Consumer."""
        Log.debug(f"initializing client_type: {client_type},"\
            f" **kwargs {client_conf}")
        # Validate and return if client already exists
        if client_type not in self._clients.keys():
            Log.error(f"MessageBusError: Invalid client type "\
                f"{errors.ERR_INVALID_CLIENT_TYPE}, {client_type}")
            raise MessageBusError(errors.ERR_INVALID_CLIENT_TYPE,\
                "Invalid client type %s", client_type)

        if client_conf['client_id'] in self._clients[client_type].keys():
            if self._clients[client_type][client_conf['client_id']] != {}:
                # Check if message_type exists to send/receive
                client = self._clients[client_type][client_conf['client_id']]
                available_message_types = client.list_topics().topics.keys()
                if client_type == 'producer':
                    if client_conf['message_type'] not in\
                        available_message_types:
                        Log.error(f"MessageBusError: message_type "\
                            f"{client_conf['message_type']} not found in "\
                            f"{available_message_types} for {client_type}")
                        raise MessageBusError(
                            errno.EINVAL, "Unknown Topic or \
                            Partition. %s", KafkaError(3))
                elif client_type == 'consumer':
                    if not any(each_message_type in available_message_types for\
                        each_message_type in client_conf['message_types']):
                        Log.error(f"MessageBusError: message_type "\
                            f"{client_conf['message_types']} not found in "\
                            f"{available_message_types} for {client_type}")
                        raise MessageBusError(
                            errno.EINVAL, "Unknown Topic or \
                            Partition. %s", KafkaError(3))
                return

        kafka_conf = {}
        kafka_conf['bootstrap.servers'] = self._servers
        kafka_conf['client.id'] = client_conf['client_id']
        kafka_conf['error_cb'] = self._error_cb

        if client_type == 'admin' or client_type == 'producer':
            kafka_conf['socket.timeout.ms'] = self._controller_socket_timeout
            admin = AdminClient(kafka_conf)
            self._clients['admin'][client_conf['client_id']] = admin

        if client_type == 'producer':
            kafka_conf['message.timeout.ms'] = self._send_message_timeout
            producer = Producer(**kafka_conf)
            self._clients[client_type][client_conf['client_id']] = producer

            self._resource = ConfigResource('topic',\
                client_conf['message_type'])
            admin = self._clients['admin'][client_conf['client_id']]
            conf = admin.describe_configs([self._resource])
            default_configs = list(conf.values())[0].result()
            for params in ['retention.ms']:
                if params not in default_configs:
                    Log.error(f"MessageBusError: Missing required config"\
                        f" parameter {params}. for client type {client_type}")
                    raise MessageBusError(errno.ENOKEY,\
                        "Missing required config parameter %s. for " +\
                        "client type %s", params, client_type)

            # Save the current retention; fall back to the default if it is
            # still set to the 1 ms purge value
            self._saved_retention = int(default_configs['retention.ms']\
                .__dict__['value'])
            if self._saved_retention == self._min_msg_retention_period:
                self._saved_retention = self._default_msg_retention_period

        elif client_type == 'consumer':
            for entry in ['offset', 'consumer_group', 'message_types',\
                'auto_ack', 'client_id']:
                if entry not in client_conf.keys():
                    Log.error(f"MessageBusError: Could not find entry "\
                        f"{entry} in conf keys for client type {client_type}")
                    raise MessageBusError(errno.ENOENT, "Could not find " +\
                        "entry %s in conf keys for client type %s", entry,\
                        client_type)

            kafka_conf['enable.auto.commit'] = client_conf['auto_ack']
            kafka_conf['auto.offset.reset'] = client_conf['offset']
            kafka_conf['group.id'] = client_conf['consumer_group']

            consumer = Consumer(**kafka_conf)
            consumer.subscribe(client_conf['message_types'])
            self._clients[client_type][client_conf['client_id']] = consumer

    def _task_status(self, tasks: dict, method: str):
        """Check if the task is completed successfully."""
        for task in tasks.values():
            try:
                task.result()  # The result itself is None
            except Exception as e:
                Log.error(f"MessageBusError: {errors.ERR_OP_FAILED}."\
                    f" Admin operation fails for {method}. {e}")
                raise MessageBusError(errors.ERR_OP_FAILED,\
                    "Admin operation fails for %s. %s", method, e)

    def _get_metadata(self, admin: object):
        """To get the metadata information of message type."""
        try:
            message_type_metadata = admin.list_topics(
                timeout=self._admin_api_timeout).__dict__
            return message_type_metadata['topics']
        except Exception as e:
            Log.error(f"MessageBusError: {errors.ERR_OP_FAILED}. "\
                f"list_topics() failed. {e} Check if Kafka service is "\
                f"running successfully")
            raise MessageBusError(errors.ERR_OP_FAILED, "list_topics() " +\
                "failed. %s. Check if Kafka service is running successfully", e)

    @staticmethod
    def _error_cb(err):
        """Callback to check if all brokers are down."""
        if err.code() == KafkaError._ALL_BROKERS_DOWN:
            raise MessageBusError(errors.ERR_SERVICE_UNAVAILABLE,\
                "Kafka service(s) unavailable. %s", err)

    def list_message_types(self, admin_id: str) -> list:
        """
        Returns a list of existing message types.

        Parameters:
        admin_id        A String that represents Admin client ID.

        Return Value:
        Returns list of message types e.g. ["topic1", "topic2", ...]
        """
        admin = self._clients['admin'][admin_id]
        return list(self._get_metadata(admin).keys())

    def register_message_type(self, admin_id: str, message_types: list,\
        partitions: int):
        """
        Creates a list of message types.

        Parameters:
        admin_id        A String that represents Admin client ID.
        message_types   This is essentially equivalent to the list of
                        queue/topic names, e.g. ["Alert"]
        partitions      Integer that represents number of partitions to be
                        created.
        """
        Log.debug(f"Register message type {message_types} using {admin_id}"\
            f" with {partitions} partitions")
        admin = self._clients['admin'][admin_id]
        new_message_type = [NewTopic(each_message_type,\
            num_partitions=partitions) for each_message_type in message_types]
        created_message_types = admin.create_topics(new_message_type)
        self._task_status(created_message_types,
                          method='register_message_type')

        for each_message_type in message_types:
            for list_retry in range(1, self._max_list_message_type_count + 2):
                if each_message_type not in\
                    list(self._get_metadata(admin).keys()):
                    if list_retry > self._max_list_message_type_count:
                        Log.error(f"MessageBusError: Timed out after retry "\
                            f"{list_retry} while creating message_type "\
                            f"{each_message_type}")
                        raise MessageBusError(errno.ETIMEDOUT, "Timed out " +\
                            "after retry %d while creating message_type %s.",\
                            list_retry, each_message_type)
                    time.sleep(list_retry * 1)
                    continue
                else:
                    break

    def deregister_message_type(self, admin_id: str, message_types: list):
        """
        Deletes a list of message types.

        Parameters:
        admin_id        A String that represents Admin client ID.
        message_types   This is essentially equivalent to the list of
                        queue/topic names, e.g. ["Alert"]
        """
        Log.debug(f"Deregister message type {message_types} using {admin_id}")
        admin = self._clients['admin'][admin_id]
        deleted_message_types = admin.delete_topics(message_types)
        self._task_status(deleted_message_types,\
            method='deregister_message_type')

        for each_message_type in message_types:
            for list_retry in range(1, self._max_list_message_type_count + 2):
                if each_message_type in list(self._get_metadata(admin).keys()):
                    if list_retry > self._max_list_message_type_count:
                        Log.error(f"MessageBusError: Timed out after "\
                            f"{list_retry} retry to delete message_type "\
                            f"{each_message_type}")
                        raise MessageBusError(errno.ETIMEDOUT,\
                            "Timed out after %d retry to delete message_type" +\
                            "%s.", list_retry, each_message_type)
                    time.sleep(list_retry * 1)
                    continue
                else:
                    break

    def add_concurrency(self, admin_id: str, message_type: str,\
        concurrency_count: int):
        """
        Increases the partitions for a message type.

        Parameters:
        admin_id            A String that represents Admin client ID.
        message_type        This is essentially equivalent to queue/topic name,
                            e.g. "Alert"
        concurrency_count   Integer that represents number of partitions to be
                            increased.

        Note:  Number of partitions for a message type can only be increased,
               never decreased
        """
        Log.debug(f"Adding concurrency count {concurrency_count} for message"\
            f" type {message_type} with admin id {admin_id}")
        admin = self._clients['admin'][admin_id]
        new_partition = [NewPartitions(message_type,\
            new_total_count=concurrency_count)]
        partitions = admin.create_partitions(new_partition)
        self._task_status(partitions, method='add_concurrency')

        # Waiting for few seconds to complete the partition addition process
        for list_retry in range(1, self._max_list_message_type_count + 2):
            if concurrency_count != len(self._get_metadata(admin)\
                [message_type].__dict__['partitions']):
                if list_retry > self._max_list_message_type_count:
                    Log.error(f"MessageBusError: Exceeded retry count "\
                        f"{list_retry} for creating partitions for "\
                        f"message_type {message_type}")
                    raise MessageBusError(errno.E2BIG, "Exceeded retry count" +\
                        " %d for creating partitions for message_type" +\
                        " %s.", list_retry, message_type)
                time.sleep(list_retry * 1)
                continue
            else:
                break
        Log.debug(f"Successfully Increased the partitions for a "\
            f"{message_type} to {concurrency_count}")

    @staticmethod
    def delivery_callback(err, _):
        """Raise if an asynchronous message delivery ultimately failed."""
        if err:
            raise MessageBusError(
                errno.ETIMEDOUT, "Message delivery failed. \
                %s", err)

    def send(self, producer_id: str, message_type: str, method: str,\
        messages: list, timeout=0.1):
        """
        Sends list of messages to Kafka cluster(s).

        Parameters:
        producer_id     A String that represents Producer client ID.
        message_type    This is essentially equivalent to the
                        queue/topic name, e.g. "Alert"
        method          Can be set to "sync" or "async"(default).
        messages        A list of messages sent to Kafka Message Server
        """
        Log.debug(f"Producer {producer_id} sending list of messages "\
            f"{messages} of message type {message_type} to kafka server"\
            f" with method {method}")
        producer = self._clients['producer'][producer_id]
        if producer is None:
            Log.error(f"MessageBusError: "\
                f"{errors.ERR_SERVICE_NOT_INITIALIZED}. Producer: "\
                f"{producer_id} is not initialized")
            raise MessageBusError(errors.ERR_SERVICE_NOT_INITIALIZED,\
                "Producer %s is not initialized", producer_id)

        for message in messages:
            producer.produce(message_type, bytes(message, 'utf-8'),\
                callback=self.delivery_callback)
            if method == 'sync':
                producer.flush()
            else:
                producer.poll(timeout=timeout)
        Log.debug("Successfully Sent list of messages to Kafka cluster")

    def delete(self, admin_id: str, message_type: str):
        """
        Deletes all the messages of given message_type.

        Parameters:
        message_type    This is essentially equivalent to the
                        queue/topic name, e.g. "Alert"
        """
        admin = self._clients['admin'][admin_id]
        Log.debug(f"Removing all messages from kafka cluster for message "\
            f"type {message_type} with admin id {admin_id}")

        for tuned_retry in range(self._max_config_retry_count):
            self._resource.set_config('retention.ms',\
                self._min_msg_retention_period)
            tuned_params = admin.alter_configs([self._resource])
            if list(tuned_params.values())[0].result() is not None:
                if tuned_retry > 1:
                    Log.error(f"MessageBusError: {errors.ERR_OP_FAILED} "\
                        f"alter_configs() for resource {self._resource} "\
                        f"failed using admin {admin} for message type "\
                        f"{message_type}")
                    raise MessageBusError(errors.ERR_OP_FAILED,\
                        "alter_configs() for resource %s failed using admin" +\
                        "%s for message type %s", self._resource, admin,\
                        message_type)
                continue
            else:
                break

        # Sleep for a second to delete the messages
        time.sleep(1)

        for default_retry in range(self._max_config_retry_count):
            self._resource.set_config('retention.ms', self._saved_retention)
            default_params = admin.alter_configs([self._resource])
            if list(default_params.values())[0].result() is not None:
                if default_retry > 1:
                    Log.error(f"MessageBusError: {errno.ENOKEY} Unknown "\
                        f"configuration for message type {message_type}.")
                    raise MessageBusError(errno.ENOKEY, "Unknown " +\
                        "configuration for message type %s.", message_type)
                continue
            else:
                break
        Log.debug(f"Successfully deleted all the messages of message_type: "\
            f"{message_type}")
        return 0

    def receive(self, consumer_id: str, timeout: float = None) -> list:
        """
        Receives list of messages from Kafka Message Server.

        Parameters:
        consumer_id     Consumer ID for which messages are to be retrieved
        timeout         Time in seconds to wait for the message. Timeout of 0
                        will lead to blocking indefinitely for the message
        """
        blocking = False

        consumer = self._clients['consumer'][consumer_id]
        Log.debug(f"Receiving list of messages from kafka Message server of"\
            f" consumer_id {consumer_id}, and timeout is {timeout}")
        if consumer is None:
            Log.error(f"MessageBusError: {errors.ERR_SERVICE_NOT_INITIALIZED}"\
                f" Consumer {consumer_id} is not initialized.")
            raise MessageBusError(errors.ERR_SERVICE_NOT_INITIALIZED,\
                "Consumer %s is not initialized.", consumer_id)

        if timeout is None:
            timeout = self._recv_message_timeout
        if timeout == 0:
            timeout = self._recv_message_timeout
            blocking = True

        try:
            while True:
                msg = consumer.poll(timeout=timeout)
                if msg is None:
                    # if blocking (timeout=0), NoneType messages are ignored
                    if not blocking:
                        return None
                elif msg.error():
                    Log.error(f"MessageBusError: {errors.ERR_OP_FAILED}"\
                        f" poll({timeout}) for consumer {consumer_id} failed"\
                        f" to receive message. {msg.error()}")
                    raise MessageBusError(errors.ERR_OP_FAILED, "poll(%s) " +\
                        "for consumer %s failed to receive message. %s",\
                        timeout, consumer_id, msg.error())
                else:
                    return msg.value()
        except KeyboardInterrupt:
            Log.error(f"MessageBusError: {errno.EINTR} Received Keyboard "\
                f"interrupt while trying to receive message for consumer "\
                f"{consumer_id}")
            raise MessageBusError(errno.EINTR, "Received Keyboard interrupt " +\
                "while trying to receive message for consumer %s", consumer_id)

    def ack(self, consumer_id: str):
        """To manually commit offset."""
        consumer = self._clients['consumer'][consumer_id]
        if consumer is None:
            Log.error(f"MessageBusError: {errors.ERR_SERVICE_NOT_INITIALIZED}"\
                f" Consumer {consumer_id} is not initialized.")
            raise MessageBusError(errors.ERR_SERVICE_NOT_INITIALIZED,\
                "Consumer %s is not initialized.", consumer_id)
        consumer.commit(asynchronous=False)

    def _configure_message_type(self, admin_id: str, message_type: str,
                                **kwargs):
        """
        Sets expiration time for individual messages types.

        Parameters:
        message_type    This is essentially equivalent to the
                        queue/topic name. For e.g. "Alert"
        Kwargs:
        expire_time_ms  This should be the expire time of message_type
                        in milliseconds.
        data_limit_bytes This should be the max size of log files
                         for individual message_type.
        """
        admin = self._clients['admin'][admin_id]
        Log.debug(f"New configuration for message "\
            f"type {message_type} with admin id {admin_id}")
        # check for message_type exist or not
        message_type_list = self.list_message_types(admin_id)
        if message_type not in message_type_list:
            raise MessageBusError(errno.ENOENT, "Unknown Message type:"+\
                " not listed in %s", message_type_list)
        topic_resource = ConfigResource('topic', message_type)
        for tuned_retry in range(self._max_config_retry_count):
            for key, val in kwargs.items():
                if key not in self._config_prop_map:
                    raise MessageBusError(errno.EINVAL,\
                        "Invalid configuration %s for message_type %s.", key,\
                        message_type)
                topic_resource.set_config(self._config_prop_map[key], val)
            tuned_params = admin.alter_configs([topic_resource])
            if list(tuned_params.values())[0].result() is not None:
                if tuned_retry > 1:
                    Log.error(f"MessageBusError: {errors.ERR_OP_FAILED} "\
                        f"Updating message type expire time by "\
                        f"alter_configs() for resource {topic_resource} "\
                        f"failed using admin {admin} for message type "\
                        f"{message_type}")
                    raise MessageBusError(errors.ERR_OP_FAILED,\
                        "Updating message type expire time by "+\
                        "alter_configs() for resource %s failed using admin" +\
                        "%s for message type %s", topic_resource, admin,\
                        message_type)
                continue
            else:
                break
        Log.debug("Successfully updated message type with new"+\
            " configuration.")
        return 0

    def set_message_type_expire(self, admin_id: str, message_type: str,\
        **kwargs):
        """
        Set message type expire with combinational unit of time and size.

        Kwargs:
        expire_time_ms  This should be the expire time of message_type
                        in milliseconds.
        data_limit_bytes This should be the max size of log files
                         for individual message_type.
        """
        Log.debug(f"Set expiration for message "\
            f"type {message_type} with admin id {admin_id}")

        for config_property in ['expire_time_ms', 'data_limit_bytes']:
            if config_property not in kwargs.keys():
                raise MessageBusError(errno.EINVAL,\
                    "Missing required message_type retention config key %s.",\
                    config_property)
        kwargs['file_delete_ms'] = 1
        return self._configure_message_type(admin_id, message_type, **kwargs)
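
A minimal invocation sketch for the expiry API above: assuming _config_prop_map maps expire_time_ms to Kafka's retention.ms and data_limit_bytes to retention.bytes (the map itself is not shown in this example), the call boils down to a single alter_configs() on the topic resource. The broker address and topic name below are placeholders.

from confluent_kafka.admin import AdminClient, ConfigResource

admin = AdminClient({'bootstrap.servers': 'localhost:9092'})  # placeholder
resource = ConfigResource('topic', 'Alert')                   # placeholder topic
resource.set_config('retention.ms', '60000')        # expire_time_ms
resource.set_config('retention.bytes', '1048576')   # data_limit_bytes
futures = admin.alter_configs([resource])
futures[resource].result()  # returns None on success, raises on failure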

Example #10
0
def example_delta_alter_configs(a, args):
    """
    The AlterConfigs Kafka API requires the complete configuration to be
    passed; any left-out configuration properties will revert to their
    default settings.

    This example shows how to just modify the supplied configuration entries
    by first reading the configuration from the broker, updating the supplied
    configuration with the broker configuration (without overwriting), and
    then writing it all back.

    The async nature of futures is also showcased, which makes this example
    more complex than it needs to be in the synchronous case.
    """

    # Convert supplied config to resources.
    # We can reuse the same resources both for describe_configs and
    # alter_configs.
    resources = []
    for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
        resource = ConfigResource(restype, resname)
        resources.append(resource)
        for k, v in [conf.split('=') for conf in configs.split(',')]:
            resource.set_config(k, v)

    # Set up a locked counter and an Event (for signaling) to track when the
    # second level of futures is done. This is a somewhat contrived example,
    # since no other asynchronous mechanism is used, so we need something
    # to wait on to signal completion.

    class WaitZero(object):
        def __init__(self, waitcnt):
            self.cnt = waitcnt
            self.lock = threading.Lock()
            self.event = threading.Event()

        def decr(self):
            """ Decrement cnt by 1"""
            with self.lock:
                assert self.cnt > 0
                self.cnt -= 1
            self.event.set()

        def wait(self):
            """ Wait until cnt reaches 0 """
            self.lock.acquire()
            while self.cnt > 0:
                self.lock.release()
                self.event.wait()
                self.event.clear()
                self.lock.acquire()
            self.lock.release()

        def __len__(self):
            with self.lock:
                return self.cnt

    wait_zero = WaitZero(len(resources))

    # Read existing configuration from cluster
    fs = a.describe_configs(resources)

    def delta_alter_configs_done(fut, resource):
        e = fut.exception()
        if e is not None:
            print("Config update for {} failed: {}".format(resource, e))
        else:
            print("Config for {} updated".format(resource))
        wait_zero.decr()

    def delta_alter_configs(resource, remote_config):
        print("Updating {} supplied config entries {} with {} config entries read from cluster".format(
            len(resource), resource, len(remote_config)))
        # Only set configuration that is not default
        for k, entry in [(k, v) for k, v in remote_config.items() if not v.is_default]:
            resource.set_config(k, entry.value, overwrite=False)

        fs = a.alter_configs([resource])
        fs[resource].add_done_callback(lambda fut: delta_alter_configs_done(fut, resource))

    # For each resource's future set up a completion callback
    # that in turn calls alter_configs() on that single resource.
    # This is inefficient, since the resources could usually go in
    # a single alter_configs() call, but we're also showcasing
    # the futures here.
    for res, f in fs.items():
        f.add_done_callback(lambda fut, resource=res: delta_alter_configs(resource, fut.result()))

    # Wait for done callbacks to be triggered and operations to complete.
    print("Waiting for {} resource updates to finish".format(len(wait_zero)))
    wait_zero.wait()
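
A hypothetical invocation of the example above; the broker address, topic name and settings are placeholders. Arguments are consumed as (resource-type, resource-name, "key=val[,key=val]") triples, matching the zip() slicing at the top of the function.

import threading  # used by WaitZero inside example_delta_alter_configs

from confluent_kafka.admin import AdminClient

a = AdminClient({'bootstrap.servers': 'localhost:9092'})  # placeholder
example_delta_alter_configs(
    a, ['topic', 'mytopic', 'retention.ms=86400000,cleanup.policy=delete'])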
Example #11
0
class KafkaMessageBroker(MessageBroker):
    """ Kafka Server based message broker implementation """

    name = 'kafka'

    # Retention period in Milliseconds
    _default_msg_retention_period = 604800000
    _min_msg_retention_period = 1

    # Maximum retry count
    _max_config_retry_count = 3
    _max_purge_retry_count = 5
    _max_list_message_type_count = 15

    def __init__(self, broker_conf: dict):
        """ Initialize Kafka based Configurations """
        super().__init__(broker_conf)
        Log.debug(f"KafkaMessageBroker: initialized with broker " \
            f"configurations broker_conf: {broker_conf}")
        self._clients = {'admin': {}, 'producer': {}, 'consumer': {}}

        # Polling timeout
        self._recv_message_timeout = \
            broker_conf['message_bus']['recv_message_timeout']
        # Socket timeout
        self._controller_socket_timeout = \
            broker_conf['message_bus']['controller_socket_timeout']
        # Message timeout
        self._send_message_timeout = \
            broker_conf['message_bus']['send_message_timeout']

    def init_client(self, client_type: str, **client_conf: dict):
        """ Obtain Kafka based Producer/Consumer """
        Log.debug(f"initializing client_type: {client_type}," \
            f" **kwargs {client_conf}")
        """ Validate and return if client already exists """
        if client_type not in self._clients.keys():
            Log.error(f"MessageBusError: Invalid client type " \
                f"{errors.ERR_INVALID_CLIENT_TYPE}, {client_type}")
            raise MessageBusError(errors.ERR_INVALID_CLIENT_TYPE, \
                "Invalid client type %s", client_type)

        if client_conf['client_id'] in self._clients[client_type].keys():
            if self._clients[client_type][client_conf['client_id']] != {}:
                # Check if message_type exists to send/receive
                client = self._clients[client_type][client_conf['client_id']]
                available_message_types = client.list_topics().topics.keys()
                if client_type == 'producer':
                    if client_conf['message_type'] not in \
                        available_message_types:
                        Log.error(f"MessageBusError: message_type " \
                            f"{client_conf['message_type']} not found in " \
                            f"{available_message_types} for {client_type}")
                        raise MessageBusError(errno.EINVAL, \
                            "Unknown Topic or Partition. %s", KafkaError(3))
                elif client_type == 'consumer':
                    if not any(each_message_type in available_message_types for\
                        each_message_type in client_conf['message_types']):
                        Log.error(f"MessageBusError: message_type " \
                            f"{client_conf['message_types']} not found in " \
                            f"{available_message_types} for {client_type}")
                        raise MessageBusError(errno.EINVAL, \
                            "Unknown Topic or Partition. %s", KafkaError(3))
                return

        kafka_conf = {}
        kafka_conf['bootstrap.servers'] = self._servers
        kafka_conf['client.id'] = client_conf['client_id']
        kafka_conf['error_cb'] = self._error_cb

        if client_type == 'admin' or client_type == 'producer':
            kafka_conf['socket.timeout.ms'] = self._controller_socket_timeout
            admin = AdminClient(kafka_conf)
            self._clients['admin'][client_conf['client_id']] = admin

        if client_type == 'producer':
            kafka_conf['message.timeout.ms'] = self._send_message_timeout
            producer = Producer(**kafka_conf)
            self._clients[client_type][client_conf['client_id']] = producer

            self._resource = ConfigResource('topic', \
                client_conf['message_type'])
            admin = self._clients['admin'][client_conf['client_id']]
            conf = admin.describe_configs([self._resource])
            default_configs = list(conf.values())[0].result()
            for params in ['retention.ms']:
                if params not in default_configs:
                    Log.error(f"MessageBusError: Missing required config" \
                        f" parameter {params}. for client type {client_type}")
                    raise MessageBusError(errno.ENOKEY, \
                        "Missing required config parameter %s. for " +\
                        "client type %s", params, client_type)

            self._saved_retention = int(default_configs['retention.ms']\
                .__dict__['value'])

            # Fall back to the default retention period if the saved value
            # is the 1 ms minimum used while purging
            if self._saved_retention == self._min_msg_retention_period:
                self._saved_retention = self._default_msg_retention_period

        elif client_type == 'consumer':
            for entry in ['offset', 'consumer_group', 'message_types', \
                'auto_ack', 'client_id']:
                if entry not in client_conf.keys():
                    Log.error(f"MessageBusError: Could not find entry "\
                        f"{entry} in conf keys for client type {client_type}")
                    raise MessageBusError(errno.ENOENT, "Could not find " +\
                        "entry %s in conf keys for client type %s", entry, \
                        client_type)

            kafka_conf['enable.auto.commit'] = client_conf['auto_ack']
            kafka_conf['auto.offset.reset'] = client_conf['offset']
            kafka_conf['group.id'] = client_conf['consumer_group']

            consumer = Consumer(**kafka_conf)
            consumer.subscribe(client_conf['message_types'])
            self._clients[client_type][client_conf['client_id']] = consumer

    def _task_status(self, tasks: dict, method: str):
        """ Check if the task is completed successfully """
        for task in tasks.values():
            try:
                task.result()  # The result itself is None
            except Exception as e:
                Log.error(f"MessageBusError: {errors.ERR_OP_FAILED}." \
                    f" Admin operation fails for {method}. {e}")
                raise MessageBusError(errors.ERR_OP_FAILED, \
                    "Admin operation fails for %s. %s", method, e)

    def _get_metadata(self, admin: object):
        """ To get the metadata information of message type """
        try:
            message_type_metadata = admin.list_topics().__dict__
            return message_type_metadata['topics']
        except Exception as e:
            Log.error(f"MessageBusError: {errors.ERR_OP_FAILED}. " \
                f"list_topics() failed. {e} Check if Kafka service is " \
                f"running successfully")
            raise MessageBusError(errors.ERR_OP_FAILED, "list_topics() " +\
                "failed. %s. Check if Kafka service is running successfully", e)

    @staticmethod
    def _error_cb(err):
        """ Callback to check if all brokers are down """
        if err.code() == KafkaError._ALL_BROKERS_DOWN:
            raise MessageBusError(errors.ERR_SERVICE_UNAVAILABLE, \
                "Kafka service(s) unavailable. %s", err)

    def list_message_types(self, admin_id: str) -> list:
        """
        Returns a list of existing message types.

        Parameters:
        admin_id        A String that represents Admin client ID.

        Return Value:
        Returns list of message types e.g. ["topic1", "topic2", ...]
        """
        admin = self._clients['admin'][admin_id]
        return list(self._get_metadata(admin).keys())

    def register_message_type(self, admin_id: str, message_types: list, \
        partitions: int):
        """
        Creates a list of message types.

        Parameters:
        admin_id        A String that represents Admin client ID.
        message_types   This is essentially equivalent to the list of
                        queue/topic name. For e.g. ["Alert"]
        partitions      Integer that represents number of partitions to be
                        created.
        """
        Log.debug(f"Register message type {message_types} using {admin_id}" \
            f" with {partitions} partitions")
        admin = self._clients['admin'][admin_id]
        new_message_type = [NewTopic(each_message_type, \
            num_partitions=partitions) for each_message_type in message_types]
        created_message_types = admin.create_topics(new_message_type)
        self._task_status(created_message_types,
                          method='register_message_type')

        for each_message_type in message_types:
            for list_retry in range(1, self._max_list_message_type_count + 2):
                if each_message_type not in \
                    list(self._get_metadata(admin).keys()):
                    if list_retry > self._max_list_message_type_count:
                        Log.error(f"MessageBusError: Timed out after retry " \
                            f"{list_retry} while creating message_type " \
                            f"{each_message_type}")
                        raise MessageBusError(errno.ETIMEDOUT, "Timed out " +\
                            "after retry %d while creating message_type %s.", \
                            list_retry, each_message_type)
                    time.sleep(list_retry * 1)
                    continue
                else:
                    break

    def deregister_message_type(self, admin_id: str, message_types: list):
        """
        Deletes a list of message types.

        Parameters:
        admin_id        A String that represents Admin client ID.
        message_types   This is essentially equivalent to the list of
                        queue/topic name. For e.g. ["Alert"]
        """
        Log.debug(f"Deregister message type {message_types} using {admin_id}")
        admin = self._clients['admin'][admin_id]
        deleted_message_types = admin.delete_topics(message_types)
        self._task_status(deleted_message_types, \
            method='deregister_message_type')

        for each_message_type in message_types:
            for list_retry in range(1, self._max_list_message_type_count + 2):
                if each_message_type in list(self._get_metadata(admin).keys()):
                    if list_retry > self._max_list_message_type_count:
                        Log.error(f"MessageBusError: Timed out after " \
                            f"{list_retry} retry to delete message_type " \
                            f"{each_message_type}")
                        raise MessageBusError(errno.ETIMEDOUT, \
                            "Timed out after %d retry to delete message_type" +\
                            "%s.", list_retry, each_message_type)
                    time.sleep(list_retry * 1)
                    continue
                else:
                    break

    def add_concurrency(self, admin_id: str, message_type: str, \
        concurrency_count: int):
        """
        Increases the partitions for a message type.

        Parameters:
        admin_id            A String that represents Admin client ID.
        message_type        This is essentially equivalent to queue/topic name.
                            For e.g. "Alert"
        concurrency_count   Integer that represents number of partitions to be
                            increased.

        Note:  Number of partitions for a message type can only be increased,
               never decreased
        """
        Log.debug(f"Adding concurrency count {concurrency_count} for message" \
            f" type {message_type} with admin id {admin_id}")
        admin = self._clients['admin'][admin_id]
        new_partition = [NewPartitions(message_type, \
            new_total_count=concurrency_count)]
        partitions = admin.create_partitions(new_partition)
        self._task_status(partitions, method='add_concurrency')

        # Wait a few seconds for the partition addition process to complete
        for list_retry in range(1, self._max_list_message_type_count + 2):
            if concurrency_count != len(self._get_metadata(admin)\
                [message_type].__dict__['partitions']):
                if list_retry > self._max_list_message_type_count:
                    Log.error(f"MessageBusError: Exceeded retry count " \
                        f"{list_retry} for creating partitions for " \
                        f"message_type {message_type}")
                    raise MessageBusError(errno.E2BIG, "Exceeded retry count" +\
                        " %d for creating partitions for message_type" +\
                        " %s.", list_retry, message_type)
                time.sleep(list_retry * 1)
                continue
            else:
                break
        Log.debug(f"Successfully Increased the partitions for a " \
            f"{message_type} to {concurrency_count}")

    @staticmethod
    def delivery_callback(err, _):
        if err:
            raise MessageBusError(
                errno.ETIMEDOUT, "Message delivery failed. %s", err)

    def send(self, producer_id: str, message_type: str, method: str, \
        messages: list, timeout=0.1):
        """
        Sends list of messages to Kafka cluster(s)

        Parameters:
        producer_id     A String that represents Producer client ID.
        message_type    This is essentially equivalent to the
                        queue/topic name. For e.g. "Alert"
        method          Can be set to "sync" or "async"(default).
        messages        A list of messages sent to Kafka Message Server
        """
        Log.debug(f"Producer {producer_id} sending list of messages " \
            f"{messages} of message type {message_type} to kafka server" \
            f" with method {method}")
        producer = self._clients['producer'][producer_id]
        if producer is None:
            Log.error(f"MessageBusError: " \
                f"{errors.ERR_SERVICE_NOT_INITIALIZED}. Producer: " \
                f"{producer_id} is not initialized")
            raise MessageBusError(errors.ERR_SERVICE_NOT_INITIALIZED,\
                "Producer %s is not initialized", producer_id)

        for message in messages:
            producer.produce(message_type, bytes(message, 'utf-8'), \
                callback=self.delivery_callback)
            if method == 'sync':
                producer.flush()
            else:
                producer.poll(timeout=timeout)
        Log.debug("Successfully Sent list of messages to Kafka cluster")

    def get_log_size(self, message_type: str):
        """ Gets size of log across all the partitions """
        total_size = 0
        cmd = "/opt/kafka/bin/kafka-log-dirs.sh --describe --bootstrap-server "\
            + self._servers + " --topic-list " + message_type
        Log.debug(f"Retrieving log size for message_type {message_type}")
        try:
            cmd_proc = SimpleProcess(cmd)
            run_result = cmd_proc.run()
            decoded_string = run_result[0].decode('utf-8')
            output_json = json.loads(
                re.search(r'({.+})', decoded_string).group(0))
            for brokers in output_json['brokers']:
                partition = brokers['logDirs'][0]['partitions']
                for each_partition in partition:
                    total_size += each_partition['size']
            Log.debug(f"Successfully retrived log size {total_size}")
            return total_size
        except Exception as e:
            Log.error(f"MessageBusError:{errors.ERR_OP_FAILED} Command {cmd}" \
                f" failed for message type {message_type} {e}")
            raise MessageBusError(errors.ERR_OP_FAILED, "Command %s failed" +\
                "for message type %s %s", cmd, message_type, e)

    def delete(self, admin_id: str, message_type: str):
        """
        Deletes all the messages from Kafka cluster(s)

        Parameters:
        message_type    This is essentially equivalent to the
                        queue/topic name. For e.g. "Alert"
        """
        admin = self._clients['admin'][admin_id]
        Log.debug(f"Removing all messages from kafka cluster for message " \
            f"type {message_type} with admin id {admin_id}")

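        # Temporarily shrink retention.ms to its minimum so the broker purges
        # the existing messages; the saved retention is restored further below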
        for tuned_retry in range(self._max_config_retry_count):
            self._resource.set_config('retention.ms', \
                self._min_msg_retention_period)
            tuned_params = admin.alter_configs([self._resource])
            if list(tuned_params.values())[0].result() is not None:
                if tuned_retry > 1:
                    Log.error(f"MessageBusError: {errors.ERR_OP_FAILED} " \
                        f"alter_configs() for resource {self._resource} " \
                        f"failed using admin {admin} for message type " \
                        f"{message_type}")
                    raise MessageBusError(errors.ERR_OP_FAILED, \
                        "alter_configs() for resource %s failed using admin" +\
                        "%s for message type %s", self._resource, admin,\
                        message_type)
                continue
            else:
                break

        for retry_count in range(1, (self._max_purge_retry_count + 2)):
            if retry_count > self._max_purge_retry_count:
                Log.error(f"MessageBusError: {errors.ERR_OP_FAILED} Unable" \
                    f" to delete messages for message type {message_type}" \
                    f" using admin {admin} after {retry_count} retries")
                raise MessageBusError(errors.ERR_OP_FAILED,\
                    "Unable to delete messages for message type %s using " +\
                    "admin %s after %d retries", message_type, admin,\
                    retry_count)
            time.sleep(0.1 * retry_count)
            log_size = self.get_log_size(message_type)
            if log_size == 0:
                break

        for default_retry in range(self._max_config_retry_count):
            self._resource.set_config('retention.ms', self._saved_retention)
            default_params = admin.alter_configs([self._resource])
            if list(default_params.values())[0].result() is not None:
                if default_retry > 1:
                    Log.error(f"MessageBusError: {errno.ENOKEY} Unknown " \
                        f"configuration for message type {message_type}.")
                    raise MessageBusError(errno.ENOKEY, "Unknown " +\
                        "configuration for message type %s.", message_type)
                continue
            else:
                break
        Log.debug("Successfully Deleted all the messages from Kafka cluster.")

    def get_unread_count(self, message_type: str, consumer_group: str):
        """
        Gets the count of unread messages from the Kafka message server

        Parameters:
        message_type    This is essentially equivalent to the
                        queue/topic name. For e.g. "Alert"
        consumer_group  A String that represents Consumer Group ID.
        """
        table = []
        Log.debug(f"Getting unread messages count for message_type" \
            f" {message_type}, with consumer_group {consumer_group}")
        # Update the offsets if purge was called
        if self.get_log_size(message_type) == 0:
            cmd = "/opt/kafka/bin/kafka-consumer-groups.sh \
                --bootstrap-server "                                     + self._servers + " --group " \
                + consumer_group + " --topic " + message_type + \
                " --reset-offsets --to-latest --execute"
            cmd_proc = SimpleProcess(cmd)
            res_op, res_err, res_rc = cmd_proc.run()
            if res_rc != 0:
                Log.error(f"MessageBusError: {errors.ERR_OP_FAILED}. Command" \
                    f" {cmd} failed for consumer group {consumer_group}." \
                    f" {res_err}")
                raise MessageBusError(errors.ERR_OP_FAILED, "Command %s " +\
                    "failed for consumer group %s. %s", cmd, consumer_group,\
                    res_err)
            decoded_string = res_op.decode("utf-8")
            if 'Error' in decoded_string:
                Log.error(f"MessageBusError: {errors.ERR_OP_FAILED}. Command" \
                    f" {cmd} failed for consumer group {consumer_group}. " \
                    f"{res_err}")
                raise MessageBusError(errors.ERR_OP_FAILED, "Command %s" + \
                    " failed for consumer group %s. %s", cmd, \
                    consumer_group, res_err)
        cmd = "/opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "\
            + self._servers + " --describe --group " + consumer_group
        cmd_proc = SimpleProcess(cmd)
        res_op, res_err, res_rc = cmd_proc.run()
        if res_rc != 0:
            Log.error(f"MessageBusError: {errors.ERR_OP_FAILED}. command " \
                f"{cmd} failed for consumer group {consumer_group}. " \
                f"{res_err}.")
            raise MessageBusError(errors.ERR_OP_FAILED, "command %s " + \
                "failed for consumer group %s. %s.", cmd, consumer_group, \
                res_err)
        decoded_string = res_op.decode("utf-8")
        if decoded_string == "":
            Log.error(f"MessageBusError: {errno.ENOENT}. No active consumers" \
                f" in the consumer group, {consumer_group}.")
            raise MessageBusError(errno.ENOENT, "No active consumers" +\
                " in the consumer group, %s.", consumer_group)
        elif 'Error' in decoded_string:
            Log.error(f"{errors.ERR_OP_FAILED} command  {cmd} failed for " \
                f"consumer group {consumer_group}. {decoded_string}.")
            raise MessageBusError(errors.ERR_OP_FAILED, "command %s " +\
                "failed for consumer group %s. %s.", cmd, consumer_group,\
                decoded_string)
        else:
            split_rows = decoded_string.split("\n")
            rows = [row.split(' ') for row in split_rows if row != '']
            for each_row in rows:
                new_row = [item for item in each_row if item != '']
                table.append(new_row)
            message_type_index = table[0].index('TOPIC')
            lag_index = table[0].index('LAG')
            unread_count = [int(lag[lag_index]) for lag in table if \
                lag[lag_index] != 'LAG' and lag[lag_index] != '-' and \
                lag[message_type_index] == message_type]

            if len(unread_count) == 0:
                Log.error(
                    f"MessageBusError: {errno.ENOENT}. No active "
                    f"consumers in the consumer group, {consumer_group}.")
                raise MessageBusError(errno.ENOENT, "No active " +\
                    "consumers in the consumer group, %s.", consumer_group)
        Log.debug(f"Successfully Got the count of unread messages from the" \
            f" Kafka message server as {unread_count}")
        return sum(unread_count)

    def receive(self, consumer_id: str, timeout: float = None) -> list:
        """
        Receives list of messages from Kafka Message Server

        Parameters:
        consumer_id     Consumer ID for which messages are to be retrieved
        timeout         Time in seconds to wait for the message. Timeout of 0
                        will lead to blocking indefinitely for the message
        """
        blocking = False

        consumer = self._clients['consumer'][consumer_id]
        Log.debug(f"Receiving list of messages from kafka Message server of" \
            f" consumer_id {consumer_id}, and timeout is {timeout}")
        if consumer is None:
            Log.error(f"MessageBusError: {errors.ERR_SERVICE_NOT_INITIALIZED}"\
                f" Consumer {consumer_id} is not initialized.")
            raise MessageBusError(errors.ERR_SERVICE_NOT_INITIALIZED, \
                "Consumer %s is not initialized.", consumer_id)

        if timeout is None:
            timeout = self._recv_message_timeout
        if timeout == 0:
            timeout = self._recv_message_timeout
            blocking = True

        try:
            while True:
                msg = consumer.poll(timeout=timeout)
                if msg is None:
                    # if blocking (timeout=0), NoneType messages are ignored
                    if not blocking:
                        return None
                elif msg.error():
                    Log.error(f"MessageBusError: {errors.ERR_OP_FAILED}" \
                        f" poll({timeout}) for consumer {consumer_id} failed" \
                        f" to receive message. {msg.error()}")
                    raise MessageBusError(errors.ERR_OP_FAILED, "poll(%s) " +\
                        "for consumer %s failed to receive message. %s", \
                        timeout, consumer_id, msg.error())
                else:
                    return msg.value()
        except KeyboardInterrupt:
            Log.error(f"MessageBusError: {errno.EINTR} Received Keyboard " \
                f"interrupt while trying to receive message for consumer " \
                f"{consumer_id}")
            raise MessageBusError(errno.EINTR, "Received Keyboard interrupt " +\
                "while trying to receive message for consumer %s", consumer_id)

    def ack(self, consumer_id: str):
        """ To manually commit offset """
        consumer = self._clients['consumer'][consumer_id]
        if consumer is None:
            Log.error(f"MessageBusError: {errors.ERR_SERVICE_NOT_INITIALIZED}"\
                f" Consumer {consumer_id} is not initialized.")
            raise MessageBusError(errors.ERR_SERVICE_NOT_INITIALIZED,\
                "Consumer %s is not initialized.", consumer_id)
        consumer.commit(asynchronous=False)
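
A minimal end-to-end sketch of driving the broker above, assuming the MessageBroker base class derives self._servers (the Kafka bootstrap list) from broker_conf; the conf shape beyond the message_bus timeouts, the client ids, and the message type are placeholder assumptions.

broker_conf = {
    'message_bus': {
        'recv_message_timeout': 0.5,
        'controller_socket_timeout': 1000,
        'send_message_timeout': 2000,
    },
    # ...plus whatever server-list keys the MessageBroker base class
    # expects (not shown in this example)
}
broker = KafkaMessageBroker(broker_conf)

broker.init_client('producer', client_id='p1', message_type='Alert')
broker.send('p1', 'Alert', 'sync', ['{"event": "demo"}'])

broker.init_client('consumer', client_id='c1', consumer_group='g1',
                   message_types=['Alert'], offset='earliest', auto_ack=False)
message = broker.receive('c1')  # bytes payload, or None on timeout
broker.ack('c1')                # manual commit since auto_ack=False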
Example #12
0
def configure_topic(ctx, topic, settings, show_details):
    """Show and optionally change configurations for a topic
    """
    client = ctx.parent.obj['client']

    resource = ConfigResource(RESOURCE_TOPIC, topic)

    if settings:
        # Get the initial set of configurations. The alter_configs method
        # works atomically so all of the existing and new configurations need
        # to be passed, otherwise unset configurations get reverted to
        # defaults.
        fs = client.describe_configs([resource])
        configs = fs[resource].result()  # raises on failure
        configs = {k: c.value for k, c in configs.items()}

        # Override new configurations
        configs.update(dict(settings))

        # Convert strings to their native types. describe_configs() provides
        # all configuration values as str, but alter_configs wants values to
        # be the actual types (int, float, bool, str). What can you do, eh?
        convert_configs_to_native_types(configs)

        # Apply the entire configuration set to the resource
        for key, value in configs.items():
            resource.set_config(key, value)

        # Alter the configurations on the server
        fs = client.alter_configs([resource])
        fs[resource].result()  # raises on failure

    # Read configurations
    fs = client.describe_configs([resource])
    configs = fs[resource].result()  # raises on failure

    if show_details:
        # Show detailed information about each ConfigEntry
        attrs = ('value', 'is_read_only', 'is_default', 'is_sensitive',
                 'is_synonym')
        config_data = {}
        for k, config in configs.items():
            config_data[k] = {
                a: getattr(config, a)
                for a in attrs if hasattr(config, a)
            }
            # source and synonyms need some type transforms to be useful
            try:
                config_data[k]['source'] = CONFIG_SOURCES[config.source]
            except KeyError:
                pass
            try:
                config_data[k]['synonyms'] = [
                    k for k, _ in config.synonyms.items()
                ]
            except AttributeError:
                pass
        print(json.dumps(config_data, sort_keys=True, indent=2))

    else:
        # Just show the values of each ConfigEntry
        config_values = {k: config.value for k, config in configs.items()}
        print(json.dumps(config_values, sort_keys=True, indent=2))
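
configure_topic() above relies on a convert_configs_to_native_types() helper that this example does not show. A plausible sketch, assuming it mutates the dict in place and only needs to recover bools, ints and floats from the str values that describe_configs() returns:

def convert_configs_to_native_types(configs):
    """Best-effort, in-place conversion of str config values to native types."""
    for key, value in configs.items():
        if not isinstance(value, str):
            continue  # None or already native
        if value.lower() in ('true', 'false'):
            configs[key] = value.lower() == 'true'
            continue
        try:
            configs[key] = int(value)
        except ValueError:
            try:
                configs[key] = float(value)
            except ValueError:
                pass  # leave as str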