def __init__(self, client, metrics, **configs):
    """Initialize the base group coordinator.

    Arguments:
        client: the network client used to talk to the brokers.
        metrics: metrics registry used to build group-coordinator sensors.

    Keyword Arguments:
        group_id (str): name of the consumer group to join for dynamic
            partition assignment (if enabled), and to use for fetching and
            committing offsets. Default: 'kafka-python-default-group'
        session_timeout_ms (int): The timeout used to detect failures when
            using Kafka's group management facilities. Default: 30000
        heartbeat_interval_ms (int): The expected time in milliseconds
            between heartbeats to the consumer coordinator when using
            Kafka's group management feature. Heartbeats are used to ensure
            that the consumer's session stays active and to facilitate
            rebalancing when new consumers join or leave the group. The
            value must be set lower than session_timeout_ms, but typically
            should be set no higher than 1/3 of that value. It can be
            adjusted even lower to control the expected time for normal
            rebalances. Default: 3000
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.

    Raises:
        Errors.KafkaConfigurationError: if the broker api_version is older
            than 0.10.1 and max_poll_interval_ms differs from
            session_timeout_ms (older brokers use a single timeout).
    """
    # Start from class defaults, then overlay only recognized keys so
    # unrelated kwargs passed down by subclasses are ignored.
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs[key]

    # Brokers older than 0.10.1 have no separate rebalance timeout, so the
    # two values must agree.
    if self.config['api_version'] < (0, 10, 1):
        if self.config['max_poll_interval_ms'] != self.config['session_timeout_ms']:
            # BUGFIX: the original never interpolated the '%s' placeholder,
            # so the raised message contained a literal '%s'.
            raise Errors.KafkaConfigurationError(
                "Broker version %s does not support "
                "different values for max_poll_interval_ms "
                "and session_timeout_ms" % (self.config['api_version'],))

    self._client = client
    self.group_id = self.config['group_id']
    self.heartbeat = Heartbeat(**self.config)
    self._heartbeat_thread = None
    self._lock = threading.Condition()
    self.rejoin_needed = True
    self.rejoining = False  # renamed / complement of java needsJoinPrepare
    self.state = MemberState.UNJOINED
    self.join_future = None
    self.coordinator_id = None
    self._find_coordinator_future = None
    self._generation = Generation.NO_GENERATION
    self.sensors = GroupCoordinatorMetrics(
        self.heartbeat, metrics, self.config['metric_group_prefix'])
def __init__(self, client, subscription, metrics, **configs):
    """Initialize the coordination manager.

    Arguments:
        client: the network client used to talk to the brokers.
        subscription: the consumer's topic/partition subscription state.
        metrics: metrics registry used to build coordinator sensors.

    Keyword Arguments:
        group_id (str): name of the consumer group to join for dynamic
            partition assignment (if enabled), and to use for fetching and
            committing offsets. Default: 'kafka-python-default-group'
        enable_auto_commit (bool): If true the consumer's offset will be
            periodically committed in the background. Default: True.
        auto_commit_interval_ms (int): milliseconds between automatic
            offset commits, if enable_auto_commit is True. Default: 5000.
        default_offset_commit_callback (callable): called as
            callback(offsets, exception) response will be either an
            Exception or None. This callback can be used to trigger custom
            actions when a commit request completes.
        assignors (list): List of objects to use to distribute partition
            ownership amongst consumer instances when group management is
            used. Default: [RangePartitionAssignor, RoundRobinPartitionAssignor]
        heartbeat_interval_ms (int): The expected time in milliseconds
            between heartbeats to the consumer coordinator when using
            Kafka's group management feature. Heartbeats are used to ensure
            that the consumer's session stays active and to facilitate
            rebalancing when new consumers join or leave the group. The
            value must be set lower than session_timeout_ms, but typically
            should be set no higher than 1/3 of that value. It can be
            adjusted even lower to control the expected time for normal
            rebalances. Default: 3000
        session_timeout_ms (int): The timeout used to detect failures when
            using Kafka's group management facilities. Default: 30000
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        exclude_internal_topics (bool): Whether records from internal
            topics (such as offsets) should be exposed to the consumer. If
            set to True the only way to receive records from an internal
            topic is subscribing to it. Requires 0.10+. Default: True

    Raises:
        Errors.KafkaConfigurationError: if no assignors are configured for
            a 0.9+ broker, or if the broker api_version is older than
            0.10.1 and max_poll_interval_ms differs from
            session_timeout_ms.
    """
    super(ConsumerCoordinator, self).__init__(client, metrics, **configs)

    # Overlay recognized keys on the class defaults (unknown kwargs are
    # ignored, matching the base class convention).
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs[key]

    self._subscription = subscription
    self._is_leader = False
    self._joined_subscription = set()
    self._metadata_snapshot = self._build_metadata_snapshot(
        subscription, client.cluster)
    self._assignment_snapshot = None
    self._cluster = client.cluster
    # Stored in seconds; config value is milliseconds.
    self.auto_commit_interval = self.config['auto_commit_interval_ms'] / 1000
    self.next_auto_commit_deadline = None
    self.completed_offset_commits = collections.deque()

    if self.config['default_offset_commit_callback'] is None:
        self.config['default_offset_commit_callback'] = \
            self._default_offset_commit_callback

    if self.config['group_id'] is not None:
        if self.config['api_version'] >= (0, 9):
            if not self.config['assignors']:
                raise Errors.KafkaConfigurationError(
                    'Coordinator requires assignors')
        if self.config['api_version'] < (0, 10, 1):
            if self.config['max_poll_interval_ms'] != self.config['session_timeout_ms']:
                # BUGFIX: interpolate the broker api_version; the original
                # left the '%s' placeholder unformatted in the raised message.
                raise Errors.KafkaConfigurationError(
                    "Broker version %s does not support "
                    "different values for max_poll_interval_ms "
                    "and session_timeout_ms" % (self.config['api_version'],))

    # Auto-commit needs broker-side offset commit support (0.8.1+) and a
    # group_id; otherwise quietly disable it with a warning.
    if self.config['enable_auto_commit']:
        if self.config['api_version'] < (0, 8, 1):
            log.warning('Broker version (%s) does not support offset'
                        ' commits; disabling auto-commit.',
                        self.config['api_version'])
            self.config['enable_auto_commit'] = False
        elif self.config['group_id'] is None:
            log.warning('group_id is None: disabling auto-commit.')
            self.config['enable_auto_commit'] = False
        else:
            self.next_auto_commit_deadline = time.time() + self.auto_commit_interval

    self.consumer_sensors = ConsumerCoordinatorMetrics(
        metrics, self.config['metric_group_prefix'], self._subscription)

    # Ask for fresh cluster metadata and track subsequent updates; use a
    # weak reference so the listener does not keep this coordinator alive.
    self._cluster.request_update()
    self._cluster.add_listener(WeakMethod(self._handle_metadata_update))