def _handle_group_coordinator_response(self, future, response):
    """Settle *future* from a group coordinator lookup reply.

    On success the coordinator id is cached on this instance, the client
    connection is primed, and the future succeeds with the id; on error
    the future fails with the exception mapped from the response code.
    """
    log.debug("Received group coordinator response %s", response)

    # A concurrent lookup may already have resolved the coordinator;
    # in that case just report the known id.
    if not self._coordinator_unknown():
        log.debug("Coordinator already known -- ignoring metadata response")
        future.success(self._coordinator_id)
        return

    err_cls = Errors.for_code(response.error_code)

    if err_cls is Errors.NoError:
        if not self._client.cluster.add_group_coordinator(self.group_id, response):
            # This could happen if coordinator metadata is different
            # than broker metadata
            future.failure(Errors.IllegalStateError())
            return
        self._coordinator_id = response.coordinator_id
        log.info("Discovered coordinator %s for group %s",
                 self._coordinator_id, self.group_id)
        self._client.ready(self._coordinator_id)
        future.success(self._coordinator_id)
        return

    if err_cls is Errors.GroupCoordinatorNotAvailableError:
        log.debug("Group Coordinator Not Available; retry")
        future.failure(err_cls())
        return

    if err_cls is Errors.GroupAuthorizationFailedError:
        err = err_cls(self.group_id)
        log.error("Group Coordinator Request failed: %s", err)
        future.failure(err)
        return

    err = err_cls()
    log.error("Unrecognized failure in Group Coordinator Request: %s", err)
    future.failure(err)
def _handle_group_coordinator_response(self, future, response):
    """Settle *future* from a group coordinator lookup reply.

    On success the coordinator id is stored (under the instance lock),
    the client connection is primed, heartbeat timeouts are reset, and
    the future succeeds with the id; on error the future fails with the
    exception mapped from the response code.
    """
    log.debug("Received group coordinator response %s", response)

    err_cls = Errors.for_code(response.error_code)

    if err_cls is Errors.NoError:
        with self._lock:
            added = self._client.cluster.add_group_coordinator(
                self.group_id, response)
            if not added:
                # This could happen if coordinator metadata is different
                # than broker metadata
                future.failure(Errors.IllegalStateError())
                return
            self.coordinator_id = response.coordinator_id
            log.info("Discovered coordinator %s for group %s",
                     self.coordinator_id, self.group_id)
            self._client.ready(self.coordinator_id)
            self.heartbeat.reset_timeouts()
        # Success is reported outside the lock, after state is consistent.
        future.success(self.coordinator_id)
        return

    if err_cls is Errors.GroupCoordinatorNotAvailableError:
        log.debug("Group Coordinator Not Available; retry")
        future.failure(err_cls())
        return

    if err_cls is Errors.GroupAuthorizationFailedError:
        err = err_cls(self.group_id)
        log.error("Group Coordinator Request failed: %s", err)
        future.failure(err)
        return

    err = err_cls()
    log.error("Group coordinator lookup for group %s failed: %s",
              self.group_id, err)
    future.failure(err)
def _abort_batches(self):
    """Go through incomplete batches and abort them."""
    abort_error = Errors.IllegalStateError("Producer is closed forcefully.")
    for batch in self._incomplete.all():
        # Close the batch before aborting
        with self._tp_locks[batch.topic_partition]:
            batch.records.close()
        batch.done(exception=abort_error)
        self.deallocate(batch)
def group_protocols(self):
    """Returns list of preferred (protocols, metadata)"""
    if self._subscription.subscription is None:
        raise Errors.IllegalStateError('Consumer has not subscribed to topics')
    # NOTE (carried over from upstream): this seemingly-innocuous getter
    # mutates internal state (_joined_subscription), which is later compared
    # against cluster metadata to detect whether the subscription changed
    # since joining the group. There is no guarantee it is called in the
    # right sequence, so the side effect really belongs elsewhere — it is
    # kept here because it mirrors the Java client.
    self._joined_subscription = set(self._subscription.subscription)
    return [(assignor.name, assignor.metadata(self._joined_subscription))
            for assignor in self.config['assignors']]