def _reconfigure(self):
    """Reconcile publisher worker threads with the frontend's host list.

    Reads the publisher options for ``self.path`` from the frontend,
    extracts the hosts advertised for the TCHANNEL protocol, then stops
    workers whose host disappeared and starts a ``PublisherThread`` for
    each newly advertised host.

    Raises:
        Exception: if the frontend advertises no TCHANNEL hosts.
    """
    self.logger.info('publisher reconfiguration started')
    result = util.execute_frontend(
        self.tchannel,
        self.deployment_str,
        self.headers,
        self.timeout_seconds,
        'readPublisherOptions',
        cherami.ReadPublisherOptionsRequest(path=self.path))

    # Take the host list of the first protocol entry that is TCHANNEL.
    tchannel_hosts = next(
        (hp.hostAddresses for hp in result.hostProtocols
         if hp.protocol == cherami.Protocol.TCHANNEL),
        [])
    if not tchannel_hosts:
        raise Exception(
            "tchannel protocol is not supported by cherami server")

    desired = {util.get_connection_key(h) for h in tchannel_hosts}
    current = set(self.workers.keys())

    # Tear down workers whose host is no longer advertised.
    for stale_conn in current - desired:
        self.logger.info('cleaning up connection %s', stale_conn)
        self.workers[stale_conn].stop()
        del self.workers[stale_conn]

    # Spin up a worker for each newly advertised host.
    for new_conn in desired - current:
        self.logger.info('creating new connection %s', new_conn)
        thread = PublisherThread(path=self.path,
                                 task_queue=self.task_queue,
                                 tchannel=self.tchannel,
                                 hostport=new_conn,
                                 headers=self.headers,
                                 timeout_seconds=self.timeout_seconds,
                                 checksum_option=result.checksumOption)
        self.workers[new_conn] = thread
        thread.start()
    self.logger.info('publisher reconfiguration succeeded')
def _reconfigure(self):
    """Reconcile consumer threads with the frontend's output-host list.

    Reads the consumer-group hosts for ``self.path`` /
    ``self.consumer_group_name`` from the frontend, stops threads whose
    host disappeared, and creates a ``ConsumerThread`` for each newly
    advertised host (started only when ``self.start_consumer_thread`` is
    set).
    """
    self.logger.info('consumer reconfiguration started')
    hosts = util.execute_frontend(
        self.tchannel,
        self.deployment_str,
        {},
        self.timeout_seconds,
        'readConsumerGroupHosts',
        cherami.ReadConsumerGroupHostsRequest(
            destinationPath=self.path,
            consumerGroupName=self.consumer_group_name))

    # A missing host list means no connections should remain.
    if hosts.hostAddresses is not None:
        desired = {util.get_connection_key(h) for h in hosts.hostAddresses}
    else:
        desired = set()
    current = set(self.consumer_threads.keys())

    # Tear down threads whose host is no longer advertised.
    for stale_conn in current - desired:
        self.logger.info('cleaning up connection %s', stale_conn)
        self.consumer_threads[stale_conn].stop()
        del self.consumer_threads[stale_conn]

    # Create a thread for each newly advertised host.
    for new_conn in desired - current:
        self.logger.info('creating new connection %s', new_conn)
        thread = ConsumerThread(
            tchannel=self.tchannel,
            headers=self.headers,
            logger=self.logger,
            msg_queue=self.msg_queue,
            hostport=new_conn,
            path=self.path,
            consumer_group_name=self.consumer_group_name,
            timeout_seconds=self.timeout_seconds,
            msg_batch_size=self.msg_batch_size)
        self.consumer_threads[new_conn] = thread
        if self.start_consumer_thread:
            thread.start()
    self.logger.info('consumer reconfiguration succeeded')
def merge_DLQ_for_consumer_group(self, merge_DLQ_for_consumer_group_request):
    """Ask the frontend to merge a consumer group's dead-letter queue.

    :param merge_DLQ_for_consumer_group_request: the request object
        forwarded verbatim to the 'mergeDLQForConsumerGroup' endpoint.
    :return: the frontend's response.
    """
    return util.execute_frontend(self.tchannel,
                                 self.deployment_str,
                                 self.headers,
                                 self.timeout_seconds,
                                 'mergeDLQForConsumerGroup',
                                 merge_DLQ_for_consumer_group_request)
def read_consumer_group(self, read_consumer_group_request):
    """Fetch consumer-group metadata from the frontend.

    :param read_consumer_group_request: the request object forwarded
        verbatim to the 'readConsumerGroup' endpoint.
    :return: the frontend's response.
    """
    return util.execute_frontend(self.tchannel,
                                 self.deployment_str,
                                 self.headers,
                                 self.timeout_seconds,
                                 'readConsumerGroup',
                                 read_consumer_group_request)
def read_destination(self, read_destination_request):
    """Fetch destination metadata from the frontend.

    :param read_destination_request: the request object forwarded
        verbatim to the 'readDestination' endpoint.
    :return: the frontend's response.
    """
    return util.execute_frontend(self.tchannel,
                                 self.deployment_str,
                                 self.headers,
                                 self.timeout_seconds,
                                 'readDestination',
                                 read_destination_request)