# NOTE(review): this top-level `__init__` appears to be a stray duplicate of
# TransactionExecutor.__init__ defined immediately below — confirm and remove.
def __init__(self, service, context_manager, settings_view_factory,
             scheduler_type, invalid_observers=None):
    """Initialize the executor's service handles and bookkeeping state.

    Args:
        service (Interconnect): The zmq internal interface.
        context_manager (ContextManager): Cache of state for tps.
        settings_view_factory (SettingsViewFactory): Read-only view of
            setting state.
        scheduler_type (str): Which scheduler to build ("serial" or
            "parallel" -- presumably; verify against create_scheduler).
        invalid_observers (list): Observers notified of invalid
            transactions; defaults to an empty list.

    Attributes:
        processor_manager (ProcessorManager): All of the registered
            transaction processors and a way to find the next one to
            send to.
    """
    self._service = service
    self._context_manager = context_manager
    self._settings_view_factory = settings_view_factory
    self._scheduler_type = scheduler_type
    # Avoid a shared mutable default by materializing the list here.
    self._invalid_observers = (
        invalid_observers if invalid_observers is not None else [])
    self.processor_manager = ProcessorManager(RoundRobinProcessorIterator)
    self._executing_threadpool = InstrumentedThreadPoolExecutor(
        max_workers=5, name='Executing')
    self._alive_threads = []
    self._lock = threading.Lock()
class TransactionExecutor:
    """Schedules and executes batches of transactions.

    Builds serial or parallel schedulers on request, hands each scheduler
    to a TransactionExecutorThread running on an internal thread pool, and
    health-checks registered transaction processors over the service.
    """

    def __init__(self, service, context_manager, settings_view_factory,
                 scheduler_type, invalid_observers=None):
        """
        Args:
            service (Interconnect): The zmq internal interface
            context_manager (ContextManager): Cache of state for tps
            settings_view_factory (SettingsViewFactory): Read-only view of
                setting state.
            scheduler_type (str): Either "serial" or "parallel"; any other
                value makes create_scheduler raise AssertionError.
            invalid_observers (list|None): Observers to notify of invalid
                transactions; None means an empty list.

        Attributes:
            processor_manager (ProcessorManager): All of the registered
                transaction processors and a way to find the next one to
                send to.
        """
        self._service = service
        self._context_manager = context_manager
        self.processor_manager = ProcessorManager(RoundRobinProcessorIterator)
        self._settings_view_factory = settings_view_factory
        self._executing_threadpool = \
            InstrumentedThreadPoolExecutor(max_workers=5, name='Executing')
        self._alive_threads = []
        self._lock = threading.Lock()
        # Guard against the mutable-default pitfall: build a fresh list
        # when the caller passes None.
        self._invalid_observers = ([] if invalid_observers is None
                                   else invalid_observers)
        self._scheduler_type = scheduler_type

    def create_scheduler(self, first_state_root, always_persist=False):
        """Create a scheduler of the configured type and start executing it.

        Args:
            first_state_root (str): Merkle root to start from; a falsy
                value means "use the context manager's first root".
            always_persist (bool): Passed through to the scheduler.

        Returns:
            Scheduler: The newly created (and already executing) scheduler.

        Raises:
            AssertionError: If the configured scheduler type is neither
                "serial" nor "parallel".
        """
        # Useful for a logical first state root of ""
        if not first_state_root:
            first_state_root = self._context_manager.get_first_root()

        if self._scheduler_type == "serial":
            scheduler = SerialScheduler(
                squash_handler=self._context_manager.get_squash_handler(),
                first_state_hash=first_state_root,
                always_persist=always_persist)
        elif self._scheduler_type == "parallel":
            scheduler = ParallelScheduler(
                squash_handler=self._context_manager.get_squash_handler(),
                first_state_hash=first_state_root,
                always_persist=always_persist)
        else:
            # BUGFIX: this message was a string literal broken across a raw
            # line break (a syntax error); rejoined via implicit
            # concatenation.
            raise AssertionError(
                "Scheduler type must be either serial or parallel. Current"
                " scheduler type is {}.".format(self._scheduler_type))

        self.execute(scheduler=scheduler)
        return scheduler

    def check_connections(self):
        """Asynchronously ping all registered transaction processors."""
        self._executing_threadpool.submit(self._check_connections)

    def _remove_done_threads(self):
        # Iterate over a snapshot so removal doesn't invalidate iteration;
        # the lock protects the shared list during each removal.
        for t in self._alive_threads.copy():
            if t.is_done():
                with self._lock:
                    self._alive_threads.remove(t)

    def _cancel_threads(self):
        # Cancel only threads still running; finished ones need no action.
        for t in self._alive_threads:
            if not t.is_done():
                t.cancel()

    def _check_connections(self):
        """Ping every processor connection, dropping unresponsive ones."""
        # This is not ideal, because it locks up the current thread while
        # waiting for the results.
        try:
            with self._lock:
                futures = {}
                for connection_id in \
                        self.processor_manager.get_all_processors():
                    fut = self._service.send(
                        validator_pb2.Message.PING_REQUEST,
                        network_pb2.PingRequest().SerializeToString(),
                        connection_id=connection_id)
                    futures[fut] = connection_id
                for fut in futures:
                    try:
                        fut.result(timeout=10)
                    except FutureTimeoutError:
                        LOGGER.warning(
                            "%s did not respond to the Ping, removing "
                            "transaction processor.", futures[fut])
                        self._remove_broken_connection(futures[fut])
        except Exception:  # pylint: disable=broad-except
            # Top-level boundary for the threadpool task: log, don't crash.
            LOGGER.exception('Unhandled exception while checking connections')

    def _remove_broken_connection(self, connection_id):
        # Propagate the removal to every live executor thread.
        for t in self._alive_threads:
            t.remove_broken_connection(connection_id)

    def execute(self, scheduler):
        """Run the given scheduler on a new executor thread.

        Args:
            scheduler (Scheduler): The scheduler whose transactions should
                be executed.
        """
        self._remove_done_threads()
        t = TransactionExecutorThread(
            service=self._service,
            context_manager=self._context_manager,
            scheduler=scheduler,
            processor_manager=self.processor_manager,
            settings_view_factory=self._settings_view_factory,
            invalid_observers=self._invalid_observers)
        self._executing_threadpool.submit(t.execute_thread)
        with self._lock:
            self._alive_threads.append(t)

    def stop(self):
        """Cancel live executor threads and shut the thread pool down."""
        self._cancel_threads()
        self._executing_threadpool.shutdown(wait=True)