def __init__(self, init_hdl=None, **init_kwargs):
    """
    :param init_hdl: initializer for the worker controller in the child
        process. Defaults to ``default_worker_initializer`` (which is
        expected to create a ControllerPool).
    :param init_kwargs: keyword arguments forwarded to the controller
        initializer in the child process.
    """
    # Inter-process communication channel pair (parent end / child end).
    self._parent_channel, self._child_channel = create_process_channel_pairs()
    self._process = None

    # Cross-process synchronization events; both start in the set state
    # (no queued work, all workers idle).
    self._workers_empty_queue_event = ProcessEvent()
    self._workers_idle_event = ProcessEvent()
    self._workers_empty_queue_event.set()
    self._workers_idle_event.set()

    # Initializer (and its kwargs) for the child-process worker
    # controller pool; fall back to the module default when not given.
    self.__init_hdl = init_hdl or default_worker_initializer
    self.__init_kwargs = init_kwargs

    # Events dispatched but not yet finished, keyed by event id.
    self._unfinished_events = {}
    # Monotonic counter used to assign event ids.
    self._event_count = 0

    # Pre-built virtual instances so the child process's worker threads
    # can be driven as if they were local objects.
    self.instances = {
        'workers': VirtualInstance(self, 'workers'),
        'bri_worker': VirtualInstance(self, 'bri_worker'),
    }
def __init__(self, configs, cli_options=None, cfg_patches=None, cli_args=None,
             no_local=False, log_handlers=None, wait_lock=False, files=None,
             ammo_file=None, api_start=False, manager=None, debug=False):
    """Set up a tank run: merge configs, build the core, initialize
    logging, and refuse to start if another run holds the lock.

    :param configs: config file paths to combine.
    :param api_start: True when started through the API; switches the
        interrupt event and shared info storage to multiprocessing
        primitives (``manager`` must then be a multiprocessing Manager).
    :param debug: force debug logging regardless of the config option.
    :raises LockError: when the lock dir is already locked and
        ``ignore_lock`` is not set.
    """
    self.api_start = api_start
    self.wait_lock = wait_lock
    self.log_handlers = log_handlers if log_handlers is not None else []
    self.files = [] if files is None else files
    self.ammo_file = ammo_file
    self.config_paths = configs
    # Process-safe primitives only when run via the API (child process);
    # plain thread primitives otherwise.
    self.interrupted = ProcessEvent() if api_start else ThreadEvent()
    self.info = TankInfo(manager.dict()) if api_start else TankInfo(dict())
    # Config merge must precede core construction; logging needs the core
    # to resolve the 'debug' option.
    self.config_list = self._combine_configs(configs, cli_options, cfg_patches, cli_args, no_local)
    self.core = TankCore(self.config_list, self.interrupted, self.info)
    self.folder = self.init_folder()
    self.init_logging(debug or self.core.get_option(self.core.SECTION, 'debug'))
    is_locked = Lock.is_locked(self.core.lock_dir)
    # NOTE(review): 'debug' above reads via self.core.get_option with
    # self.core.SECTION, while this reads self.core.config.get_option with
    # self.SECTION — confirm both spellings are intentional.
    if is_locked and not self.core.config.get_option(
            self.SECTION, 'ignore_lock'):
        raise LockError(is_locked)
def __init__(self, p1, p2):
    """Cross-process queue state built on two pipe endpoints."""
    # Pipe endpoints carrying the actual payload.
    self._p1 = p1
    self._p2 = p2
    # Counting must be shared across processes, so a semaphore is the
    # only option here: its value is simply the put/get difference.
    self._unfinished_tasks = Semaphore(0)
    # Guards access to the shared endpoints/state.
    self._lock = ProcessLock()
    # Empty-queue event; join() waits on this.
    self._empty = ProcessEvent()
def __init__(self, lane_departure_callback):
    # Invoked when a lane departure is detected (from the update thread).
    self.lane_departure_callback = lane_departure_callback
    self.running = False
    # Queues for exchanging frames/results with the lane process.
    self.out_queue = ProcessQueue()
    self.in_queue = ProcessQueue()
    self.terminate_event = ProcessEvent()
    self.lane_process = Process(target=self.LaneProcess)
    # NOTE(review): this rebinds the attribute name over the bound method
    # `update_thread`. The Thread target is captured before the rebind,
    # so it still runs, but the method becomes unreachable afterwards.
    self.update_thread = threading.Thread(target=self.update_thread)
    self.running = True
    # Both workers are started from the constructor.
    self.lane_process.start()
    self.update_thread.start()
def start(self):
    """Spawn the test RPC node in a child process and give it a brief
    moment to come up before returning."""
    self.shutdown_flag = ProcessEvent()
    node_kwargs = {
        "shutdown_flag": self.shutdown_flag,
        "lock": self.lock,
        "port": self.port,
        "shared": self.shared,
    }
    self.process = Process(target=run_test_rpc_node, kwargs=node_kwargs)
    self.process.start()
    # Short grace period so the node can begin listening before callers
    # try to talk to it.
    time.sleep(0.1)
def __init__(self, algorithm, sortable_list, on_sort_callback, sleeptime,
             name="running_sorting_algorithm"):
    """Wire up the sorting worker process plus the listener thread that
    forwards its progress events to ``on_sort_callback``."""
    self.__event = Event()
    self.__process_event = ProcessEvent()
    # One pipe end stays here for the listener; the other goes to the child.
    self.__pipe, process_end_pipe = Pipe()
    listener_args = (on_sort_callback, self.__pipe, self.__event)
    self.__listener = Thread(target=listening, args=listener_args)
    worker_args = (algorithm, sortable_list, process_end_pipe,
                   self.__process_event, sleeptime)
    self.__process = Process(target=observed_sorting, args=worker_args)
    self.__process.name = name
def get_stop_flag(self):
    """Return a fresh process-shared event to be used as a stop flag."""
    return ProcessEvent()
def __init__(self):
    # Process-shared event backing this object's signalling.
    self._event = ProcessEvent()
def __init__(self, jobStore):
    """Launch the background process that aggregates stats/logging
    records from the given job store."""
    # Signalled to ask the aggregator process to shut down.
    self._stop = ProcessEvent()
    aggregator = Process(target=self.statsAndLoggingAggregatorProcess,
                         args=(jobStore, self._stop))
    self._worker = aggregator
    aggregator.start()
def __init__(self, name, outputQueue):
    # Explicit init of both bases. NOTE(review): bypasses the MRO (no
    # super()); keep in sync if the class hierarchy changes.
    Process.__init__(self)
    Manager.__init__(self, name, outputQueue)
    # Set when this process should stop.
    self.stopped = ProcessEvent()
def __init__(self):
    # Distributor backed by process-safe queue and event primitives.
    super(DistributorProcess, self).__init__(ProcessQueue(), ProcessEvent())