def __init__(self):
    # set to request the record process to stop
    self.stopping = PEvent()
    # set when the record process completes
    self.stopped = PEvent()
    # set if recording process fails
    self.failed = PEvent()
    self.live = LiveViewState()
def watchdog_event_worker(self, test_watch, trigger_event_callable,
                          is_tree_watch=False, num_event_threshold=1,
                          trigger_args=(), trigger_kwargs=None):
    """
    Use a worker process to add the watch to the event_dispatcher and iterate
    over the event generator until an event matching the watch is generated,
    then exit with success.

    The parent process joins the worker with a timeout. After the join, if the
    worker exited with success, the event must have been generated within the
    timeout and we return True; if the exit code is None, the join must have
    timed out, so the parent terminates the worker and returns False.

    If num_event_threshold is specified, the worker only succeeds once that
    many events have been generated; this supports testing of recursive tree
    watches and load testing generally.

    trigger_event_callable will be called (with trigger_args and
    trigger_kwargs) in the parent context after the worker has added the
    watch, to perform whatever action triggers the kernel to generate the
    event(s).
    """
    # avoid a mutable default argument for trigger_kwargs
    if trigger_kwargs is None:
        trigger_kwargs = {}
    ret = False
    added_watch_event = PEvent()
    got_event = PEvent()
    worker_p = Process(group=None, target=self._event_worker, name=None,
                       args=(test_watch, num_event_threshold,
                             added_watch_event, got_event, is_tree_watch))
    worker_p.start()
    # don't call this until after the worker process has added the watch
    if added_watch_event.wait(self.worker_timeout):
        trigger_event_callable(*trigger_args, **trigger_kwargs)
        if got_event.wait(self.worker_timeout):
            ret = True
    worker_p.terminate()
    return ret
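# A hypothetical usage sketch of watchdog_event_worker from a test method:
# make_watch, the watched directory and the touched file are illustrative
# placeholders, not names taken from the listing above.
from pathlib import Path

def test_file_create_event(self):
    watch = self.make_watch('/tmp/watched_dir')  # hypothetical helper
    triggered = self.watchdog_event_worker(
        watch,
        trigger_event_callable=Path('/tmp/watched_dir/new_file').touch,
    )
    self.assertTrue(triggered)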
def t3():
    COUNT = 100000000
    event = PEvent()
    p1 = Process(target=countdown, args=(COUNT // 2, event))
    p2 = Process(target=countdown, args=(COUNT // 2, event))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
def t5():
    COUNT = 100000000
    event = PEvent()
    p1 = Process(target=countdown, args=(COUNT, event))
    p2 = Process(target=io_op, args=(COUNT, event, 'process.txt'))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
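# countdown and io_op are not defined in this listing; the sketches below are
# hypothetical workloads that merely fit the call signatures used by t3/t5
# above and the *_bindingcpu variants below.
def countdown(n, event):
    # CPU-bound busy loop; the shared event can signal completion to the parent
    while n > 0:
        n -= 1
    event.set()

def io_op(n, event, filename):
    # I/O-bound loop of small writes to the given file
    with open(filename, 'w') as f:
        for _ in range(n):
            f.write('x')
    event.set()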
def __init__(self):
    self.enabled: bool = False
    self.queue = Queue(10)
    self.stopped = PEvent()
    self.calibrated: bool = False
    # bias file to use for calibration
    self.bias_file: Optional[str] = None
def t3_bindingcpu():
    COUNT = 100000000
    event = PEvent()
    p1 = Process(target=countdown, args=(COUNT // 2, event))
    p2 = Process(target=countdown, args=(COUNT // 2, event))
    p1.start()
    BindingCpu.affinity_cpu(p1, 4)
    p2.start()
    BindingCpu.affinity_cpu(p2, 8)
    p1.join()
    p2.join()
def t5_bindingcpu():
    COUNT = 100000000
    event = PEvent()
    p1 = Process(target=countdown, args=(COUNT, event))
    p2 = Process(target=io_op, args=(COUNT, event, 'process.txt'))
    p1.start()
    BindingCpu.affinity_cpu(p1, 256)
    p2.start()
    BindingCpu.affinity_cpu(p2, 512)
    p1.join()
    p2.join()
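# BindingCpu is not shown in this listing. A minimal sketch, assuming
# affinity_cpu takes a CPU bitmask (4 == 0b100 -> CPU 2, 512 -> CPU 9) and
# pins the child process with os.sched_setaffinity (Linux only):
import os

class BindingCpu:
    @staticmethod
    def affinity_cpu(process, cpu_mask):
        # translate the bitmask into the set of CPU indices the OS call expects
        cpus = {i for i in range(cpu_mask.bit_length()) if cpu_mask & (1 << i)}
        os.sched_setaffinity(process.pid, cpus)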
def _parse_config(self):
    """
    Parse the YML config file and add components to the Car.
    """
    logger.info('Parsing config file to add car components...')
    if 'parallel' in self.config and self.config['parallel'] == 'process':
        self.parallel_process = True
        self.stop_event = PEvent()
        logger.info('Using process level parallel for components.')
    else:
        self.parallel_process = False
        self.stop_event = TEvent()
    for component in self.config['components']:
        comp_module = str(component)
        pwd = os.path.abspath('.')
        if pwd.endswith('src'):
            pwd = os.path.abspath('components')
        else:
            pwd = os.path.abspath('src/components')
        with open(pwd + '/' + comp_module + '.py', 'r') as f:
            # read() keeps the source intact; joining readlines() with '\n'
            # would double every newline
            source = f.read()
        p = ast.parse(source)
        classes = [
            node.name for node in ast.walk(p)
            if isinstance(node, ast.ClassDef)
        ]
        if len(classes) == 0:
            raise ValueError(
                "submodule '{}' contains no component class!".format(comp_module))
        if len(classes) == 1:
            # only 1 class defined
            self._add_component(component, classes[0])
        else:
            # multiple classes within module
            for cls in classes:
                if cls in self.config['components'][component]:
                    self._add_component(component, cls)
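# Because threading.Event and multiprocessing.Event share the same
# set/is_set/wait interface, a component can consume self.stop_event without
# knowing which parallel mode was configured. Sketch only: `step` is a
# hypothetical per-iteration method, not part of the source above.
def run(self, stop_event):
    while not stop_event.is_set():
        self.step()
        stop_event.wait(0.02)  # sleeps up to 20 ms, returns early when stop is set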
def __init__(self, *args, **kwargs):
    self.event = PEvent(*args, **kwargs)