class CaptureProcess(multiprocessing.Process):
    """A multiprocessing.Process that captures the target's stdout or stderr.

    The child process runs ``target`` with the chosen standard stream
    redirected into a multiprocessing queue; the parent can then drain
    captured text via :meth:`get_output`.
    """

    def __init__(self, target=None, mode='stdout', args=(), kwargs=None):
        """Specialized process module to capture standard output or standard
        error from functions.

        Arguments:
            target: Function to run asynchronously and capture output from
            mode: Whether to capture standard output ('stdout') or standard
                error ('stderr')
            args: Tuple for the target invocation
            kwargs: Dictionary of keyword arguments for the target invocation

        Raises:
            ValueError: if ``mode`` is neither 'stdout' nor 'stderr'.
        """
        # BUG FIX: the original used a mutable default `kwargs={}`, which is
        # shared across every instance; use a None sentinel instead.
        if kwargs is None:
            kwargs = {}
        super().__init__(target=target, args=args, kwargs=kwargs)
        # Pick the contextlib redirector matching the requested stream.
        if mode == 'stdout':
            self.redirect = contextlib.redirect_stdout
        elif mode == 'stderr':
            self.redirect = contextlib.redirect_stderr
        else:
            raise ValueError(
                'Expected mode to be stdout or stderr instead of {}'.format(
                    mode))
        # A raw multiprocessing queue, duck-typed as a writable file object:
        # redirect_stdout/redirect_stderr only require a .write() method,
        # so every write from the child becomes a queue item in the parent.
        self._queue = Queue(ctx=multiprocessing.get_context())
        setattr(self._queue, 'write', self._queue.put)

    def run(self):
        """Run the target in the child with its stream redirected to the queue."""
        with self.redirect(self._queue):
            super().run()

    def get_output(self):
        """Return the next captured chunk, or None if nothing is buffered."""
        try:
            return self._queue.get_nowait()
        except Empty:
            return None
def reader(ioloop):
    """Generator coroutine: run the sensor-reading subprocess and forward its
    JSON output.

    Spawns ``reader_py`` as a child process, drains its stdout through a
    background thread + queue, parses each line as JSON, and schedules
    ``send_update`` (and periodically ``update_db``) on ``ioloop``.  The
    ``yield gen.sleep(...)`` calls indicate this is driven as a Tornado-style
    coroutine.  Restarts the subprocess after 5 s on any failure.

    Arguments:
        ioloop: Event loop providing ``add_callback`` for thread-safe
            scheduling.
    """
    while True:
        try:
            cmd = [sys.executable, reader_py, sensor_name]
            if simulate:
                cmd.append('simulate')
            # this subprocess creation with pipes works on Unix and Windows!
            proc = subprocess.Popen(cmd, bufsize=1, universal_newlines=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            q = Queue()
            t = Thread(target=enque_output, args=(proc.stdout, q))
            t.daemon = True
            t.start()
            tick = 1
            while True:
                try:
                    # get next line from subprocess
                    line = q.get_nowait().strip()
                # BUG FIX: was a bare `except:`.  Narrowed so that
                # GeneratorExit / KeyboardInterrupt are no longer swallowed;
                # queue.Empty (the expected case) is an Exception subclass.
                except Exception:
                    yield gen.sleep(1)  # no output yet; wait 1 second
                    continue
                print('Got JSON: %r' % line)
                try:
                    measurement = json.loads(line)
                    ioloop.add_callback(send_update, measurement)
                    # Persist every db_save-th measurement when enabled.
                    if db_use and (tick % db_save) == 0:
                        ioloop.add_callback(update_db, USBPORT, line)
                    tick += 1
                # BUG FIX: was a bare `except:` (see above).
                except Exception:
                    print_exc(sys._getframe().f_code.co_name, 'invalid JSON: ')
                    yield gen.sleep(1)  # wait 1 second
                    continue
        # BUG FIX: was a bare `except:` wrapping the yields above — closing
        # the generator raised GeneratorExit at a yield point, which the bare
        # except caught before yielding again, producing
        # "RuntimeError: generator ignored GeneratorExit".
        except Exception:
            print_exc(sys._getframe().f_code.co_name)
            yield gen.sleep(5)  # wait 5 seconds before restarting subprocess
def _worker(module: str, strategy: Dict[str, Any], output: Dict[str, Any],
            tasks_to_accomplish: mpq.Queue, lock: mps.Lock,
            verbosity: int) -> bool:
    """Worker function running simulation queues.

    Pulls tasks off ``tasks_to_accomplish`` until it is empty, running each
    one via a freshly created simulation object and saving results to the
    output store.  Usable both as the main process ('main') and as a
    multiprocessing worker (detected via the type of ``lock``).

    Arguments:
        module: Name of simulation module to be run
        strategy: Serialized strategy (dict) reloaded locally per worker
        output: Dictionary containing output information
        tasks_to_accomplish: Queue of remaining tasks
        lock: Multiprocessing lock (used for parallel simulations only)
        verbosity: Verbosity level

    Returns:
        True when tasks are completed
    """
    # pylint: disable=no-member
    # Running in parallel iff we were handed a real multiprocessing lock.
    parallel = isinstance(lock, mp.synchronize.Lock)
    if parallel:
        this = mp.current_process().name
    else:
        this = 'main'
    if verbosity > 1 and parallel:
        print(indent2 + 'starting ' + this)

    # create local copy of strategy object (each worker deserializes its own)
    strategy = Strategy.load(strategy)
    variations = strategy.variations  # pylint: disable=no-member
    tasks = set(variations.keys())

    # create local copy of output object (None disables saving/restarts)
    if isinstance(output, dict):
        out = Output.from_dict(output)
    else:
        out = None

    other = None      # data of the previously completed task (restart source)
    reschedule = 0    # count of tasks pushed back; bounds the loop below
    while reschedule < len(tasks):
        try:
            # retrieve next simulation task
            task, config = tasks_to_accomplish.get_nowait()
        except queue.Empty:
            # no tasks left
            if verbosity > 1 and parallel:
                print(indent2 + 'terminating ' + this)
            break
        try:
            # create object
            obj = Simulation.from_module(module)
            base = strategy.base(task)
            restart = None
            if obj.has_restart and out and base:
                # Look for an existing result to restart from; guard the
                # output store with the lock when running in parallel.
                if parallel:
                    with lock:
                        done = set(out.dir())
                        restart = out.load_like(base, other)
                else:
                    done = set(out.dir())
                    restart = out.load_like(base, other)
                invalid = done - tasks  # empty if labels follow task names
                if restart is None and not invalid:
                    # need to pick another task: no restart data available
                    # yet, so push this task back and try a different one.
                    if verbosity > 1:
                        print(indent2 +
                              'rescheduling {} ({})'.format(task, this))
                    tasks_to_accomplish.put((task, config))
                    reschedule += 1
                    continue
            if verbosity > 0:
                msg = indent1 + 'running `{}` ({})'
                print(msg.format(task, this))
            # run task (fresh run or restart from prior data)
            if restart is None:
                obj.run(config)
            else:
                obj.restart(restart, config)
            data = obj.data
            errored = False
        except Exception as err:
            # Convert exception to warning so one failed task does not kill
            # the worker; record the error in place of the data.
            # NOTE(review): if Simulation.from_module itself raised, `obj` is
            # unbound here and type(obj) raises NameError — confirm upstream
            # that construction cannot fail.
            msg = ("Simulation of '{}' for '{}' failed "
                   "with error message:\n{}").format(
                type(obj).__name__, task, err)
            warnings.warn(msg, RuntimeWarning)
            data = {task: (type(err).__name__, str(err))}
            errored = True
        # save output (lock-guarded in parallel mode)
        # NOTE(review): this checks obj.data but saves `data`; on an errored
        # run obj.data may be empty so the error record is skipped — verify
        # this is intentional.
        if out and obj.data:
            if parallel:
                with lock:
                    out.save(data, entry=task, variation=variations[task],
                             errored=errored)
            else:
                out.save(data, entry=task, variation=variations[task],
                         errored=errored)
        other = obj.data
    return True
def _fetch_single_message(self, actor_id, queue: Queue): try: target, message = queue.get_nowait() self._dispatch_single_message(target, actor_id, message) except Empty: logger.debug("nothing to fetch")