def _run(self):
    launch_state = LaunchState()
    for p in self.task_descriptors:
        p.task_state = TaskState()

    # start all processes and collect their exit futures
    all_futures = OrderedDict()
    for index, p in enumerate(self.task_descriptors):
        if 'output_handler' in dir(p):
            p.output_handler.set_print_mutex(self.print_mutex)
            p.output_handler.set_line_prefix('[%s] ' % p.name)
        if 'protocol' in dir(p):
            yield from self._spawn_process(index)
            all_futures[p.protocol.exit_future] = index
        else:
            future = asyncio.ensure_future(p.coroutine)
            all_futures[future] = index

    while True:
        # stop if there are no more processes to run
        if not all_futures:
            break

        # wait for any process to finish
        kwargs = {
            'return_when': asyncio.FIRST_COMPLETED,
        }
        # when the event loop is not running in the main thread
        # wake up frequently and check if any subprocess has exited
        if not isinstance(threading.current_thread(), threading._MainThread):
            kwargs['timeout'] = 0.5
        yield from asyncio.wait(all_futures.keys(), **kwargs)

        # when the event loop is not running in the main thread
        # use custom logic to detect that subprocesses have exited
        if not isinstance(threading.current_thread(), threading._MainThread):
            for index, p in enumerate(self.task_descriptors):
                # only consider tasks which are not done yet
                if index not in all_futures.values():
                    continue
                # only subprocesses need special handling
                if 'transport' not in dir(p):
                    continue
                # transport.get_pid() sometimes failed due to transport._proc being None
                proc = p.transport.get_extra_info('subprocess')
                if os.name != 'nt':
                    # wait non-blocking on pid
                    pid = proc.pid
                    try:
                        pid, pid_rc = os.waitpid(pid, os.WNOHANG)
                    except ChildProcessError:
                        continue
                    if pid == 0:
                        # subprocess is still running
                        continue
                    p.returncode = pid_rc
                else:
                    # use the subprocess return code, only works on Windows
                    if proc.returncode is None:
                        continue
                    p.returncode = proc.returncode
                # trigger asyncio's internal process exit callback
                p.transport._process_exited(p.returncode)

        # collect done futures
        done_futures = [f for f in all_futures.keys() if f.done()]

        # collect return codes
        restart_indices = []
        for future in done_futures:
            index = all_futures[future]
            p = self.task_descriptors[index]

            # collect return code / exception from coroutine
            if 'coroutine' in dir(p):
                exp = future.exception()
                if exp:
                    p.task_state.exception = exp
                    p.task_state.returncode = 1
                    self._print_process_stacktrace(p.name, future, exp)
                else:
                    result = future.result()
                    p.task_state.returncode = result
                self._process_message(p, 'rc ' + str(p.task_state.returncode))

            # close transport
            if 'protocol' in dir(p):
                self._close_process(p)

            # remove future
            del all_futures[future]

            # call exit handler of done descriptors
            context = ExitHandlerContext(launch_state, p.task_state)
            p.exit_handler(context)
            if p.task_state.restart:
                restart_indices.append(index)

        if launch_state.teardown:
            with self.print_mutex:
                print('() tear down')
            break

        # restart processes if requested
        for index in restart_indices:
            p = self.task_descriptors[index]
            if 'protocol' in dir(p):
                p.task_state.restart_count += 1
                yield from self._spawn_process(index)
                all_futures[p.protocol.exit_future] = index

    # terminate all remaining processes
    if all_futures:
        # sending SIGINT to subprocess transport is not supported on Windows
        # https://groups.google.com/forum/#!topic/python-tulip/pr9fgX8Vh-A
        if os.name != 'nt':
            # sending SIGINT to remaining processes
            for index in all_futures.values():
                p = self.task_descriptors[index]
                if 'transport' in dir(p):
                    self._process_message(p, 'signal SIGINT')
                    try:
                        p.transport.send_signal(signal.SIGINT)
                    except ProcessLookupError:
                        pass
            yield from asyncio.wait(all_futures.keys(), timeout=self.sigint_timeout)

        # cancel coroutines
        for future, index in all_futures.items():
            p = self.task_descriptors[index]
            if 'coroutine' in dir(p):
                if not future.done():
                    self._process_message(p, 'cancel coroutine')
                    future.cancel()

        # sending SIGTERM to remaining processes
        for index in all_futures.values():
            p = self.task_descriptors[index]
            if 'protocol' in dir(p):
                if not p.protocol.exit_future.done():
                    self._process_message(p, 'signal SIGTERM')
                    try:
                        p.transport.send_signal(signal.SIGTERM)
                    except ProcessLookupError:
                        pass

        yield from asyncio.wait(all_futures.keys())

        # close all remaining processes
        for index in all_futures.values():
            p = self.task_descriptors[index]
            if 'transport' in dir(p):
                self._close_process(p)

        # call exit handler of remaining descriptors
        for future, index in all_futures.items():
            p = self.task_descriptors[index]

            # collect return code / exception from coroutine
            if 'coroutine' in dir(p):
                try:
                    exp = future.exception()
                    if exp:
                        p.task_state.exception = exp
                        p.task_state.returncode = 1
                        self._print_process_stacktrace(p.name, future, exp)
                    else:
                        result = future.result()
                        p.task_state.returncode = result
                except asyncio.CancelledError:
                    p.task_state.returncode = 0
                self._process_message(p, 'rc ' + str(p.task_state.returncode))

            context = ExitHandlerContext(launch_state, p.task_state)
            p.exit_handler(context)

    if launch_state.returncode is None:
        launch_state.returncode = 0
    return launch_state.returncode
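# The later revisions of _run below factor the inline exit detection above into a
# self.check_for_exited_subprocesses(all_futures) call whose body is not shown in
# this excerpt. The following is only a minimal sketch of what such a helper could
# look like, reconstructed from the inline logic of the version above; the
# free-function form and the parameter names are illustrative assumptions, not the
# actual method of this class, and it relies on the module-level os import used above.
def _check_for_exited_subprocesses_sketch(task_descriptors, all_futures):
    # poll every not-yet-done subprocess and, if it has exited,
    # feed the return code into asyncio so its exit future completes
    for index, p in enumerate(task_descriptors):
        # only consider tasks which are not done yet
        if index not in all_futures.values():
            continue
        # only subprocesses need special handling
        if 'transport' not in dir(p):
            continue
        proc = p.transport.get_extra_info('subprocess')
        if os.name != 'nt':
            # reap the child without blocking
            try:
                pid, pid_rc = os.waitpid(proc.pid, os.WNOHANG)
            except ChildProcessError:
                continue
            if pid == 0:
                # subprocess is still running
                continue
            returncode = pid_rc
        else:
            # on Windows rely on the Popen object's return code
            if proc.returncode is None:
                continue
            returncode = proc.returncode
        # trigger asyncio's internal process exit callback (private API)
        p.transport._process_exited(returncode)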
async def _run(self):
    self.interrupt_future = asyncio.Future()
    self.launch_complete.clear()
    self.processes_spawned.clear()
    launch_state = LaunchState()
    for p in self.task_descriptors:
        p.task_state = TaskState()

    # start all processes and collect their exit futures
    all_futures = OrderedDict()
    for index, p in enumerate(self.task_descriptors):
        if 'output_handler' in dir(p):
            p.output_handler.set_print_mutex(self.print_mutex)
            p.output_handler.set_line_prefix('[%s] ' % p.name)
        if 'protocol' in dir(p):
            try:
                await self._spawn_process(index)
            except Exception:
                # clean up already spawned processes
                await self._terminate_processes(launch_state, all_futures)
                self.launch_complete.set()
                return 1
            all_futures[p.protocol.exit_future] = index
        else:
            future = asyncio.ensure_future(p.coroutine)
            all_futures[future] = index
    # the processes are not guaranteed to be running yet, but at least you
    # can say that it was not possible for all of them to be running before
    # this point
    self.processes_spawned.set()

    while True:
        # stop if there are no more processes to run
        if not all_futures:
            break

        # wait for any process to finish
        kwargs = {
            'return_when': asyncio.FIRST_COMPLETED,
        }
        # when the event loop is not running in the main thread
        # wake up frequently and check if any subprocess has exited
        if not isinstance(threading.current_thread(), threading._MainThread):
            kwargs['timeout'] = 0.5
        await asyncio.wait(
            list(all_futures.keys()) + [self.interrupt_future], **kwargs)

        # if asynchronously interrupted, stop looping and shut down
        if self.interrupt_future.done():
            break

        # when the event loop is not running in the main thread
        # use custom logic to detect that subprocesses have exited
        if not isinstance(threading.current_thread(), threading._MainThread):
            self.check_for_exited_subprocesses(all_futures)

        # collect done futures
        done_futures = [f for f in all_futures.keys() if f.done()]

        # collect return codes
        restart_indices = []
        for future in done_futures:
            index = all_futures[future]
            p = self.task_descriptors[index]

            # collect return code / exception from coroutine
            if 'coroutine' in dir(p):
                exp = future.exception()
                if exp:
                    p.task_state.exception = exp
                    p.task_state.returncode = 1
                    self._print_process_stacktrace(p.name, future, exp)
                else:
                    result = future.result()
                    p.task_state.returncode = result
                self._process_message(p, 'rc ' + str(p.task_state.returncode))

            # close transport
            if 'protocol' in dir(p):
                self._close_process(p)

            # remove future
            del all_futures[future]

            # call exit handler of done descriptors
            context = ExitHandlerContext(launch_state, p.task_state)
            p.exit_handler(context)
            if p.task_state.restart:
                restart_indices.append(index)

        if launch_state.teardown:
            with self.print_mutex:
                print('() tear down')
            break

        # restart processes if requested
        for index in restart_indices:
            p = self.task_descriptors[index]
            if 'protocol' in dir(p):
                p.task_state.restart_count += 1
                await self._spawn_process(index)
                all_futures[p.protocol.exit_future] = index
    # end while True

    await self._terminate_processes(launch_state, all_futures)

    if launch_state.returncode is None:
        launch_state.returncode = 0
    self.launch_complete.set()
    return launch_state.returncode
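# The threading._MainThread checks in _run exist because asyncio's default child
# watcher historically relied on SIGCHLD, which can only be handled in the main
# thread, so subprocess exits are polled manually when the loop runs elsewhere.
# Below is a minimal sketch of driving _run from a background thread with its own
# event loop; run_launcher_in_thread and launcher are illustrative names, not part
# of this module, and the sketch assumes the module-level asyncio and threading imports.
def run_launcher_in_thread(launcher):
    # a dedicated loop, since the default loop belongs to the main thread
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(launcher._run())
    finally:
        loop.close()

# possible usage:
#   worker = threading.Thread(target=run_launcher_in_thread, args=(launcher,))
#   worker.start()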
async def _run(self):
    self.interrupt_future = asyncio.Future()
    self.launch_complete.clear()
    self.processes_spawned.clear()
    launch_state = LaunchState()
    for p in self.task_descriptors:
        p.task_state = TaskState()

    # start all processes and collect their exit futures
    all_futures = OrderedDict()
    for index, p in enumerate(self.task_descriptors):
        if 'output_handler' in dir(p):
            p.output_handler.set_print_mutex(self.print_mutex)
            p.output_handler.set_line_prefix('[%s] ' % p.name)
        if 'protocol' in dir(p):
            try:
                await self._spawn_process(index)
            except Exception as e:
                raise _TaskException(index, e)
            all_futures[p.protocol.exit_future] = index
        else:
            future = asyncio.ensure_future(p.coroutine)
            all_futures[future] = index
    # the processes are not guaranteed to be running yet, but at least you
    # can say that it was not possible for all of them to be running before
    # this point
    self.processes_spawned.set()

    while True:
        # stop if there are no more processes to run
        if not all_futures:
            break

        # wait for any process to finish
        kwargs = {
            'return_when': asyncio.FIRST_COMPLETED,
        }
        # when the event loop is not running in the main thread
        # wake up frequently and check if any subprocess has exited
        if not isinstance(threading.current_thread(), threading._MainThread):
            kwargs['timeout'] = 0.5
        await asyncio.wait(
            list(all_futures.keys()) + [self.interrupt_future], **kwargs)

        # if asynchronously interrupted, stop looping and shut down
        if self.interrupt_future.done():
            break

        # when the event loop is not running in the main thread
        # use custom logic to detect that subprocesses have exited
        if not isinstance(threading.current_thread(), threading._MainThread):
            self.check_for_exited_subprocesses(all_futures)

        # collect done futures
        done_futures = [f for f in all_futures.keys() if f.done()]

        # collect return codes
        restart_indices = []
        for future in done_futures:
            index = all_futures[future]
            p = self.task_descriptors[index]

            # collect return code / exception from coroutine
            if 'coroutine' in dir(p):
                exp = future.exception()
                if exp:
                    p.task_state.exception = exp
                    p.task_state.returncode = 1
                    self._print_process_stacktrace(p.name, future, exp)
                else:
                    result = future.result()
                    p.task_state.returncode = result
                self._process_message(p, 'rc ' + str(p.task_state.returncode))

            # close transport
            if 'protocol' in dir(p):
                self._close_process(p)

            # remove future
            del all_futures[future]

            # call exit handler of done descriptors
            context = ExitHandlerContext(launch_state, p.task_state)
            p.exit_handler(context)
            if p.task_state.restart:
                restart_indices.append(index)

        if launch_state.teardown:
            with self.print_mutex:
                print('() tear down')
            break

        # restart processes if requested
        for index in restart_indices:
            p = self.task_descriptors[index]
            if 'protocol' in dir(p):
                p.task_state.restart_count += 1
                await self._spawn_process(index)
                all_futures[p.protocol.exit_future] = index
    # end while True

    # terminate all remaining processes
    if all_futures:
        # sending SIGINT to subprocess transport is not supported on Windows
        # https://groups.google.com/forum/#!topic/python-tulip/pr9fgX8Vh-A
        if os.name != 'nt':
            # sending SIGINT to remaining processes
            for index in all_futures.values():
                p = self.task_descriptors[index]
                if 'transport' in dir(p):
                    self._process_message(p, 'signal SIGINT')
                    try:
                        p.transport.send_signal(signal.SIGINT)
                        p.task_state.signals_received.append(signal.SIGINT)
                    except ProcessLookupError:
                        pass

            if isinstance(threading.current_thread(), threading._MainThread):
                # if in the main thread, just wait
                await asyncio.wait(all_futures.keys(), timeout=self.sigint_timeout)
            else:
                # if not in the main thread, wake up periodically
                # to check the SIGINT status
                start = time.time()
                short_timeout = self.sigint_timeout / 10.0
                while time.time() - start < self.sigint_timeout:
                    # on each iteration check for recently exited subprocesses
                    self.check_for_exited_subprocesses(all_futures)
                    if not [fut for fut in all_futures.keys() if not fut.done()]:
                        # if all of the futures are done, stop waiting
                        break
                    await asyncio.wait(all_futures.keys(), timeout=short_timeout)

        # cancel coroutines
        for future, index in all_futures.items():
            p = self.task_descriptors[index]
            if 'coroutine' in dir(p):
                if not future.done():
                    self._process_message(p, 'cancel coroutine')
                    future.cancel()

        # sending SIGTERM to remaining processes
        for index in all_futures.values():
            p = self.task_descriptors[index]
            if 'protocol' in dir(p):
                if not p.protocol.exit_future.done():
                    self._process_message(p, 'signal SIGTERM')
                    try:
                        p.transport.send_signal(signal.SIGTERM)
                        p.task_state.signals_received.append(signal.SIGTERM)
                    except ProcessLookupError:
                        pass

        kwargs = {}
        if not isinstance(threading.current_thread(), threading._MainThread):
            # wake up periodically if we are not in the main thread
            kwargs['timeout'] = 0.5
        pending = None
        while pending is None or pending:
            # when the event loop is not running in the main thread
            # use custom logic to detect that subprocesses have exited
            if not isinstance(threading.current_thread(), threading._MainThread):
                self.check_for_exited_subprocesses(all_futures)
            # wait for futures to be complete
            _, pending = await asyncio.wait(all_futures.keys(), **kwargs)

        # close all remaining processes
        for index in all_futures.values():
            p = self.task_descriptors[index]
            if 'transport' in dir(p):
                self._close_process(p)

        # call exit handler of remaining descriptors
        for future, index in all_futures.items():
            p = self.task_descriptors[index]

            # collect return code / exception from coroutine
            if 'coroutine' in dir(p):
                try:
                    exp = future.exception()
                    if exp:
                        p.task_state.exception = exp
                        p.task_state.returncode = 1
                        self._print_process_stacktrace(p.name, future, exp)
                    else:
                        result = future.result()
                        p.task_state.returncode = result
                except asyncio.CancelledError:
                    p.task_state.returncode = 0
                self._process_message(p, 'rc ' + str(p.task_state.returncode))

            context = ExitHandlerContext(launch_state, p.task_state)
            p.exit_handler(context)

    if launch_state.returncode is None:
        launch_state.returncode = 0
    self.launch_complete.set()
    return launch_state.returncode
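# interrupt_future is created at the top of _run but is only awaited and checked
# there; nothing in this excerpt shows how it gets completed. Below is a minimal
# sketch of tripping it from another thread, assuming the caller holds references
# to the launcher and to the event loop running _run; interrupt_launch, loop and
# launcher are illustrative names, not part of this module.
def interrupt_launch(loop, launcher):
    def _interrupt():
        # guard against completing the future twice
        if not launcher.interrupt_future.done():
            launcher.interrupt_future.set_result(None)
    # Future.set_result is not thread-safe, so run it on the loop's own thread
    loop.call_soon_threadsafe(_interrupt)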