def catches_exceptions(self):
    # Induce exception by submitting a bad queue obj
    t = EHThread(target=self.worker, args=[None])
    t.start()
    t.join()
    wrapper = t.exception()
    ok_(isinstance(wrapper, ExceptionWrapper))
    eq_(wrapper.kwargs, {'args': [None], 'target': self.worker})
    eq_(wrapper.type, AttributeError)
    ok_(isinstance(wrapper.value, AttributeError))

def catches_exceptions(self):
    # Induce exception by submitting a bad queue obj
    t = EHThread(target=self.worker, args=[None])
    t.start()
    t.join()
    wrapper = t.exception()
    assert isinstance(wrapper, ExceptionWrapper)
    assert wrapper.kwargs == {"args": [None], "target": self.worker}
    assert wrapper.type == AttributeError
    assert isinstance(wrapper.value, AttributeError)

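# Both variants of catches_exceptions above exercise the same contract: a
# thread wrapper that captures exceptions raised by its target and hands them
# back via exception(). Below is a minimal sketch of that contract,
# reconstructed only from the assertions in the tests (wrapper.kwargs,
# wrapper.type, wrapper.value) rather than copied from the real
# implementation; treating EHThread as an alias for ExceptionHandlingThread
# and the worker body are likewise assumptions.
import sys
import threading
from collections import namedtuple

ExceptionWrapper = namedtuple("ExceptionWrapper", "kwargs type value traceback")


class ExceptionHandlingThread(threading.Thread):
    def __init__(self, **kwargs):
        super(ExceptionHandlingThread, self).__init__(**kwargs)
        # Keep the instantiation kwargs so a caller can map a captured
        # exception back to the target/args that produced it.
        self.kwargs = kwargs
        self.exc_info = None

    def run(self):
        try:
            super(ExceptionHandlingThread, self).run()
        except BaseException:
            # Stash (type, value, traceback) instead of letting the thread
            # die silently.
            self.exc_info = sys.exc_info()

    def exception(self):
        # None when the target ran cleanly, an ExceptionWrapper otherwise.
        if self.exc_info is None:
            return None
        return ExceptionWrapper(self.kwargs, *self.exc_info)


EHThread = ExceptionHandlingThread  # assumed alias used by the tests


# Hypothetical worker matching the tests: it expects a Queue-like object, so
# passing args=[None] makes the .put() attribute lookup raise AttributeError
# inside the thread.
def worker(queue):
    queue.put(7)
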
def run(self, *args, **kwargs):
    results = GroupResult()
    queue = Queue()
    threads = []
    for cxn in self:
        my_kwargs = dict(
            cxn=cxn,
            queue=queue,
            args=args,
            kwargs=kwargs,
        )
        thread = ExceptionHandlingThread(
            target=thread_worker,
            kwargs=my_kwargs,
        )
        threads.append(thread)
    for thread in threads:
        thread.start()
    for thread in threads:
        # TODO: configurable join timeout
        # TODO: (in sudo's version) configurability around interactive
        # prompting resulting in an exception instead, as in v1
        thread.join()
    # Get non-exception results from queue
    while not queue.empty():
        # TODO: io-sleep? shouldn't matter if all threads are now joined
        cxn, result = queue.get(block=False)
        # TODO: outstanding musings about how exactly aggregate results
        # ought to ideally operate... heterogeneous obj like this, multiple
        # objs, ??
        results[cxn] = result
    # Get exceptions from the threads themselves.
    # TODO: in a non-thread setup, this would differ, e.g.:
    # - a queue if using multiprocessing
    # - some other state-passing mechanism if using e.g. coroutines
    # - ???
    excepted = False
    for thread in threads:
        wrapper = thread.exception()
        if wrapper is not None:
            # Outer kwargs is Thread instantiation kwargs, inner is kwargs
            # passed to thread target/body.
            cxn = wrapper.kwargs['kwargs']['cxn']
            results[cxn] = wrapper.value
            excepted = True
    if excepted:
        raise GroupException(results)
    return results

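# run() above constrains thread_worker only indirectly: it must accept the
# cxn/queue/args/kwargs names passed via my_kwargs, and whatever it enqueues
# must unpack as a (cxn, result) tuple in the queue-draining loop. A minimal
# sketch satisfying that contract (an assumption, not the project's verbatim
# helper) would be:
def thread_worker(cxn, queue, args, kwargs):
    # Execute the group's command against this one connection and report the
    # outcome back on the shared queue.
    result = cxn.run(*args, **kwargs)
    queue.put((cxn, result))


# Hypothetical caller-side view: on success run() returns the GroupResult
# mapping connections to results; on any per-connection exception it raises
# GroupException, whose first constructor argument (see the raise above) is
# that same mapping, with exception objects standing in for failures.
#
#   try:
#       results = group.run("uname -s", hide=True)  # `group` is hypothetical
#   except GroupException as exc:
#       for cxn, outcome in exc.args[0].items():
#           print(cxn, type(outcome).__name__)
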
def _run_and_kill(self, pty):
    def bg_body():
        # No reliable way to detect "an exception happened in the inner
        # child that wasn't KeyboardInterrupt", so best we can do is:
        # * Ensure exited 130
        # * Get mad if any output is seen that doesn't look like
        #   KeyboardInterrupt stacktrace (because it's probably some
        #   OTHER stacktrace).
        pty_flag = "--pty" if pty else "--no-pty"
        result = run(
            "inv -c signal_tasks expect SIGINT {0}".format(pty_flag),
            hide=True,
            warn=True,
        )
        bad_signal = result.exited != 130
        output = result.stdout + result.stderr
        had_keyboardint = 'KeyboardInterrupt' in output
        if bad_signal or (output and not had_keyboardint):
            err = "Subprocess had output and/or bad exit:"
            raise Exception("{0}\n\n{1}".format(err, result))

    # Execute sub-invoke in a thread so we can talk to its subprocess
    # while it's running.
    # TODO: useful async API for run() which at least wraps threads for
    # you, and exposes the inner PID
    bg = ExceptionHandlingThread(target=bg_body)
    bg.start()
    # Wait a bit to ensure subprocess is in the right state & not still
    # starting up (lolpython?). NOTE: if you bump this you must also
    # bump the `signal.alarm` call within _support/signaling.py!
    # Otherwise both tests will always fail as the ALARM fires
    # (resulting in "Never got any signals!" in debug log) before this
    # here sleep finishes.
    time.sleep(2 if PYPY else 1)
    # Send expected signal (use pty to ensure no intermediate 'sh'
    # processes on Linux; is of no consequence on Darwin.)
    interpreter = 'pypy' if PYPY else 'python'
    cmd = "pkill -INT -f \"{0}.*inv -c signal_tasks\""
    run(cmd.format(interpreter), pty=True)
    # Rejoin subprocess thread & check for exceptions
    bg.join()
    wrapper = bg.exception()
    if wrapper:
        # This is an ExceptionWrapper, not an actual exception, since
        # most places using ExceptionHandlingThread need access to the
        # thread's arguments & such. We just want the exception here.
        raise wrapper.value

def _run_and_kill(self, pty):
    def bg_body():
        # No reliable way to detect "an exception happened in the inner
        # child that wasn't KeyboardInterrupt", so best we can do is:
        # * Ensure exited 130
        # * Get mad if any output is seen that doesn't look like
        #   KeyboardInterrupt stacktrace (because it's probably some
        #   OTHER stacktrace).
        pty_flag = "--pty" if pty else "--no-pty"
        result = run(
            "inv -c signal_tasks expect SIGINT {0}".format(pty_flag),
            hide=True,
            warn=True,
        )
        bad_signal = result.exited != 130
        output = result.stdout + result.stderr
        had_keyboardint = 'KeyboardInterrupt' in output
        if bad_signal or (output and not had_keyboardint):
            err = "Subprocess had output and/or bad exit:"
            raise Exception("{0}\n\n{1}".format(err, result))

    # Execute sub-invoke in a thread so we can talk to its subprocess
    # while it's running.
    # TODO: useful async API for run() which at least wraps threads for
    # you, and exposes the inner PID
    bg = ExceptionHandlingThread(target=bg_body)
    bg.start()
    # Wait a bit to ensure subprocess is in the right state & not still
    # starting up (lolpython?). NOTE: if you bump this you must also
    # bump the `signal.alarm` call within _support/signaling.py!
    # Otherwise both tests will always fail as the ALARM fires
    # (resulting in "Never got any signals!" in debug log) before this
    # here sleep finishes.
    time.sleep(1)
    # Send expected signal (use pty to ensure no intermediate 'sh'
    # processes on Linux; is of no consequence on Darwin.)
    interpreter = 'pypy' if PYPY else 'python'
    cmd = "pkill -INT -f \"{0}.*inv -c signal_tasks\""
    run(cmd.format(interpreter), pty=True)
    # Rejoin subprocess thread & check for exceptions
    bg.join()
    wrapper = bg.exception()
    if wrapper:
        # This is an ExceptionWrapper, not an actual exception, since
        # most places using ExceptionHandlingThread need access to the
        # thread's arguments & such. We just want the exception here.
        raise wrapper.value

def _run_and_kill(self, pty):
    def bg_body():
        # TODO: use 'expect exit of 130' when that's implemented
        # Hide output by default, then assume an error & display stderr, if
        # there's any stderr. (There's no reliable way to tell the
        # subprocess raised an exception, because we'll be interrupted
        # before completion, and won't have access to its exit code.)
        pty_flag = "--pty" if pty else "--no-pty"
        result = run(
            "inv -c signal_tasks expect SIGINT {0}".format(pty_flag),
            hide=True,
            warn=True,
        )
        if result.exited != 130 or result.stdout or result.stderr:
            err = "Subprocess had output and/or bad exit:"
            raise Exception("{0}\n\n{1}".format(err, result))

    # Execute sub-invoke in a thread so we can talk to its subprocess
    # while it's running.
    # TODO: useful async API for run() which at least wraps threads for
    # you, and exposes the inner PID
    bg = ExceptionHandlingThread(target=bg_body)
    bg.start()
    # Wait a bit to ensure subprocess is in the right state & not still
    # starting up (lolpython?)
    time.sleep(1)
    # Send expected signal (use pty to ensure no intermediate 'sh'
    # processes on Linux; is of no consequence on Darwin.)
    run("pkill -INT -f \"python.*inv -c signal_tasks\"", pty=True)
    # Rejoin subprocess thread & check for exceptions
    bg.join()
    wrapper = bg.exception()
    if wrapper:
        # This is an ExceptionWrapper, not an actual exception, since
        # most places using ExceptionHandlingThread need access to the
        # thread's arguments & such. We just want the exception here.
        raise wrapper.value

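# All three _run_and_kill variants shell out to `inv -c signal_tasks expect
# SIGINT ...` and expect exit status 130 (128 + SIGINT). The support tasks
# file itself is not shown here; below is a speculative sketch, built only
# from the comments above (the signal.alarm call and the "Never got any
# signals!" message), of what a compatible `expect` task could look like.
# The task signature, handler bodies, and alarm timeout are assumptions, and
# parsing of the --pty/--no-pty flag is deliberately omitted.
import signal
import sys
import time

from invoke import task


def _alarm_handler(signum, frame):
    # Safety valve matching the NOTE in the helpers: if the caller's sleep
    # outlives this alarm, bail out with a non-130 exit so the outer
    # assertion fails loudly instead of the test hanging.
    print("Never got any signals!")
    sys.exit(1)


@task
def expect(c, signal_name):
    def got_it(signum, frame):
        # Conventional 128 + signal number, i.e. 130 for SIGINT, which is
        # the value bg_body asserts on.
        sys.exit(128 + signum)

    signal.signal(getattr(signal, signal_name), got_it)
    signal.signal(signal.SIGALRM, _alarm_handler)
    # Must stay ahead of the time.sleep() in _run_and_kill, per the NOTE
    # there; 5 seconds is an arbitrary placeholder.
    signal.alarm(5)
    # Idle until one of the handlers fires and exits the process.
    while True:
        time.sleep(0.1)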