def test_failed_request2(self):
    """
    A request is "failed" if it throws an exception while executing.
    The exception should be forwarded to ALL waiting requests, which
    should re-raise it.
    """
    class CustomRuntimeError(RuntimeError):
        pass

    def impossible_workload():
        time.sleep(0.2)
        raise CustomRuntimeError("Can't service your request")

    impossible_req = Request(impossible_workload)

    def wait_for_impossible():
        # This request will fail...
        impossible_req.wait()

        # Since there are some exception guards in the code we're testing,
        # spit something out to stderr just to be sure this error
        # isn't getting swallowed accidentally.
        sys.stderr.write("ERROR: Shouldn't get here.")
        assert False, "Shouldn't get here."

    req1 = Request(wait_for_impossible)
    req2 = Request(wait_for_impossible)

    # Collect the ids of the requests whose failure callbacks fired.
    failed_ids = []
    lock = threading.Lock()

    def handle_failed_req(req_id, failure_exc):
        # The original exception type must be forwarded intact.
        assert isinstance(failure_exc, CustomRuntimeError)
        with lock:
            failed_ids.append(req_id)

    req1.notify_failed(partial(handle_failed_req, 1))
    req2.notify_failed(partial(handle_failed_req, 2))

    req1.submit()
    req2.submit()

    # Both waiters must re-raise the workload's exception.
    for waiting_req in (req1, req2):
        try:
            waiting_req.wait()
        except RuntimeError:
            pass
        else:
            assert False, "Expected an exception from that request, but didn't get it."

    # Both failure callbacks must have fired.
    assert 1 in failed_ids
    assert 2 in failed_ids
def testRequestLock(self):
    """
    Test the special Request-aware lock.

    Launch 99 requests and threads that all must fight over access to the
    same list.  The list will eventually be 0,1,2...99, and each request
    will append a single number to the list.  Each request must wait its
    turn before it can append its number and finish.
    """
    req_lock = RequestLock()
    shared_list = [0]

    def append_n(n):
        # Spin until it's our turn: our predecessor's value must already
        # be the last element before we may append our own.
        while True:
            with req_lock:
                if shared_list[-1] == n - 1:
                    shared_list.append(n)
                    return

    # 50 requests take the odd numbers...
    reqs = [Request(partial(append_n, i)) for i in range(1, 100, 2)]

    # ...and 49 plain threads take the even numbers.
    thrds = [threading.Thread(target=partial(append_n, i)) for i in range(2, 100, 2)]

    # Submit in reverse order to ensure that no request finishes until they
    # have all been started.
    # This proves that the requests really are being suspended.
    for req in reversed(reqs):
        req.submit()

    # Start all the threads
    for thrd in reversed(thrds):
        thrd.start()

    # All requests must finish
    for req in reqs:
        req.wait()

    # All threads should finish
    for thrd in thrds:
        thrd.join()

    assert shared_list == list(range(100)), "Requests and/or threads finished in the wrong order!"
def test_cancel_basic(self):
    """
    Start a workload and cancel it.
    Verify that it was actually cancelled before all the work was finished.

    Fixes vs. original: removed the unused ``counter_lock`` local, and
    removed the ``for r in requests: assert not r.cancelled`` loop that sat
    after ``assert False`` and was therefore unreachable dead code.
    """
    def workload():
        time.sleep(0.1)
        return 1

    # Mutable cells so the nested closures can record progress.
    got_cancel = [False]
    workcounter = [0]

    def big_workload():
        try:
            requests = []
            for i in range(100):
                requests.append(Request(workload))

            for r in requests:
                workcounter[0] += r.wait()

            assert False, "Shouldn't get to this line.  This test is designed so that big_workload should be cancelled before it finishes all its work"
        except Request.CancellationException:
            # Expected: the cancellation propagated into this workload.
            got_cancel[0] = True

    completed = [False]
    def handle_complete(result):
        completed[0] = True

    req = Request(big_workload)
    req.notify_finished(handle_complete)
    req.submit()

    # Let it get partway through its work, then cancel.
    time.sleep(.5)
    req.cancel()

    assert req.cancelled

    time.sleep(2)
    # A cancelled request must not fire its "finished" callback.
    assert not completed[0]
    assert got_cancel[0]

    # Make sure this test is functioning properly:
    # The cancellation should have occurred in the middle (not before the request even got started)
    # If not, then adjust the timing of the cancellation, above.
    assert workcounter[0] != 0
    assert workcounter[0] != 100
def testWorkerThreadLoopProtection(self):
    """
    The worker threads should not die due to an exception raised within
    a request.

    Fix vs. original: the bare ``except:`` around ``req.wait()`` also
    swallowed ``SystemExit``/``KeyboardInterrupt``; narrowed it to
    ``except Exception:``, which is all the failing workload can raise here.
    """
    # Sanity check: all workers must be alive before we start.
    for worker in Request.global_thread_pool.workers:
        assert worker.is_alive(), "Something is wrong with this test.  All workers should be alive."

    def always_fails():
        raise Exception()

    req = Request(always_fails)
    req.submit()

    try:
        req.wait()
    except Exception:
        # Expected: the workload's exception is re-raised by wait().
        pass
    else:
        assert False, "Expected to request to raise an Exception!"

    # The exception must have been contained within the request, leaving
    # the pool's worker run-loops intact.
    for worker in Request.global_thread_pool.workers:
        assert worker.is_alive(), "An exception was propagated to a worker run loop!"
def getBigArray(directExecute, recursionDepth):
    """
    Simulate the memory footprint of a series of computation steps.

    Recurses ``recursionDepth`` levels; the base case allocates a large
    zero-filled array (size set by the module-level ``resultShape`` --
    the original comment called it "A 500GB result"), and each level above
    adds 1 to the child's result.  After each level we assert that memory
    growth stays bounded, i.e. finished child requests were freed.
    """
    logger.debug( "Usage delta before depth {}: {} MB".format(recursionDepth, getMemoryIncreaseMb() ) )

    if recursionDepth == 0:
        # Base case: allocate the big result directly.
        result = numpy.zeros(shape=resultShape, dtype=numpy.uint8)
    else:
        child = Request( partial(getBigArray, directExecute=directExecute, recursionDepth=recursionDepth-1) )
        if not directExecute:
            # Force this request to be submitted to the thread pool,
            # not executed synchronously in this thread.
            child.submit()
        result = child.wait() + 1

    # Note that we expect there to be 2X memory usage here:
    # 1x for our result and 1x for the child, which hasn't been cleaned up yet.
    memory_increase_mb = getMemoryIncreaseMb()
    logger.debug( "Usage delta after depth {}: {} MB".format(recursionDepth, memory_increase_mb ) )
    assert memory_increase_mb < 2.5*resultSize, "Memory from finished requests didn't get freed!"

    return result
def test_dont_cancel_shared_request(self):
    """
    Test that a request isn't cancelled if it has requests pending for it.

    Fix vs. original: the r2 assertion group repeated
    ``assert 1 not in cancelled_requests`` (a copy-paste of the r1 check);
    it now checks ``2 not in cancelled_requests``, i.e. that r2's own
    failure path never ran.
    """
    cancelled_requests = []

    def f1():
        time.sleep(1)
        return "RESULT"
    r1 = Request(f1)
    r1.notify_cancelled( partial(cancelled_requests.append, 1) )

    def f2():
        try:
            return r1.wait()
        except:
            # NOTE(review): bare except kept deliberately -- the framework's
            # cancellation exception type is defined elsewhere.
            cancelled_requests.append(2)
    r2 = Request(f2)

    def f3():
        try:
            return r1.wait()
        except:
            cancelled_requests.append(3)
    r3 = Request(f3)

    def otherThread():
        r2.wait()
    t = threading.Thread(target=otherThread)
    t.start()
    r3.submit()

    time.sleep(0.5)

    # By now both r2 and r3 are waiting for the result of r1
    # Cancelling r3 should not cancel r1.
    r3.cancel()

    t.join() # Wait for r2 to finish

    time.sleep(0.5)

    assert r1.started
    assert r1.finished
    assert not r1.cancelled # Not cancelled, even though we cancelled a request that was waiting for it.
    assert 1 not in cancelled_requests

    assert r2.started
    assert r2.finished
    assert not r2.cancelled # Not cancelled.
    assert 2 not in cancelled_requests
    assert r2.wait() == "RESULT"

    assert r3.started
    assert r3.finished
    assert r3.cancelled # Successfully cancelled.
    assert 3 in cancelled_requests