def test_result_discarded(self):
    """After a request is deleted, its result should be discarded."""
    import weakref
    from functools import partial

    def f():
        return numpy.zeros((10,), dtype=numpy.uint8) + 1

    w = [None]

    def onfinish(r, result):
        # Hold only a weak reference so we can detect when the result dies.
        w[0] = weakref.ref(result)

    req = Request(f)
    req.notify_finished(partial(onfinish, req))
    req.submit()
    req.wait()
    del req

    # The ThreadPool._Worker loop has a local reference (next_task),
    # so wait just a tic for the ThreadPool worker to cycle back to the
    # top of its loop (and discard the reference)
    time.sleep(0.1)
    assert w[0]() is None
def testWorkerThreadLoopProtection(self):
    """The worker threads should not die due to an exception raised within a request."""
    for worker in Request.global_thread_pool.workers:
        assert worker.is_alive(), "Something is wrong with this test. All workers should be alive."

    def always_fails():
        raise Exception()

    req = Request(always_fails)

    try:
        req.submit()
    except Exception:
        # FIX: narrowed from a bare `except:` so BaseException subclasses
        # (KeyboardInterrupt, SystemExit) are never swallowed here.
        if Request.global_thread_pool.num_workers > 0:
            raise
    else:
        if Request.global_thread_pool.num_workers == 0:
            # In the single-threaded debug mode, the exception should be raised within submit()
            assert False, "Expected to request to raise an Exception!"

    try:
        req.wait()
    except Exception:
        # FIX: narrowed from a bare `except:` (see above).
        pass
    else:
        if Request.global_thread_pool.num_workers > 0:
            # In multi-threaded mode, the exception should be re-raised by wait()
            # (the original comment incorrectly mentioned single-threaded mode here).
            assert False, "Expected to request to raise an Exception!"

    for worker in Request.global_thread_pool.workers:
        assert worker.is_alive(), "An exception was propagated to a worker run loop!"
def test_result_discarded(self):
    """After a request is deleted, its result should be discarded."""
    import weakref
    from functools import partial

    def f():
        return numpy.zeros((10,), dtype=numpy.uint8) + 1

    w = [None]

    def onfinish(r, result):
        w[0] = weakref.ref(result)

    req = Request(f)
    req.notify_finished(partial(onfinish, req))
    req.submit()
    req.wait()
    del req

    # The ThreadPool._Worker loop has a local reference (next_task), so wait
    # just a tic for the ThreadPool worker to cycle back to the top of its
    # loop (and discard the reference)
    time.sleep(0.1)
    assert w[0]() is None
def testWorkerThreadLoopProtection(self):
    """The worker threads should not die due to an exception raised within a request."""
    for worker in Request.global_thread_pool.workers:
        assert worker.is_alive(), "Something is wrong with this test. All workers should be alive."

    def always_fails():
        raise Exception()

    req = Request(always_fails)

    try:
        req.submit()
    except Exception:
        # FIX: narrowed from a bare `except:` so BaseException subclasses
        # (KeyboardInterrupt, SystemExit) are never swallowed here.
        if Request.global_thread_pool.num_workers > 0:
            raise
    else:
        if Request.global_thread_pool.num_workers == 0:
            # In the single-threaded debug mode, the exception should be raised within submit()
            assert False, "Expected to request to raise an Exception!"

    try:
        req.wait()
    except Exception:
        # FIX: narrowed from a bare `except:` (see above).
        pass
    else:
        if Request.global_thread_pool.num_workers > 0:
            # In multi-threaded mode, the exception should be re-raised by wait()
            # (the original comment incorrectly mentioned single-threaded mode here).
            assert False, "Expected to request to raise an Exception!"

    for worker in Request.global_thread_pool.workers:
        assert worker.is_alive(), "An exception was propagated to a worker run loop!"
def test_callbacks_before_wait_returns(self):
    """
    If the user adds callbacks to the request via notify_finished() BEFORE the
    request is submitted, then wait() should block for the completion of all
    those callbacks before returning.  Any callbacks added AFTER the request
    has already been submitted are NOT guaranteed to be executed before wait()
    returns, but they will still be executed.
    """
    def someQuickWork():
        return 42

    callback_results = []

    def slowCallback(n, result):
        time.sleep(0.1)
        callback_results.append(n)

    req = Request(someQuickWork)
    for tag in (1, 2, 3):
        req.notify_finished(partial(slowCallback, tag))

    result = req.wait()
    assert result == 42
    assert callback_results == [1, 2, 3], "wait() returned before callbacks were complete! Got: {}".format(callback_results)

    req.notify_finished(partial(slowCallback, 4))
    req.wait()
    assert callback_results == [1, 2, 3, 4], "Callback on already-finished request wasn't executed."
def test_callbacks_before_wait_returns(self):
    """
    If the user adds callbacks to the request via notify_finished() BEFORE the
    request is submitted, then wait() should block for the completion of all
    those callbacks before returning.  Any callbacks added AFTER the request
    has already been submitted are NOT guaranteed to be executed before wait()
    returns, but they will still be executed.
    """
    def someQuickWork():
        return 42

    callback_results = []

    def slowCallback(n, result):
        time.sleep(0.1)
        callback_results.append(n)

    req = Request(someQuickWork)
    req.notify_finished(partial(slowCallback, 1))
    req.notify_finished(partial(slowCallback, 2))
    req.notify_finished(partial(slowCallback, 3))

    result = req.wait()
    assert result == 42
    assert callback_results == [1,2,3], "wait() returned before callbacks were complete! Got: {}".format( callback_results )

    # A callback added after completion must still run (and wait() must see it).
    req.notify_finished(partial(slowCallback, 4))
    req.wait()
    assert callback_results == [1,2,3,4], "Callback on already-finished request wasn't executed."
def test_failed_request2(self):
    """
    A request is "failed" if it throws an exception while executing.
    The exception should be forwarded to ALL waiting requests, which should re-raise it.
    """
    class CustomRuntimeError(RuntimeError):
        pass

    def impossible_workload():
        time.sleep(0.2)
        raise CustomRuntimeError("Can't service your request")

    impossible_req = Request(impossible_workload)

    def wait_for_impossible():
        # This request will fail...
        impossible_req.wait()

        # Since there are some exception guards in the code we're testing,
        # spit something out to stderr just to be sure this error
        # isn't getting swallowed accidentally.
        sys.stderr.write("ERROR: Shouldn't get here.")
        assert False, "Shouldn't get here."

    req1 = Request(wait_for_impossible)
    req2 = Request(wait_for_impossible)

    failed_ids = []
    lock = threading.Lock()

    def handle_failed_req(req_id, failure_exc, exc_info):
        assert isinstance(failure_exc, CustomRuntimeError)
        with lock:
            failed_ids.append(req_id)

    req1.notify_failed(partial(handle_failed_req, 1))
    req2.notify_failed(partial(handle_failed_req, 2))

    req1.submit()
    req2.submit()

    for waiting_req in (req1, req2):
        try:
            waiting_req.wait()
        except RuntimeError:
            pass
        else:
            assert False, "Expected an exception from that request, but didn't get it."

    assert 1 in failed_ids
    assert 2 in failed_ids
def testRequestLock(self):
    """
    Test the special Request-aware lock.

    Launch 99 requests and threads that all must fight over access to the same list.
    The list will eventually be 0,1,2...99, and each request will append a single number to the list.
    Each request must wait its turn before it can append its number and finish.
    """
    # This test doesn't work if the request system is working in single-threaded 'debug' mode.
    # It depends on concurrent execution to make progress.  Otherwise it hangs.
    if Request.global_thread_pool.num_workers == 0:
        raise nose.SkipTest

    req_lock = RequestLock()
    l = [0]

    def append_n(n):
        # Spin until it is our turn, i.e. until the previous number has been appended.
        while True:
            with req_lock:
                if l[-1] == n - 1:
                    l.append(n)
                    return

    # Create 50 requests (the odd numbers)
    N = 50
    reqs = [Request(partial(append_n, i)) for i in range(1, 2 * N, 2)]

    # Create 49 threads (the even numbers)
    thrds = [threading.Thread(target=partial(append_n, i)) for i in range(2, 2 * N, 2)]

    # Submit in reverse order to ensure that no request finishes until they have all been started.
    # This proves that the requests really are being suspended.
    for req in reversed(reqs):
        req.submit()

    # Start all the threads
    for thrd in reversed(thrds):
        thrd.start()

    # All requests must finish
    for req in reqs:
        req.wait()

    # All threads should finish
    for thrd in thrds:
        thrd.join()

    assert l == list(range(100)), "Requests and/or threads finished in the wrong order!"
def test_submit_dependent_requests_should_execute_on_same_worker(self):
    # A request that waits on another request should end up on the same worker.
    inner = Request(Work(lambda: 42))
    outer = Request(Work(lambda: inner.wait()))
    outer.submit()
    assert outer.wait() == 42
    assert outer.assigned_worker in Request.global_thread_pool.workers
    assert outer.assigned_worker == inner.assigned_worker
def test_failed_request2(self):
    """
    A request is "failed" if it throws an exception while executing.
    The exception should be forwarded to ALL waiting requests, which should re-raise it.
    """
    class CustomRuntimeError(RuntimeError):
        pass

    def impossible_workload():
        time.sleep(0.2)
        raise CustomRuntimeError("Can't service your request")

    impossible_req = Request(impossible_workload)

    def wait_for_impossible():
        # This request will fail...
        impossible_req.wait()

        # Since there are some exception guards in the code we're testing,
        # spit something out to stderr just to be sure this error
        # isn't getting swallowed accidentally.
        sys.stderr.write("ERROR: Shouldn't get here.")
        assert False, "Shouldn't get here."

    req1 = Request(wait_for_impossible)
    req2 = Request(wait_for_impossible)

    failed_ids = []
    lock = threading.Lock()

    def handle_failed_req(req_id, failure_exc, exc_info):
        assert isinstance(failure_exc, CustomRuntimeError)
        with lock:
            failed_ids.append(req_id)

    req1.notify_failed(partial(handle_failed_req, 1))
    req2.notify_failed(partial(handle_failed_req, 2))

    req1.submit()
    req2.submit()

    try:
        req1.wait()
    except RuntimeError:
        pass
    else:
        assert False, "Expected an exception from that request, but didn't get it."

    try:
        req2.wait()
    except RuntimeError:
        pass
    else:
        assert False, "Expected an exception from that request, but didn't get it."

    assert 1 in failed_ids
    assert 2 in failed_ids
def testRequestLock(self):
    """
    Test the special Request-aware lock.

    Launch 99 requests and threads that all must fight over access to the same list.
    The list will eventually be 0,1,2...99, and each request will append a single number to the list.
    Each request must wait its turn before it can append its number and finish.
    """
    # This test doesn't work if the request system is working in single-threaded 'debug' mode.
    # It depends on concurrent execution to make progress.  Otherwise it hangs.
    if Request.global_thread_pool.num_workers == 0:
        raise nose.SkipTest

    req_lock = RequestLock()
    l = [0]

    def append_n(n):
        while True:
            with req_lock:
                if l[-1] == n - 1:
                    l.append(n)
                    return

    # Create 50 requests
    N = 50
    reqs = []
    for i in range(1, 2 * N, 2):
        reqs.append(Request(partial(append_n, i)))

    # Create 49 threads
    thrds = []
    for i in range(2, 2 * N, 2):
        thrds.append(threading.Thread(target=partial(append_n, i)))

    # Submit in reverse order to ensure that no request finishes until they have all been started.
    # This proves that the requests really are being suspended.
    for req in reversed(reqs):
        req.submit()

    # Start all the threads
    for thrd in reversed(thrds):
        thrd.start()

    # All requests must finish
    for req in reqs:
        req.wait()

    # All threads should finish
    for thrd in thrds:
        thrd.join()

    assert l == list(range(100)), "Requests and/or threads finished in the wrong order!"
def test_should_be_called_after_request_finishes(self):
    # The done-callback must fire only once the request has completed.
    done_cb = mock.Mock()
    req = Request(lambda: 42)
    req.add_done_callback(done_cb)
    done_cb.assert_not_called()
    req.submit()
    req.wait()
    done_cb.assert_called_once_with(req)
def test_if_request_finished_should_call_immidiatelly(self):
    # Subscribing after completion: the callback must run right away.
    done_cb = mock.Mock()

    def work():
        return 42

    req = Request(work)
    req.submit()
    req.wait()
    req.add_done_callback(done_cb)
    done_cb.assert_called_once_with(req)
def testSimpleRequestCondition(self):
    """
    Test the SimpleRequestCondition, which is like threading.Condition,
    but with a subset of the functionality. (See the docs for details.)
    """
    # FIX: remember the current pool size and restore it afterwards.
    # Previously this test permanently shrank the global pool to one worker,
    # leaking that state into every test that ran after it (the sibling
    # version of this test already restores the pool).
    num_workers = Request.global_thread_pool.num_workers
    Request.reset_thread_pool(num_workers=1)
    try:
        N_ELEMENTS = 10

        # It's tempting to simply use threading.Condition here,
        # but that doesn't quite work if the thread calling wait() is also a worker thread.
        # (threading.Condition uses threading.Lock() as its 'waiter' lock, which blocks the entire worker.)
        # cond = threading.Condition( RequestLock() )
        cond = SimpleRequestCondition()

        produced = []
        consumed = []

        def wait_for_all():
            def f(i):
                time.sleep(0.2 * random.random())
                with cond:
                    produced.append(i)
                    cond.notify()

            reqs = []
            for i in range(N_ELEMENTS):
                req = Request(partial(f, i))
                reqs.append(req)

            for req in reqs:
                req.submit()

            _consumed = consumed
            with cond:
                while len(_consumed) < N_ELEMENTS:
                    while len(_consumed) == len(produced):
                        cond.wait()
                    logger.debug("copying {} elements".format(
                        len(produced) - len(consumed)))
                    _consumed += produced[len(_consumed):]

        # Force the request to run in a worker thread.
        # This should catch failures that can occur if the Condition's "waiter" lock isn't a request lock.
        req = Request(wait_for_all)
        req.submit()

        # Now block for completion
        req.wait()

        logger.debug("produced: {}".format(produced))
        logger.debug("consumed: {}".format(consumed))
        assert set(consumed) == set(
            range(N_ELEMENTS)
        ), "Expected set(range(N_ELEMENTS)), got {}".format(consumed)
    finally:
        # Restore the pool size even if the assertions above fail.
        Request.reset_thread_pool(num_workers)
def test_lotsOfSmallRequests(self):
    """
    Fire off some reasonably large random number of nested requests.
    Mostly, this test ensures that the requests all complete without a hang.
    """
    handlerCounter = [0]
    handlerLock = threading.Lock()

    def completionHandler(result, req):
        logger.debug("Handing completion {}".format(result))
        handlerLock.acquire()
        handlerCounter[0] += 1
        handlerLock.release()
        req.calledHandler = True

    requestCounter = [0]
    requestLock = threading.Lock()
    allRequests = []

    # This closure randomly chooses to either (a) return immediately or (b) fire off more work
    def someWork(depth, force=False, i=-1):
        if depth > 0 and (force or random.random() > 0.5):
            requests = []
            for i in range(10):
                req = Request(partial(someWork, depth=depth - 1, i=i))
                req.notify_finished(partial(completionHandler, req=req))
                requests.append(req)
                allRequests.append(req)
                requestLock.acquire()
                requestCounter[0] += 1
                requestLock.release()
            for r in requests:
                r.wait()
        return requestCounter[0]

    req = Request(partial(someWork, depth=4, force=True))
    logger.debug("Waiting for requests...")
    req.wait()
    logger.debug("root request finished")

    # Handler should have been called once for each request we fired
    assert handlerCounter[0] == requestCounter[0]
    logger.debug("finished testLotsOfSmallRequests")

    for r in allRequests:
        assert r.finished
    logger.debug("waited for all subrequests")
def testSimpleRequestCondition(self):
    """
    Test the SimpleRequestCondition, which is like threading.Condition,
    but with a subset of the functionality. (See the docs for details.)
    """
    # Shrink the pool to a single worker for this test, restoring it at the end.
    num_workers = Request.global_thread_pool.num_workers
    Request.reset_thread_pool(num_workers=1)
    N_ELEMENTS = 10

    # It's tempting to simply use threading.Condition here,
    # but that doesn't quite work if the thread calling wait() is also a worker thread.
    # (threading.Condition uses threading.Lock() as its 'waiter' lock, which blocks the entire worker.)
    # cond = threading.Condition( RequestLock() )
    cond = SimpleRequestCondition()

    produced = []
    consumed = []

    def wait_for_all():
        def f(i):
            time.sleep(0.2 * random.random())
            with cond:
                produced.append(i)
                cond.notify()

        reqs = [Request(partial(f, i)) for i in range(N_ELEMENTS)]
        for req in reqs:
            req.submit()

        _consumed = consumed
        with cond:
            while len(_consumed) < N_ELEMENTS:
                while len(_consumed) == len(produced):
                    cond.wait()
                logger.debug("copying {} elements".format(len(produced) - len(consumed)))
                _consumed += produced[len(_consumed):]

    # Force the request to run in a worker thread.
    # This should catch failures that can occur if the Condition's "waiter" lock isn't a request lock.
    req = Request(wait_for_all)
    req.submit()

    # Now block for completion
    req.wait()

    logger.debug("produced: {}".format(produced))
    logger.debug("consumed: {}".format(consumed))
    assert set(consumed) == set(
        range(N_ELEMENTS)
    ), "Expected set(range(N_ELEMENTS)), got {}".format(consumed)

    Request.reset_thread_pool(num_workers)
def test_if_request_has_been_cancelled_callback_should_still_be_called(self):
    # Even a cancelled request must invoke its done-callback.
    done_cb = mock.Mock()
    req = Request(lambda: 42)
    req.cancel()
    req.add_done_callback(done_cb)
    req.submit()
    with pytest.raises(Request.InvalidRequestException):
        req.wait()
    done_cb.assert_called_once_with(req)
def test_lotsOfSmallRequests(self):
    """
    Fire off some reasonably large random number of nested requests.
    Mostly, this test ensures that the requests all complete without a hang.
    """
    handlerCounter = [0]
    handlerLock = threading.Lock()

    def completionHandler(result, req):
        logger.debug("Handing completion {}".format(result))
        handlerLock.acquire()
        handlerCounter[0] += 1
        handlerLock.release()
        req.calledHandler = True

    requestCounter = [0]
    requestLock = threading.Lock()
    allRequests = []

    # This closure randomly chooses to either (a) return immediately or (b) fire off more work
    def someWork(depth, force=False, i=-1):
        if depth > 0 and (force or random.random() > 0.5):
            requests = []
            for i in range(10):
                req = Request(partial(someWork, depth=depth - 1, i=i))
                req.notify_finished(partial(completionHandler, req=req))
                requests.append(req)
                allRequests.append(req)
                requestLock.acquire()
                requestCounter[0] += 1
                requestLock.release()
            for r in requests:
                r.wait()
        return requestCounter[0]

    req = Request(partial(someWork, depth=4, force=True))
    logger.debug("Waiting for requests...")
    req.wait()
    logger.debug("root request finished")

    # Handler should have been called once for each request we fired
    assert handlerCounter[0] == requestCounter[0]
    logger.debug("finished testLotsOfSmallRequests")

    for r in allRequests:
        assert r.finished
    logger.debug("waited for all subrequests")
def testRequestLock(self):
    """
    Test the special Request-aware lock.

    Launch 99 requests and threads that all must fight over access to the same list.
    The list will eventually be 0,1,2...99, and each request will append a single number to the list.
    Each request must wait its turn before it can append its number and finish.
    """
    req_lock = RequestLock()
    l = [0]

    def append_n(n):
        # Busy-wait for our predecessor to have appended its number.
        while True:
            with req_lock:
                if l[-1] == n - 1:
                    l.append(n)
                    return

    # Create 50 requests (odd numbers)
    reqs = [Request(partial(append_n, i)) for i in range(1, 100, 2)]

    # Create 49 threads (even numbers)
    thrds = [threading.Thread(target=partial(append_n, i)) for i in range(2, 100, 2)]

    # Submit in reverse order to ensure that no request finishes until they have all been started.
    # This proves that the requests really are being suspended.
    for req in reversed(reqs):
        req.submit()

    # Start all the threads
    for thrd in reversed(thrds):
        thrd.start()

    # All requests must finish
    for req in reqs:
        req.wait()

    # All threads should finish
    for thrd in thrds:
        thrd.join()

    assert l == list(range(100)), "Requests and/or threads finished in the wrong order!"
def test_early_cancel(self):
    """
    If you try to wait for a request after it's already been cancelled,
    you get a InvalidRequestException.
    """
    def f():
        pass

    req = Request(f)
    req.cancel()
    try:
        req.wait()
    except Request.InvalidRequestException:
        pass
    else:
        assert False, "Expected a Request.InvalidRequestException because we're waiting for a request that's already been cancelled."
def testRequestLock(self):
    """
    Test the special Request-aware lock.

    Launch 99 requests and threads that all must fight over access to the same list.
    The list will eventually be 0,1,2...99, and each request will append a single number to the list.
    Each request must wait its turn before it can append its number and finish.
    """
    req_lock = RequestLock()
    l = [0]

    def append_n(n):
        while True:
            with req_lock:
                if l[-1] == n - 1:
                    l.append(n)
                    return

    # Create 50 requests
    reqs = []
    for i in range(1, 100, 2):
        reqs.append(Request(partial(append_n, i)))

    # Create 49 threads
    thrds = []
    for i in range(2, 100, 2):
        thrds.append(threading.Thread(target=partial(append_n, i)))

    # Submit in reverse order to ensure that no request finishes until they have all been started.
    # This proves that the requests really are being suspended.
    for req in reversed(reqs):
        req.submit()

    # Start all the threads
    for thrd in reversed(thrds):
        thrd.start()

    # All requests must finish
    for req in reqs:
        req.wait()

    # All threads should finish
    for thrd in thrds:
        thrd.join()

    assert l == list(range(100)), "Requests and/or threads finished in the wrong order!"
def test_if_request_failed_callback_should_still_be_called(self):
    # A failing request must still invoke its done-callback.
    done_cb = mock.Mock()

    def work():
        raise Exception()

    req = Request(work)
    req.add_done_callback(done_cb)
    req.submit()
    with pytest.raises(Exception):
        req.wait()
    done_cb.assert_called_once_with(req)
def test_callWaitDuringCallback(self):
    """
    When using request.notify_finished(...) to handle request completions,
    the handler should be allowed to call request.wait() on the request
    that it's handling.
    """
    def handler(req, result):
        # wait() on an already-finished request must be legal here.
        req.wait()

    def workFn():
        pass

    req = Request(workFn)
    req.notify_finished(partial(handler, req))
    # req.submit()
    req.wait()
def test_callWaitDuringCallback(self):
    """
    When using request.notify_finished(...) to handle request completions,
    the handler should be allowed to call request.wait() on the request
    that it's handling.
    """
    def handler(req, result):
        req.wait()

    def workFn():
        pass

    req = Request(workFn)
    req.notify_finished(partial(handler, req))
    # req.submit()
    req.wait()
def test_request_timeout(self):
    """
    Test the timeout feature when calling wait() from a foreign thread.
    See wait() for details.
    """
    def slowWorkload():
        time.sleep(10.0)

    req = Request(slowWorkload)
    try:
        req.wait(0.5)
    except Request.TimeoutException:
        pass
    else:
        assert False, "Expected to get Request.TimeoutException"
def test_request_timeout(self):
    """
    Test the timeout feature when calling wait() from a foreign thread.
    See wait() for details.
    """
    def slowWorkload():
        # Deliberately much longer than the wait() timeout below.
        time.sleep(10.0)

    req = Request(slowWorkload)
    try:
        req.wait(0.5)
    except Request.TimeoutException:
        pass
    else:
        assert False, "Expected to get Request.TimeoutException"
def test_failed_request(self):
    """
    A request is "failed" if it throws an exception while executing.
    The exception should be forwarded to ALL waiting requests.
    """
    def impossible_workload():
        raise RuntimeError("Intentional exception.")

    req = Request(impossible_workload)
    try:
        req.wait()
    except RuntimeError:
        pass
    else:
        assert False, "Expected an exception from that request, but didn't get it."
def test_basic(self):
    """
    Fire a couple requests and check the answer they give.
    """
    def someWork():
        time.sleep(0.001)
        return "Hello,"

    callback_result = ['']

    def callback(result):
        callback_result[0] = result

    def test(s):
        req = Request(someWork)
        req.notify_finished(callback)
        s2 = req.wait()
        time.sleep(0.001)
        return s2 + s

    req = Request(partial(test, s=" World!"))
    req.notify_finished(callback)

    # Wait for the result
    assert req.wait() == "Hello, World!"  # Wait for it
    assert req.wait() == "Hello, World!"  # It's already finished, should be same answer
    assert callback_result[0] == "Hello, World!"  # From the callback

    requests = []
    for i in range(10):
        requests.append(Request(partial(test, s="hallo %d" % i)))

    for r in requests:
        r.wait()
def getBigArray(directExecute, recursionDepth):
    """
    Simulate the memory footprint of a series of computation steps.
    """
    logger.debug("Usage delta before depth {}: {}".format(recursionDepth, getMemoryIncrease()))

    if recursionDepth == 0:
        # A 500MB result
        result = numpy.zeros(shape=resultShape, dtype=numpy.uint8)
    else:
        req = Request(partial(getBigArray,
                              directExecute=directExecute,
                              recursionDepth=recursionDepth - 1))
        if not directExecute:
            # Force this request to be submitted to the thread pool,
            # not executed synchronously in this thread.
            req.submit()
        result = req.wait() + 1

    # Note that we expect there to be 2X memory usage here:
    # 1x for our result and 1x for the child, which hasn't been cleaned up yet.
    memory_increase = getMemoryIncrease()
    logger.debug("Usage delta after depth {}: {}".format(recursionDepth, memory_increase))
    assert memory_increase < 2.5 * resultSize, "Memory from finished requests didn't get freed!"

    return result
def test_uncancellable(self):
    """
    If a request is being waited on by a regular thread, it can't be cancelled.
    """
    def workload():
        time.sleep(0.1)
        return 1

    def big_workload():
        requests = [Request(workload) for _ in range(10)]
        result = 0
        for r in requests:
            result += r.wait()
        return result

    req = Request(big_workload)

    def attempt_cancel():
        time.sleep(1)
        req.cancel()

    # Start another thread that will try to cancel the request.
    # It won't have any effect because we're already waiting for it in a non-request thread.
    t = threading.Thread(target=attempt_cancel)
    t.start()

    result = req.wait()
    assert result == 10
    t.join()
def test_uncancellable(self):
    """
    If a request is being waited on by a regular thread, it can't be cancelled.
    """
    def workload():
        time.sleep(0.1)
        return 1

    def big_workload():
        result = 0
        requests = []
        for i in range(10):
            requests.append(Request(workload))
        for r in requests:
            result += r.wait()
        return result

    req = Request(big_workload)

    def attempt_cancel():
        time.sleep(1)
        req.cancel()

    # Start another thread that will try to cancel the request.
    # It won't have any effect because we're already waiting for it in a non-request thread.
    t = threading.Thread(target=attempt_cancel)
    t.start()

    result = req.wait()
    assert result == 10
    t.join()
def test_submit_should_assign_worker_and_execute(self):
    # Submitting should assign a pool worker and execute the work function.
    def work():
        return 42

    req = Request(work)
    req.submit()
    assert req.wait() == 42
    assert req.assigned_worker in Request.global_thread_pool.workers
def test_failed_request(self):
    """
    A request is "failed" if it throws an exception while executing.
    The exception should be forwarded to ALL waiting requests.
    """
    def impossible_workload():
        raise RuntimeError("Can't service your request")

    req = Request(impossible_workload)
    try:
        req.wait()
    except RuntimeError:
        pass
    else:
        assert False, "Expected an exception from that request, but didn't get it."
def test(s, destination=None,):
    # Old-API helper: wait for someWork(), then append `s` into `destination`.
    req = Request(someWork)
    req.onFinish(callback)
    prefix = req.wait()[0]
    time.sleep(0.001)
    if destination is None:
        destination = [""]
    destination[0] = prefix + s
    return destination
def test_request_timeout(self):
    """
    Test the timeout feature when calling wait() from a foreign thread.
    See wait() for details.
    """
    # The timeout path requires real worker threads; skip in debug mode.
    if Request.global_thread_pool.num_workers == 0:
        raise nose.SkipTest

    def slowWorkload():
        time.sleep(10.0)

    req = Request(slowWorkload)
    try:
        req.wait(0.5)
    except Request.TimeoutException:
        pass
    else:
        assert False, "Expected to get Request.TimeoutException"
def test_request_timeout(self):
    """
    Test the timeout feature when calling wait() from a foreign thread.
    See wait() for details.
    """
    if Request.global_thread_pool.num_workers == 0:
        raise nose.SkipTest

    def slowWorkload():
        time.sleep(10.0)

    req = Request(slowWorkload)
    try:
        req.wait(0.5)
    except Request.TimeoutException:
        pass
    else:
        assert False, "Expected to get Request.TimeoutException"
def test_old_api_support(self):
    """
    For now, the request_rewrite supports the old interface, too.
    """
    def someWork(destination=None):
        if destination is None:
            destination = [""]
        time.sleep(0.001)
        destination[0] = "Hello,"
        return destination

    callback_result = [[]]

    def callback(result):
        callback_result[0] = result[0]

    def test(s, destination=None,):
        req = Request(someWork)
        req.onFinish(callback)
        s2 = req.wait()[0]
        time.sleep(0.001)
        if destination is None:
            destination = [""]
        destination[0] = s2 + s
        return destination

    req = Request(partial(test, s=" World!"))
    preAllocatedResult = [""]
    req.writeInto(preAllocatedResult)
    req.notify(callback)

    # Wait for the result
    assert req.wait()[0] == "Hello, World!"  # Wait for it
    assert callback_result[0] == "Hello, World!"  # From the callback
    assert preAllocatedResult[0] == req.wait()[0], "This might fail if the request was started BEFORE writeInto() was called"

    requests = []
    for i in range(10):
        requests.append(Request(partial(test, s="hallo %d" % i)))

    for r in requests:
        r.wait()
def test_signal_failed_should_be_called_on_exception(self, broken_fn):
    # NOTE(review): the `req` returned by work_req() is immediately replaced
    # below — presumably only `work` is needed here; confirm against work_req().
    work, req = self.work_req(broken_fn)
    recv = mock.Mock()
    req = Request(work)
    req.notify_failed(recv)
    req.submit()
    with pytest.raises(TExc):
        assert req.wait() == 42
    recv.assert_called_once()
    assert isinstance(recv.call_args[0][0], TExc)
def test_result_discarded(self):
    """After a request is deleted, its result should be discarded."""
    import weakref
    from functools import partial

    def f():
        return numpy.zeros((10,), dtype=numpy.uint8) + 1

    w = [None]

    def onfinish(r, result):
        # Keep only a weak reference so deletion of the request can free the result.
        w[0] = weakref.ref(result)

    req = Request(f)
    req.notify_finished(partial(onfinish, req))
    req.submit()
    req.wait()
    del req
    assert w[0]() is None
def testWorkerThreadLoopProtection(self):
    """The worker threads should not die due to an exception raised within a request."""
    for worker in Request.global_thread_pool.workers:
        assert worker.is_alive(), "Something is wrong with this test. All workers should be alive."

    def always_fails():
        raise Exception("This is an intentional exception for this test.")

    req = Request(always_fails)

    # Must add a default fail handler or else it will log an exception by default.
    req.notify_failed(lambda *args: None)

    try:
        req.submit()
    except Exception:
        # FIX: narrowed from a bare `except:` so BaseException subclasses
        # (KeyboardInterrupt, SystemExit) are never swallowed here.
        if Request.global_thread_pool.num_workers > 0:
            raise
    else:
        if Request.global_thread_pool.num_workers == 0:
            # In the single-threaded debug mode, the exception should be raised within submit()
            assert False, "Expected to request to raise an Exception!"

    try:
        req.wait()
    except Exception:
        # FIX: narrowed from a bare `except:` (see above).
        pass
    else:
        if Request.global_thread_pool.num_workers > 0:
            # In multi-threaded mode, the exception should be re-raised by wait()
            # (the original comment incorrectly mentioned single-threaded mode here).
            assert False, "Expected to request to raise an Exception!"

    for worker in Request.global_thread_pool.workers:
        assert worker.is_alive(), "An exception was propagated to a worker run loop!"
def test_signal_failed_called_even_when_subscription_happened_after_completion(self, broken_fn):
    # Subscribing notify_failed() AFTER the request failed must still fire the handler.
    work, req = self.work_req(broken_fn)
    recv = mock.Mock()
    req = Request(work)
    req.submit()
    with pytest.raises(TExc):
        assert req.wait() == 42
    req.notify_failed(recv)
    recv.assert_called_once()
    assert isinstance(recv.call_args[0][0], TExc)
def testWorkerThreadLoopProtection(self):
    """The worker threads should not die due to an exception raised within a request."""
    for worker in Request.global_thread_pool.workers:
        assert worker.is_alive(), "Something is wrong with this test. All workers should be alive."

    def always_fails():
        raise Exception("This is an intentional exception for this test.")

    req = Request(always_fails)

    # Must add a default fail handler or else it will log an exception by default.
    req.notify_failed(lambda *args: None)

    try:
        req.submit()
    except Exception:
        # FIX: narrowed from a bare `except:` so BaseException subclasses
        # (KeyboardInterrupt, SystemExit) are never swallowed here.
        if Request.global_thread_pool.num_workers > 0:
            raise
    else:
        if Request.global_thread_pool.num_workers == 0:
            # In the single-threaded debug mode, the exception should be raised within submit()
            assert False, "Expected to request to raise an Exception!"

    try:
        req.wait()
    except Exception:
        # FIX: narrowed from a bare `except:` (see above).
        pass
    else:
        if Request.global_thread_pool.num_workers > 0:
            # In multi-threaded mode, the exception should be re-raised by wait()
            # (the original comment incorrectly mentioned single-threaded mode here).
            assert False, "Expected to request to raise an Exception!"

    for worker in Request.global_thread_pool.workers:
        assert worker.is_alive(), "An exception was propagated to a worker run loop!"
def test_block_during_calback(self):
    """
    It is valid for request finish handlers to fire off and wait for requests.
    This tests that feature.
    """
    def workload():
        time.sleep(0.1)
        return 1

    total_result = [0]

    def handler(result):
        req = Request(workload)
        total_result[0] = result + req.wait()  # Waiting on some other request from WITHIN a request callback

    req = Request(workload)
    req.notify_finished(handler)
    assert req.wait() == 1
    assert total_result[0] == 2
def getBigArray(directExecute, recursionDepth):
    """
    Simulate the memory footprint of a series of computation steps.
    """
    logger.debug("Usage delta before depth {}: {} MB".format(recursionDepth, getMemoryIncreaseMb()))

    if recursionDepth == 0:
        # Base case: allocate one large result array (size fixed by resultShape).
        result = numpy.zeros(shape=resultShape, dtype=numpy.uint8)
    else:
        req = Request(partial(getBigArray,
                              directExecute=directExecute,
                              recursionDepth=recursionDepth - 1))
        if not directExecute:
            # Force this request to be submitted to the thread pool,
            # not executed synchronously in this thread.
            req.submit()
        result = req.wait() + 1

    # Note that we expect there to be 2X memory usage here:
    # 1x for our result and 1x for the child, which hasn't been cleaned up yet.
    memory_increase_mb = getMemoryIncreaseMb()
    logger.debug("Usage delta after depth {}: {} MB".format(recursionDepth, memory_increase_mb))
    assert memory_increase_mb < 2.5 * resultSize, "Memory from finished requests didn't get freed!"

    return result
def test(s):
    # Helper: run someWork() as a request and append `s` to its result.
    req = Request(someWork)
    req.notify_finished(callback)
    prefix = req.wait()
    time.sleep(0.001)
    return prefix + s
def test_pool_results_discarded_REQUEST_CONTEXT():
    # Run the shared implementation from within a request context.
    root_req = Request(_impl_test_pool_results_discarded)
    root_req.submit()
    root_req.wait()
def test_dont_cancel_shared_request(self):
    """
    Test that a request isn't cancelled if it has requests pending for it.
    """
    cancelled_requests = []

    def f1():
        time.sleep(1)
        return "RESULT"

    r1 = Request(f1)
    r1.notify_cancelled(partial(cancelled_requests.append, 1))

    def f2():
        try:
            return r1.wait()
        except:  # noqa: E722 — deliberately broad: must observe the cancellation exception
            cancelled_requests.append(2)

    r2 = Request(f2)

    def f3():
        try:
            return r1.wait()
        except:  # noqa: E722 — deliberately broad (see above)
            cancelled_requests.append(3)

    r3 = Request(f3)

    def otherThread():
        r2.wait()

    t = threading.Thread(target=otherThread)
    t.start()
    r3.submit()

    time.sleep(0.5)

    # By now both r2 and r3 are waiting for the result of r1
    # Cancelling r3 should not cancel r1.
    r3.cancel()

    t.join()  # Wait for r2 to finish

    time.sleep(0.5)

    assert r1.started
    assert r1.finished
    assert not r1.cancelled  # Not cancelled, even though we cancelled a request that was waiting for it.
    assert 1 not in cancelled_requests

    assert r2.started
    assert r2.finished
    assert not r2.cancelled  # Not cancelled.
    assert 1 not in cancelled_requests
    assert r2.wait() == "RESULT"

    assert r3.started
    assert r3.finished
    assert r3.cancelled  # Successfully cancelled.
    assert 3 in cancelled_requests
def handler(result):
    # Waiting on some other request from WITHIN a request callback
    req = Request(workload)
    total_result[0] = result + req.wait()
def handler(result):
    inner = Request(workload)
    total_result[0] = result + inner.wait()  # Waiting on some other request from WITHIN a request callback
def test_dont_cancel_shared_request(self):
    """
    Test that a request isn't cancelled if it has requests pending for it.
    """
    # Needs concurrent workers; cannot run in single-threaded debug mode.
    if Request.global_thread_pool.num_workers == 0:
        raise nose.SkipTest

    cancelled_requests = []

    def f1():
        time.sleep(1)
        return "RESULT"

    r1 = Request(f1)
    r1.notify_cancelled(partial(cancelled_requests.append, 1))

    def f2():
        try:
            return r1.wait()
        except:  # noqa: E722 — deliberately broad: must observe the cancellation exception
            cancelled_requests.append(2)

    r2 = Request(f2)

    def f3():
        try:
            return r1.wait()
        except:  # noqa: E722 — deliberately broad (see above)
            cancelled_requests.append(3)

    r3 = Request(f3)

    def otherThread():
        r2.wait()

    t = threading.Thread(target=otherThread)
    t.start()
    r3.submit()

    time.sleep(0.5)

    # By now both r2 and r3 are waiting for the result of r1
    # Cancelling r3 should not cancel r1.
    r3.cancel()

    t.join()  # Wait for r2 to finish

    time.sleep(0.5)

    assert r1.started
    assert r1.finished
    assert not r1.cancelled  # Not cancelled, even though we cancelled a request that was waiting for it.
    assert 1 not in cancelled_requests

    assert r2.started
    assert r2.finished
    assert not r2.cancelled  # Not cancelled.
    assert 1 not in cancelled_requests
    assert r2.wait() == "RESULT"

    assert r3.started
    assert r3.finished
    assert r3.cancelled  # Successfully cancelled.
    assert 3 in cancelled_requests
def test_impl(directExecute):
    # Kick off the recursive memory-footprint simulation and verify the result.
    root_req = Request(partial(getBigArray, directExecute, recursionDepth=5))
    result = root_req.wait()
    assert (result == 5).all()