Example 1
    def testExceptionPropagation(self):
        """
        When an exception is generated in a request, the exception should be propagated to all waiting threads.
        Also, the failure signal should fire.
        """
        class SpecialException(Exception):
            pass
         
        def always_fails():
            time.sleep(0.2)
            raise SpecialException()
         
        req1 = Request(always_fails)
 
 
        def wait_for_req1():
            req1.wait()
         
        req2 = Request(wait_for_req1)
        req3 = Request(wait_for_req1)
 
        signaled_exceptions = []
        def failure_handler(ex, exc_info):
            signaled_exceptions.append(ex)
             
        req2.notify_failed( failure_handler )
        req3.notify_failed( failure_handler )
 
        caught_exceptions = []        
        def wait_for_request(req):        
            try:
                req.wait()
            except SpecialException as ex:
                caught_exceptions.append(ex)
            except:
                raise # Got some other exception than the one we expected
            else:
                assert "Expected to get an exception.  Didn't get one."
 
        th2 = threading.Thread( target=partial( wait_for_request, req2 ) )
        th3 = threading.Thread( target=partial( wait_for_request, req3 ) )
         
        th2.start()
        th3.start()
         
        th2.join()
        th3.join()
         
        assert len(caught_exceptions) == 2, "Expected both requests to catch exceptions."
        assert len(signaled_exceptions) == 2, "Expected both requests to signal failure."
 
        assert isinstance( caught_exceptions[0], SpecialException ), "Caught exception was of the wrong type."
        assert caught_exceptions[0] == caught_exceptions[1] == signaled_exceptions[0] == signaled_exceptions[1]
         
        # Attempting to wait for a request that has already failed will raise the exception that causes the failure
        wait_for_request(req2)
         
        # Subscribing to notify_failed on a request that's already failed should call the failure handler immediately.
        req2.notify_failed( failure_handler )
        assert len(signaled_exceptions) == 3
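Condensed from the test above, a minimal sketch of the failure-propagation pattern. The import path is an assumption (these snippets never show their imports); the (exc, exc_info) handler signature and the re-raise on wait() follow the behavior exercised by the test.

# Minimal sketch of failure propagation (import path assumed).
from lazyflow.request import Request

def failing_work():
    raise RuntimeError("something went wrong")

req = Request(failing_work)

def on_failure(exc, exc_info):
    # Receives the exception that caused the failure, plus its exc_info tuple.
    print("request failed:", exc)

req.notify_failed(on_failure)

try:
    req.wait()              # executes the request and re-raises its exception here
except RuntimeError:
    pass                    # every waiter sees the same exception instance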
Example 2
    def test_basic(self):
        """
        Fire a couple requests and check the answer they give.
        """
        def someWork():
            time.sleep(0.001)
            return "Hello,"
         
        callback_result = ['']
        def callback(result):
            callback_result[0] = result
 
        def test(s):
            req = Request(someWork)
            req.notify_finished(callback)
            s2 = req.wait()
            time.sleep(0.001)
            return s2 + s
 
        req = Request( partial(test, s = " World!") )
        req.notify_finished(callback)
         
        # Wait for the result
        assert req.wait() == "Hello, World!"         # Wait for it
        assert req.wait() == "Hello, World!"         # It's already finished, should be same answer
        assert callback_result[0] == "Hello, World!" # From the callback
 
        requests = []
        for i in range(10):
            req = Request( partial(test, s = "hallo %d" %i) )
            requests.append(req)
 
        for r in requests:
            r.wait()
Example 3
    def test_failed_request2(self):
        """
        A request is "failed" if it throws an exception while executing.
        The exception should be forwarded to ALL waiting requests, which should re-raise it.
        """
        class CustomRuntimeError(RuntimeError):
            pass

        def impossible_workload():
            time.sleep(0.2)
            raise CustomRuntimeError("Can't service your request")

        impossible_req = Request(impossible_workload)

        def wait_for_impossible():
            # This request will fail...
            impossible_req.wait()

            # Since there are some exception guards in the code we're testing,
            #  spit something out to stderr just to be sure this error
            #  isn't getting swallowed accidentally.
            sys.stderr.write("ERROR: Shouldn't get here.")
            assert False, "Shouldn't get here."

        req1 = Request(wait_for_impossible)
        req2 = Request(wait_for_impossible)

        failed_ids = []
        lock = threading.Lock()

        def handle_failed_req(req_id, failure_exc, exc_info):
            assert isinstance(failure_exc, CustomRuntimeError)
            with lock:
                failed_ids.append(req_id)

        req1.notify_failed(partial(handle_failed_req, 1))
        req2.notify_failed(partial(handle_failed_req, 2))

        req1.submit()
        req2.submit()

        try:
            req1.wait()
        except RuntimeError:
            pass
        else:
            assert False, "Expected an exception from that request, but didn't get it."

        try:
            req2.wait()
        except RuntimeError:
            pass
        else:
            assert False, "Expected an exception from that request, but didn't get it."

        assert 1 in failed_ids
        assert 2 in failed_ids
Example 4
    def test_submit_dependent_requests_should_execute_on_same_worker(self):
        more_work = Request(Work(lambda: 42))

        req = Request(Work(lambda: more_work.wait()))
        req.submit()

        assert req.wait() == 42
        assert req.assigned_worker in Request.global_thread_pool.workers
        assert req.assigned_worker == more_work.assigned_worker
Example 5
    def testBasic(self):
        def work():
            time.sleep(0.2)

        reqs = []
        for _ in range(10):
            reqs.append(Request(work))

        pool = RequestPool()
        for req in reqs:
            pool.add(req)

        pool.submit()

        # All requests should be run in parallel...
        for req in reqs:
            assert req.started
            if Request.global_thread_pool.num_workers > 0:
                assert not req.finished

        pool.wait()

        # Should all be done.
        for req in reqs:
            assert req.finished
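A minimal RequestPool usage sketch, mirroring the calls made in the test above (import path assumed):

from functools import partial
from lazyflow.request import Request, RequestPool

def work(i):
    return i * 2

pool = RequestPool()
for i in range(10):
    pool.add(Request(partial(work, i)))

pool.submit()    # start all requests on the thread pool
pool.wait()      # block until every request in the pool has finished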
Example 6
    def testWorkerThreadLoopProtection(self):
        """
        The worker threads should not die due to an exception raised within a request.
        """
        for worker in Request.global_thread_pool.workers:
            assert worker.is_alive(), "Something is wrong with this test.  All workers should be alive."

        def always_fails():
            raise Exception()

        req = Request(always_fails)

        try:
            req.submit()
        except:
            if Request.global_thread_pool.num_workers > 0:
                raise
        else:
            if Request.global_thread_pool.num_workers == 0:
                # In the single-threaded debug mode, the exception should be raised within submit()
                assert False, "Expected to request to raise an Exception!"

        try:
            req.wait()
        except:
            pass
        else:
            if Request.global_thread_pool.num_workers > 0:
                # With worker threads, the exception should be re-raised within wait()
                assert False, "Expected the request to raise an Exception!"

        for worker in Request.global_thread_pool.workers:
            assert worker.is_alive(), "An exception was propagated to a worker run loop!"
Example 7
    def test_uncancellable(self):
        """
        If a request is being waited on by a regular thread, it can't be cancelled.
        """
        def workload():
            time.sleep(0.1)
            return 1

        def big_workload():
            result = 0
            requests = []
            for i in range(10):
                requests.append(Request(workload))

            for r in requests:
                result += r.wait()
            return result

        req = Request(big_workload)

        def attempt_cancel():
            time.sleep(1)
            req.cancel()

        # Start another thread that will try to cancel the request.
        # It won't have any effect because we're already waiting for it in a non-request thread.
        t = threading.Thread(target=attempt_cancel)
        t.start()
        result = req.wait()
        assert result == 10

        t.join()
Example 8
    def test_callbacks_before_wait_returns(self):
        """
        If the user adds callbacks to the request via notify_finished() BEFORE the request is submitted,
        then wait() should block for the completion of all those callbacks before returning.
        Any callbacks added AFTER the request has already been submitted are NOT guaranteed 
        to be executed before wait() returns, but they will still be executed.
        """
        def someQuickWork():
            return 42

        callback_results = []

        def slowCallback(n, result):
            time.sleep(0.1)
            callback_results.append(n)

        req = Request(someQuickWork)
        req.notify_finished(partial(slowCallback, 1))
        req.notify_finished(partial(slowCallback, 2))
        req.notify_finished(partial(slowCallback, 3))

        result = req.wait()
        assert result == 42
        assert callback_results == [
            1, 2, 3
        ], "wait() returned before callbacks were complete! Got: {}".format(
            callback_results)

        req.notify_finished(partial(slowCallback, 4))
        req.wait()
        assert callback_results == [
            1, 2, 3, 4
        ], "Callback on already-finished request wasn't executed."
Example 9
    def test_cancel_basic(self):
        """
        Start a workload and cancel it.  Verify that it was actually cancelled before all the work was finished.
        """
        if Request.global_thread_pool.num_workers == 0:
            raise nose.SkipTest

        def workload():
            time.sleep(0.1)
            return 1

        got_cancel = [False]
        workcounter = [0]

        def big_workload():
            try:
                requests = []
                for i in range(100):
                    requests.append(Request(workload))

                for r in requests:
                    workcounter[0] += r.wait()

                assert False, "Shouldn't get to this line.  This test is designed so that big_workload should be cancelled before it finishes all its work"
                for r in requests:
                    assert not r.cancelled
            except Request.CancellationException:
                got_cancel[0] = True
            except Exception as ex:
                import traceback
                traceback.print_exc()
                raise

        completed = [False]

        def handle_complete(result):
            completed[0] = True

        req = Request(big_workload)
        req.notify_finished(handle_complete)
        req.submit()

        while workcounter[0] == 0:
            time.sleep(0.001)

        req.cancel()
        time.sleep(1)

        assert req.cancelled

        assert not completed[0]
        assert got_cancel[0]

        # Make sure this test is functioning properly:
        # The cancellation should have occurred in the middle (not before the request even got started)
        # If not, then adjust the timing of the cancellation, above.
        assert workcounter[
            0] != 0, "This timing-sensitive test needs to be tweaked."
        assert workcounter[
            0] != 100, "This timing-sensitive test needs to be tweaked."
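A condensed sketch of the cancellation pattern used above (import path assumed). Cancelling the outer request raises Request.CancellationException inside the child wait() calls, which the workload may catch to note the cancellation; the sleep before cancel() is illustrative, just as the test itself is timing-sensitive.

import time
from lazyflow.request import Request

def child():
    time.sleep(0.01)
    return 1

def big_workload():
    total = 0
    try:
        for _ in range(100):
            total += Request(child).wait()
    except Request.CancellationException:
        pass                 # cancelled part-way through, as in the test above
    return total

req = Request(big_workload)
req.submit()
time.sleep(0.05)             # let a few children finish first (timing-sensitive)
req.cancel()                 # has no effect if a plain thread is already wait()ing on req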
Example 10
def work():
    unpause = threading.Event()
    children = []

    def work_fn():
        more_work = Request(lambda: 42)
        some_more_work = Request(lambda: 42)
        children.extend([more_work, some_more_work])
        unpause.wait()
        return more_work.wait()

    work = Work(work_fn)
    work.unpause = unpause

    work.request = Request(work)
    work.request.submit()
    work.children = children

    assert work.started.wait()

    yield work

    if not work.unpause.is_set():
        work.unpause.set()

    assert work.done.wait()
Example 11
    def test_result_discarded(self):
        """
        After a request is deleted, its result should be discarded.
        """
        import weakref
        from functools import partial

        def f():
            return numpy.zeros((10, ), dtype=numpy.uint8) + 1

        w = [None]

        def onfinish(r, result):
            w[0] = weakref.ref(result)

        req = Request(f)
        req.notify_finished(partial(onfinish, req))

        req.submit()
        req.wait()
        del req

        # The ThreadPool._Worker loop has a local reference (next_task),
        # so wait just a tic for the ThreadPool worker to cycle back to the top of its loop (and discard the reference)
        time.sleep(0.1)
        assert w[0]() is None
Example 12
def test_pool_with_failed_requests():
    """
    When one of the requests in a RequestPool fails,
    the exception should be propagated back to the caller of RequestPool.wait()
    """

    class ExpectedException(Exception):
        pass

    l = []
    pool = RequestPool()

    def workload(index):
        if index == 9:
            raise ExpectedException("Intentionally failed request.")
        l.append(index)

    for i in range(10):
        pool.add(Request(partial(workload, i)))

    try:
        pool.wait()
    except ExpectedException:
        pass
    else:
        assert False, "Expected the pool to fail.  Why didn't it?"

    time.sleep(0.2)
Example 13
        def getBigArray(directExecute, recursionDepth):
            """
            Simulate the memory footprint of a series of computation steps.
            """
            logger.debug("Usage delta before depth {}: {}".format(
                recursionDepth, getMemoryIncrease()))

            if recursionDepth == 0:
                # A 500MB result
                result = numpy.zeros(shape=resultShape, dtype=numpy.uint8)
            else:
                req = Request(
                    partial(getBigArray,
                            directExecute=directExecute,
                            recursionDepth=recursionDepth - 1))
                if not directExecute:
                    # Force this request to be submitted to the thread pool,
                    # not executed synchronously in this thread.
                    req.submit()
                result = req.wait() + 1

            # Note that we expect there to be 2X memory usage here:
            #  1x for our result and 1x for the child, which hasn't been cleaned up yet.
            memory_increase = getMemoryIncrease()
            logger.debug("Usage delta after depth {}: {}".format(
                recursionDepth, memory_increase))
            assert memory_increase < 2.5 * resultSize, "Memory from finished requests didn't get freed!"

            return result
Example 14
    def test_submit_should_assign_worker_and_execute(self):
        def work():
            return 42

        req = Request(work)
        req.submit()
        assert req.wait() == 42
        assert req.assigned_worker in Request.global_thread_pool.workers
Example 15
        def big_workload():
            result = 0
            requests = []
            for i in range(10):
                requests.append(Request(workload))

            for r in requests:
                result += r.wait()
            return result
Example 16
    def testRequestLock(self):
        """
        Test the special Request-aware lock.
         
        Launch 99 requests and threads that all must fight over access to the same list.
        The list will eventually be 0,1,2...99, and each request will append a single number to the list.
        Each request must wait its turn before it can append its number and finish.
        """
        # This test doesn't work if the request system is working in single-threaded 'debug' mode.
        # It depends on concurrent execution to make progress.  Otherwise it hangs.
        if Request.global_thread_pool.num_workers == 0:
            raise nose.SkipTest

        req_lock = RequestLock()
        l = [0]

        def append_n(n):
            #print "Starting append_{}\n".format(n)
            while True:
                with req_lock:
                    if l[-1] == n - 1:
                        #print "***** Appending {}".format(n)
                        l.append(n)
                        return

        # Create 50 requests
        N = 50
        reqs = []
        for i in range(1, 2 * N, 2):
            req = Request(partial(append_n, i))
            reqs.append(req)

        # Create 49 threads
        thrds = []
        for i in range(2, 2 * N, 2):
            thrd = threading.Thread(target=partial(append_n, i))
            thrds.append(thrd)

        # Submit in reverse order to ensure that no request finishes until they have all been started.
        # This proves that the requests really are being suspended.
        for req in reversed(reqs):
            req.submit()

        # Start all the threads
        for thrd in reversed(thrds):
            thrd.start()

        # All requests must finish
        for req in reqs:
            req.wait()

        # All threads should finish
        for thrd in thrds:
            thrd.join()

        assert l == list(
            range(100)), "Requests and/or threads finished in the wrong order!"
Example 17
def _impl_test_pool_results_discarded():
    """
    After a RequestPool executes, none of its data should linger if the user didn't hang on to it.
    """
    import weakref
    from functools import partial
    import threading

    result_refs = []

    def workload():
        # In this test, all results are discarded immediately after the
        #  request exits.  Therefore, AT NO POINT IN TIME, should more than N requests be alive.
        live_result_refs = [w for w in result_refs if w() is not None]
        assert (
            len(live_result_refs) <= Request.global_thread_pool.num_workers
        ), "There should not be more than {} result references alive at one time!".format(
            Request.global_thread_pool.num_workers
        )

        return numpy.zeros((10,), dtype=numpy.uint8) + 1

    lock = threading.Lock()

    def handle_result(req, result):
        with lock:
            result_refs.append(weakref.ref(result))

    def handle_cancelled(req, *args):
        assert False

    def handle_failed(req, exc, exc_info):
        raise exc

    pool = RequestPool()
    for _ in range(100):
        req = Request(workload)
        req.notify_finished(partial(handle_result, req))
        req.notify_cancelled(partial(handle_cancelled, req))
        req.notify_failed(partial(handle_failed, req))
        pool.add(req)
        del req
    pool.wait()

    # This test verifies that
    #  (1) references to all child requests have been discarded once the pool is complete, and
    #  (2) therefore, all references to the RESULTS in those child requests are also discarded.
    # There is a tiny window of time between a request being 'complete' (for all intents and purposes),
    #  but before its main execute function has exited back to the main ThreadPool._Worker loop.
    #  The request is not finally discarded until that loop discards it, so let's wait a tiny extra bit of time.
    time.sleep(0.01)

    # Now check that ALL results are truly lost.
    for ref in result_refs:
        assert ref() is None, "Some data was not discarded."
Example 18
    def testSimpleRequestCondition(self):
        """
        Test the SimpleRequestCondition, which is like threading.Condition, but with a subset of the functionality.
        (See the docs for details.)
        """
        Request.reset_thread_pool(num_workers=1)
        N_ELEMENTS = 10

        # It's tempting to simply use threading.Condition here,
        #  but that doesn't quite work if the thread calling wait() is also a worker thread.
        # (threading.Condition uses threading.Lock() as its 'waiter' lock, which blocks the entire worker.)
        # cond = threading.Condition( RequestLock() )
        cond = SimpleRequestCondition()

        produced = []
        consumed = []

        def wait_for_all():
            def f(i):
                time.sleep(0.2 * random.random())
                with cond:
                    produced.append(i)
                    cond.notify()

            reqs = []
            for i in range(N_ELEMENTS):
                req = Request(partial(f, i))
                reqs.append(req)

            for req in reqs:
                req.submit()

            _consumed = consumed
            with cond:
                while len(_consumed) < N_ELEMENTS:
                    while len(_consumed) == len(produced):
                        cond.wait()
                    logger.debug("copying {} elements".format(
                        len(produced) - len(consumed)))
                    _consumed += produced[len(_consumed):]

        # Force the request to run in a worker thread.
        # This should catch failures that can occur if the Condition's "waiter" lock isn't a request lock.
        req = Request(wait_for_all)
        req.submit()

        # Now block for completion
        req.wait()

        logger.debug("produced: {}".format(produced))
        logger.debug("consumed: {}".format(consumed))
        assert set(consumed) == set(
            range(N_ELEMENTS)
        ), "Expected set(range(N_ELEMENTS)), got {}".format(consumed)
Example 19
    def test_should_be_called_after_request_finishes(self):
        cb = mock.Mock()

        req = Request(lambda: 42)
        req.add_done_callback(cb)
        cb.assert_not_called()

        req.submit()
        req.wait()

        cb.assert_called_once_with(req)
Example 20
    def test_if_request_finished_should_call_immidiatelly(self):
        cb = mock.Mock()

        def work():
            return 42

        req = Request(work)
        req.submit()
        req.wait()
        req.add_done_callback(cb)
        cb.assert_called_once_with(req)
Example 21
    def test_lotsOfSmallRequests(self):
        """
        Fire off some reasonably large random number of nested requests.
        Mostly, this test ensures that the requests all complete without a hang.
        """
        handlerCounter = [0]
        handlerLock = threading.Lock()

        def completionHandler(result, req):
            logger.debug("Handing completion {}".format(result))
            handlerLock.acquire()
            handlerCounter[0] += 1
            handlerLock.release()
            req.calledHandler = True

        requestCounter = [0]
        requestLock = threading.Lock()
        allRequests = []

        # This closure randomly chooses to either (a) return immediately or (b) fire off more work
        def someWork(depth, force=False, i=-1):
            #print 'depth=', depth, 'i=', i
            if depth > 0 and (force or random.random() > 0.5):
                requests = []
                for i in range(10):
                    req = Request(partial(someWork, depth=depth - 1, i=i))
                    req.notify_finished(partial(completionHandler, req=req))
                    requests.append(req)
                    allRequests.append(req)

                    requestLock.acquire()
                    requestCounter[0] += 1
                    requestLock.release()

                for r in requests:
                    r.wait()

            return requestCounter[0]

        req = Request(partial(someWork, depth=4, force=True))

        logger.debug("Waiting for requests...")
        req.wait()
        logger.debug("root request finished")

        # Handler should have been called once for each request we fired
        assert handlerCounter[0] == requestCounter[0]

        logger.debug("finished testLotsOfSmallRequests")

        for r in allRequests:
            assert r.finished

        logger.debug("waited for all subrequests")
Example 22
    def test_if_request_has_been_cancelled_callback_should_still_be_called(self):
        cb = mock.Mock()

        req = Request(lambda: 42)
        req.cancel()
        req.add_done_callback(cb)
        req.submit()

        with pytest.raises(Request.InvalidRequestException):
            req.wait()

        cb.assert_called_once_with(req)
Example 23
    def _onExportTifButtonPressed(self):
        options = QFileDialog.Options()
        if ilastik_config.getboolean("ilastik", "debug"):
            options |= QFileDialog.DontUseNativeDialog

        directory = encode_from_qstring(QFileDialog.getExistingDirectory(self, 'Select Directory',os.path.expanduser("~"), options=options))

        if directory is None or len(str(directory)) == 0:
            logger.info( "cancelled." )
            return

        logger.info( 'Saving results as tiffs...' )

        label2color = self.mainOperator.label2color
        lshape = list(self.mainOperator.LabelImage.meta.shape)

        def _handle_progress(x):
            self.applet.progressSignal.emit(x)

        def _export():
            num_files = float(len(label2color))
            for t, label2color_at in enumerate(label2color):
                if len(label2color_at) == 0:
                    continue
                logger.info( 'exporting tiffs for t = ' + str(t) )

                roi = SubRegion(self.mainOperator.LabelImage, start=[t,] + 4*[0,], stop=[t+1,] + list(lshape[1:]))
                labelImage = self.mainOperator.LabelImage.get(roi).wait()
                relabeled = relabel(labelImage[0,...,0],label2color_at)
                for i in range(relabeled.shape[2]):
                    out_im = relabeled[:,:,i]
                    out_fn = str(directory) + '/vis_t' + str(t).zfill(4) + '_z' + str(i).zfill(4) + '.tif'
                    vigra.impex.writeImage(np.asarray(out_im,dtype=np.uint32), out_fn)

                _handle_progress(t/num_files * 100)
            logger.info( 'Tiffs exported.' )

        def _handle_finished(*args):
            self._drawer.exportTifButton.setEnabled(True)
            self.applet.progressSignal.emit(100)

        def _handle_failure( exc, exc_info ):
            msg = "Exception raised during export.  See traceback above.\n"
            log_exception( logger, msg, exc_info )
            self.applet.progressSignal.emit(100)
            self._drawer.exportTifButton.setEnabled(True)

        self._drawer.exportTifButton.setEnabled(False)
        self.applet.progressSignal.emit(0)
        req = Request( _export )
        req.notify_failed( _handle_failure )
        req.notify_finished( _handle_finished )
        req.submit()
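The export handler above follows a general pattern for keeping a GUI responsive: wrap the slow work in a Request, hook notify_finished/notify_failed to restore the UI state, then submit and return immediately. A minimal non-GUI sketch of that wiring (function names here are illustrative, import path assumed):

from lazyflow.request import Request

def long_export():
    pass                          # the slow work, e.g. writing many image files

def on_finished(*args):
    print("export finished")      # in the GUI: re-enable the button, set progress to 100

def on_failed(exc, exc_info):
    print("export failed:", exc)  # in the GUI: log the traceback, re-enable the button

req = Request(long_export)
req.notify_failed(on_failed)
req.notify_finished(on_finished)
req.submit()                      # returns immediately; the work runs on the thread pool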
Example 24
    def test_signal_failed_should_be_called_on_exception(self, broken_fn):
        work, req = self.work_req(broken_fn)

        recv = mock.Mock()

        req = Request(work)
        req.notify_failed(recv)
        req.submit()

        with pytest.raises(TExc):
            assert req.wait() == 42
        recv.assert_called_once()
        assert isinstance(recv.call_args[0][0], TExc)
Example 25
    def testRequestLock(self):
        """
        Test the special Request-aware lock.
        
        Launch 99 requests and threads that all must fight over access to the same list.
        The list will eventually be 0,1,2...99, and each request will append a single number to the list.
        Each request must wait its turn before it can append its number and finish.
        """
        req_lock = RequestLock()
        l = [0]

        def append_n(n):
            #print "Starting append_{}\n".format(n)
            while True:
                with req_lock:
                    if l[-1] == n - 1:
                        #print "***** Appending {}".format(n)
                        l.append(n)
                        return

        # Create 50 requests
        reqs = []
        for i in range(1, 100, 2):
            req = Request(partial(append_n, i))
            reqs.append(req)

        # Create 49 threads
        thrds = []
        for i in range(2, 100, 2):
            thrd = threading.Thread(target=partial(append_n, i))
            thrds.append(thrd)

        # Submit in reverse order to ensure that no request finishes until they have all been started.
        # This proves that the requests really are being suspended.
        for req in reversed(reqs):
            req.submit()

        # Start all the threads
        for thrd in reversed(thrds):
            thrd.start()

        # All requests must finish
        for req in reqs:
            req.wait()

        # All threads should finish
        for thrd in thrds:
            thrd.join()

        assert l == list(
            range(100)), "Requests and/or threads finished in the wrong order!"
Example 26
    def test_signal_failed_called_even_when_subscription_happened_after_completion(self, broken_fn):
        work, req = self.work_req(broken_fn)

        recv = mock.Mock()

        req = Request(work)
        req.submit()

        with pytest.raises(TExc):
            assert req.wait() == 42

        req.notify_failed(recv)
        recv.assert_called_once()
        assert isinstance(recv.call_args[0][0], TExc)
Example 27
    def test_if_request_failed_callback_should_still_be_called(self):
        cb = mock.Mock()

        def work():
            raise Exception()

        req = Request(work)
        req.add_done_callback(cb)
        req.submit()

        with pytest.raises(Exception):
            req.wait()

        cb.assert_called_once_with(req)
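Taken together, Examples 19, 20, 22, and 27 show that add_done_callback() registers a callback that receives the request object itself and fires exactly once, whether the request finishes, fails, or is cancelled. A minimal sketch of the success case (import path assumed):

from lazyflow.request import Request

done = []

req = Request(lambda: 42)
req.add_done_callback(done.append)   # the callback's argument is the request itself
req.submit()
req.wait()

assert done == [req]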
Example 28
    def test_early_cancel(self):
        """
        If you try to wait for a request after it's already been cancelled, you get an InvalidRequestException.
        """
        def f():
            pass
        req = Request(f)
        req.cancel()
        try:
            req.wait()
        except Request.InvalidRequestException:
            pass
        else:
            assert False, "Expected a Request.InvalidRequestException because we're waiting for a request that's already been cancelled."
Example 29
        def big_workload():
            try:
                requests = []
                for i in range(100):
                    requests.append(Request(workload))

                for r in requests:
                    workcounter[0] += r.wait()

                assert False, "Shouldn't get to this line.  This test is designed so that big_workload should be cancelled before it finishes all its work"
                for r in requests:
                    assert not r.cancelled
            except Request.CancellationException:
                got_cancel[0] = True
Example 30
    def test_callWaitDuringCallback(self):
        """
        When using request.notify_finished(...) to handle request completions, 
        the handler should be allowed to call request.wait() on the request that it's handling.
        """
        def handler(req, result):
            req.wait()

        def workFn():
            pass

        req = Request(workFn)
        req.notify_finished(partial(handler, req))
        #req.submit()
        req.wait()