Example 1
def _impl_test_pool_results_discarded():
    """
    After a RequestPool executes, none of its data should linger if the user didn't hang on to it.
    """
    import threading
    import time
    import weakref
    from functools import partial

    import numpy

    # Request and RequestPool are assumed to come from lazyflow's request
    # framework, which this test appears to exercise.
    from lazyflow.request import Request, RequestPool

    result_refs = []

    def workload():
        # In this test, all results are discarded immediately after the
        #  request exits.  Therefore, AT NO POINT IN TIME should more than N results be alive.
        live_result_refs = [w for w in result_refs if w() is not None]
        assert (
            len(live_result_refs) <= Request.global_thread_pool.num_workers
        ), "There should not be more than {} result references alive at one time!".format(
            Request.global_thread_pool.num_workers
        )

        return numpy.zeros((10,), dtype=numpy.uint8) + 1

    lock = threading.Lock()

    def handle_result(req, result):
        with lock:
            result_refs.append(weakref.ref(result))

    def handle_cancelled(req, *args):
        assert False, "No request in this test should ever be cancelled."

    def handle_failed(req, exc, exc_info):
        raise exc

    pool = RequestPool()
    for _ in range(100):
        req = Request(workload)
        req.notify_finished(partial(handle_result, req))
        req.notify_cancelled(partial(handle_cancelled, req))
        req.notify_failed(partial(handle_failed, req))
        pool.add(req)
        del req
    pool.wait()

    # This test verifies that
    #  (1) references to all child requests have been discarded once the pool is complete, and
    #  (2) therefore, all references to the RESULTS in those child requests are also discarded.
    # There is a tiny window of time between a request being 'complete' (for all intents and purposes)
    #  and the moment its main execute function has exited back to the main ThreadPool._Worker loop.
    #  The request is not finally discarded until that loop drops it, so wait a tiny extra bit of time.
    time.sleep(0.01)

    # Now check that ALL results are truly lost.
    for ref in result_refs:
        assert ref() is None, "Some data was not discarded."
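
The test above hinges on weakref.ref: a weak reference does not keep its referent alive, so once the last strong reference to a result array is dropped, calling the ref returns None. A minimal sketch of that mechanism, independent of the request framework (the variable names here are illustrative only):

import weakref

import numpy

data = numpy.zeros((10,), dtype=numpy.uint8) + 1
ref = weakref.ref(data)  # A weak reference does not keep `data` alive.
assert ref() is data     # The target is still reachable through the ref.

del data                 # Drop the only strong reference.
assert ref() is None     # CPython's reference counting collects it immediately.

The immediate collection after del is a CPython refcounting detail (other interpreters may need an explicit gc.collect()), which is also why the real test sleeps briefly: the worker loop needs a moment to drop its own last strong reference to each request.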
Example 2
    def test_dont_cancel_shared_request(self):
        """
        Test that a request is not cancelled while other requests are still waiting on its result.
        """
        import threading
        import time
        from functools import partial

        import nose

        from lazyflow.request import Request  # Assumed source of Request (lazyflow).

        if Request.global_thread_pool.num_workers == 0:
            raise nose.SkipTest

        cancelled_requests = []

        def f1():
            time.sleep(1)
            return "RESULT"

        r1 = Request(f1)
        r1.notify_cancelled(partial(cancelled_requests.append, 1))

        def f2():
            try:
                return r1.wait()
            except:
                # Cancellation of r1 (or of this request) surfaces here as an exception.
                cancelled_requests.append(2)

        r2 = Request(f2)

        def f3():
            try:
                return r1.wait()
            except:
                # Same as f2: record that this request observed a cancellation.
                cancelled_requests.append(3)

        r3 = Request(f3)

        def otherThread():
            r2.wait()

        t = threading.Thread(target=otherThread)
        t.start()
        r3.submit()

        time.sleep(0.5)

        # By now both r2 and r3 are waiting for the result of r1
        # Cancelling r3 should not cancel r1.
        r3.cancel()

        t.join()  # Wait for r2 to finish

        time.sleep(0.5)

        assert r1.started
        assert r1.finished
        assert not r1.cancelled  # Not cancelled, even though we cancelled a request that was waiting for it.
        assert 1 not in cancelled_requests

        assert r2.started
        assert r2.finished
        assert not r2.cancelled  # Not cancelled.
        assert 2 not in cancelled_requests
        assert r2.wait() == "RESULT"

        assert r3.started
        assert r3.finished
        assert r3.cancelled  # Successfully cancelled.
        assert 3 in cancelled_requests
Example 3
    def test_dont_cancel_shared_request(self):
        """
        Test that a request is not cancelled while other requests are still waiting on its result.
        """

        import threading
        import time
        from functools import partial

        from lazyflow.request import Request  # Assumed source of Request (lazyflow).

        cancelled_requests = []

        def f1():
            time.sleep(1)
            return "RESULT"

        r1 = Request(f1)
        r1.notify_cancelled(partial(cancelled_requests.append, 1))
        
        def f2():
            try:
                return r1.wait()
            except:
                # Cancellation of r1 (or of this request) surfaces here as an exception.
                cancelled_requests.append(2)

        r2 = Request(f2)

        def f3():
            try:
                return r1.wait()
            except:
                # Same as f2: record that this request observed a cancellation.
                cancelled_requests.append(3)

        r3 = Request(f3)
        
        def otherThread():
            r2.wait()

        t = threading.Thread(target=otherThread)
        t.start()
        r3.submit()

        time.sleep(0.5)

        # By now both r2 and r3 are waiting for the result of r1
        # Cancelling r3 should not cancel r1.
        r3.cancel()

        t.join()  # Wait for r2 to finish

        time.sleep(0.5)

        assert r1.started
        assert r1.finished
        assert not r1.cancelled  # Not cancelled, even though we cancelled a request that was waiting for it.
        assert 1 not in cancelled_requests

        assert r2.started
        assert r2.finished
        assert not r2.cancelled  # Not cancelled.
        assert 2 not in cancelled_requests
        assert r2.wait() == "RESULT"

        assert r3.started
        assert r3.finished
        assert r3.cancelled  # Successfully cancelled.
        assert 3 in cancelled_requests
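
Both versions of this test pin down the same design rule: a shared request may only be cancelled once every consumer waiting on it has itself given up. A toy sketch of that reference-counting bookkeeping follows; the names (SharedTask, add_waiter, cancel_waiter) are hypothetical illustrations, not lazyflow's API:

import threading


class SharedTask:
    # Hypothetical model of cancel-only-when-unwatched semantics (not lazyflow's Request).

    def __init__(self):
        self._lock = threading.Lock()
        self._waiters = 0
        self.cancelled = False

    def add_waiter(self):
        with self._lock:
            self._waiters += 1

    def cancel_waiter(self):
        # One consumer gave up; cancel the shared task only when the last one leaves.
        with self._lock:
            self._waiters -= 1
            if self._waiters == 0:
                self.cancelled = True


task = SharedTask()
task.add_waiter()     # r2 begins waiting on the shared result.
task.add_waiter()     # r3 begins waiting too.

task.cancel_waiter()  # r3 is cancelled: the task survives because r2 still needs it.
assert not task.cancelled

task.cancel_waiter()  # The last waiter leaves: only now may the task be cancelled.
assert task.cancelled

This mirrors the assertions above: cancelling r3 leaves r1 running because r2 still holds an interest in it, and r1 finishes normally with its result intact.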