Example #1
0
def test_empty_pool():
    """
    Edge case: waiting on a RequestPool that contains no requests
    must return immediately without raising.
    """
    empty_pool = RequestPool()
    empty_pool.wait()
Example #2
0
def test_pool_with_failed_requests():
    """
    When one of the requests in a RequestPool fails, the exception must be
    propagated back to the caller of RequestPool.wait().
    """
    class ExpectedException(Exception):
        pass

    completed = []
    pool = RequestPool()

    def workload(index):
        if index == 9:
            raise ExpectedException("Intentionally failed request.")
        completed.append(index)

    for idx in range(10):
        pool.add(Request(partial(workload, idx)))

    # wait() must re-raise the child request's exception.
    saw_expected_failure = False
    try:
        pool.wait()
    except ExpectedException:
        saw_expected_failure = True
    assert saw_expected_failure, "Expected the pool to fail.  Why didn't it?"

    time.sleep(0.2)
Example #3
0
def _impl_test_pool_results_discarded():
    """
    After a RequestPool executes, none of its data should linger if the user didn't hang on to it.

    Strategy: keep only weak references to each request's result, then verify
    that every weakref is dead once the pool has completed.
    """
    import weakref
    from functools import partial
    import threading

    # Weak references to results produced so far.  Weakrefs cannot themselves
    # keep the results alive, which is the point of the test.
    result_refs = []

    def workload():
        # In this test, all results are discarded immediately after the
        #  request exits.  Therefore, AT NO POINT IN TIME, should more than N requests be alive.
        live_result_refs = [w for w in result_refs if w() is not None]
        assert (
            len(live_result_refs) <= Request.global_thread_pool.num_workers
        ), "There should not be more than {} result references alive at one time!".format(
            Request.global_thread_pool.num_workers
        )

        return numpy.zeros((10,), dtype=numpy.uint8) + 1

    # Serializes appends to result_refs across worker threads.
    lock = threading.Lock()

    def handle_result(req, result):
        with lock:
            result_refs.append(weakref.ref(result))

    def handle_cancelled(req, *args):
        # Nothing cancels requests in this test, so this must never fire.
        assert False

    def handle_failed(req, exc, exc_info):
        # Re-raise so a failing workload surfaces instead of being swallowed.
        raise exc

    pool = RequestPool()
    for _ in range(100):
        req = Request(workload)
        req.notify_finished(partial(handle_result, req))
        req.notify_cancelled(partial(handle_cancelled, req))
        req.notify_failed(partial(handle_failed, req))
        pool.add(req)
        # Drop our strong reference; only the pool may keep the request alive.
        del req
    pool.wait()

    # This test verifies that
    #  (1) references to all child requests have been discarded once the pool is complete, and
    #  (2) therefore, all references to the RESULTS in those child requests are also discarded.
    # There is a tiny window of time between a request being 'complete' (for all intents and purposes),
    #  but before its main execute function has exited back to the main ThreadPool._Worker loop.
    #  The request is not finally discarded until that loop discards it, so let's wait a tiny extra bit of time.
    time.sleep(0.01)

    # Now check that ALL results are truly lost.
    for ref in result_refs:
        assert ref() is None, "Some data was not discarded."
Example #4
0
    def testBasic(self):
        """Every request added to a pool must be finished after wait()."""
        def work():
            time.sleep(0.2)

        requests = [Request(work) for _ in range(10)]

        pool = RequestPool()
        for request in requests:
            pool.add(request)

        pool.wait()

        # wait() must not return until every child request has completed.
        for request in requests:
            assert request.finished
Example #5
0
    def testBasic(self):
        """submit() starts all requests concurrently; wait() completes them."""
        def work():
            time.sleep(0.2)

        requests = [Request(work) for _ in range(10)]

        pool = RequestPool()
        for request in requests:
            pool.add(request)

        pool.submit()

        # Right after submit(), every request has started, but none has had
        # time to finish yet — unless the thread pool has zero workers, in
        # which case requests execute synchronously and are already done.
        for request in requests:
            assert request.started
            if Request.global_thread_pool.num_workers > 0:
                assert not request.finished

        pool.wait()

        # Now everything must be complete.
        for request in requests:
            assert request.finished
Example #6
0
def test_basic():
    """
    Check if a request pool executes all added requests.
    """
    # threadsafe way to count how many requests ran: itertools.count()
    # advances atomically under CPython's GIL
    import itertools
    result_counter = itertools.count()

    def increase_counter():
        time.sleep(0.1)
        # Python 3: iterators have no .next() method; use the next() builtin.
        next(result_counter)

    pool = RequestPool()
    for i in range(500):  # Python 3: xrange was removed; range is lazy now
        pool.add(Request(increase_counter))
    pool.wait()

    # The 500 workload calls consumed values 0..499, so the counter now
    # yields 500.  Read it exactly once — every next() advances the counter,
    # so re-reading it in the message (as the old code did) mis-reports.
    total = next(result_counter)
    assert total == 500, "RequestPool has not run all submitted requests {} out of 500".format(total)
Example #7
0
def test_pool_with_failed_requests():
    """
    A failure inside any single request of a RequestPool must propagate
    out of RequestPool.wait() to the caller.
    """
    class ExpectedException(Exception):
        pass

    succeeded = []
    pool = RequestPool()

    def workload(index):
        if index == 9:
            raise ExpectedException("Intentionally failed request.")
        succeeded.append(index)

    for n in range(10):
        pool.add(Request(partial(workload, n)))

    try:
        pool.wait()
    except ExpectedException:
        pass  # This is exactly what we expect.
    else:
        assert False, "Expected the pool to fail.  Why didn't it?"

    # Give any stragglers a moment to drain before the next test runs.
    time.sleep(0.2)
Example #8
0
def test_cleanup():
    """
    Check if requests added to a RequestPool are cleaned when they are
    completed without waiting for the RequestPool itself to be cleaned.
    """
    cur_process = psutil.Process(os.getpid())

    def getMemoryUsage():
        # Collect garbage first so dead objects don't inflate the reading.
        gc.collect()
        return cur_process.memory_info().vms

    starting_usage = getMemoryUsage()

    def getMemoryIncrease():
        return getMemoryUsage() - starting_usage

    num_workers = len(Request.global_thread_pool.workers)
    # maximum memory this tests should use
    # tests should not cause the machine to swap unnecessarily
    max_mem = 1 << 29  # 512 MiB
    # Integer division: numpy.zeros needs an integral size, and Python 3's
    # '/' yields a float.  Guard against a 0-worker (synchronous) pool too.
    mem_per_req = max_mem // max(num_workers, 1)

    # some leeway
    max_allowed_mem = max_mem + 2 * mem_per_req

    def memoryhog():
        increase = getMemoryIncrease()
        assert increase < max_allowed_mem, "memory use should not go beyond {}, current use: {}".format(max_mem, increase)
        return numpy.zeros(mem_per_req, dtype=numpy.uint8)

    pool = RequestPool()
    for i in range(num_workers ** 2):  # Python 3: xrange was removed
        pool.add(Request(memoryhog))

    pool.wait()

    # The pool must drop its request references as each one completes.
    assert len(pool._requests) == 0, "Not all requests were executed by the RequestPool"
Example #9
0
def _impl_test_pool_results_discarded():
    """
    After a RequestPool executes, none of its data should linger if the user didn't hang on to it.

    Strategy: keep only weak references to each request's result, then verify
    that every weakref is dead once the pool has completed.
    """
    import weakref
    from functools import partial
    import threading

    # Weak references to results produced so far.  Weakrefs cannot themselves
    # keep the results alive, which is the point of the test.
    result_refs = []

    def workload():
        # In this test, all results are discarded immediately after the
        #  request exits.  Therefore, AT NO POINT IN TIME, should more than N requests be alive.
        live_result_refs = [w for w in result_refs if w() is not None]
        assert (
            len(live_result_refs) <= Request.global_thread_pool.num_workers
        ), "There should not be more than {} result references alive at one time!".format(
            Request.global_thread_pool.num_workers
        )

        return numpy.zeros((10,), dtype=numpy.uint8) + 1

    # Serializes appends to result_refs across worker threads.
    lock = threading.Lock()

    def handle_result(req, result):
        with lock:
            result_refs.append(weakref.ref(result))

    def handle_cancelled(req, *args):
        # Nothing cancels requests in this test, so this must never fire.
        assert False

    def handle_failed(req, exc, exc_info):
        # Re-raise so a failing workload surfaces instead of being swallowed.
        raise exc

    pool = RequestPool()
    for _ in range(100):
        req = Request(workload)
        req.notify_finished(partial(handle_result, req))
        req.notify_cancelled(partial(handle_cancelled, req))
        req.notify_failed(partial(handle_failed, req))
        pool.add(req)
        # Drop our strong reference; only the pool may keep the request alive.
        del req
    pool.wait()

    # This test verifies that
    #  (1) references to all child requests have been discarded once the pool is complete, and
    #  (2) therefore, all references to the RESULTS in those child requests are also discarded.
    # There is a tiny window of time between a request being 'complete' (for all intents and purposes),
    #  but before its main execute function has exited back to the main ThreadPool._Worker loop.
    #  The request is not finally discarded until that loop discards it, so let's wait a tiny extra bit of time.
    time.sleep(0.01)

    # Now check that ALL results are truly lost.
    for ref in result_refs:
        assert ref() is None, "Some data was not discarded."
Example #10
0
    def testBasic(self):
        """wait() must block until every request in the pool has run."""
        def work():
            time.sleep(0.2)

        pool = RequestPool()
        pending = []
        for _ in range(10):
            request = Request(work)
            pending.append(request)
            pool.add(request)

        pool.wait()

        # After wait() returns, no request may still be in flight.
        assert all(request.finished for request in pending)
Example #11
0
    def testBasic(self):
        """submit() launches every request; wait() blocks until all finish."""
        def work():
            time.sleep(0.2)

        reqs = []
        for _ in range(10):
            reqs.append(Request(work))

        pool = RequestPool()
        for req in reqs:
            pool.add(req)

        pool.submit()

        # All requests should be run in parallel...
        for req in reqs:
            assert req.started
            # With zero workers the thread pool executes requests
            # synchronously, so they are already finished here; only assert
            # "not finished" when real worker threads exist (this matches
            # the guarded variant of this test elsewhere in the suite).
            if Request.global_thread_pool.num_workers > 0:
                assert not req.finished

        pool.wait()

        # Should all be done.
        for req in reqs:
            assert req.finished
Example #12
0
def test_basic():
    """
    Check if a request pool executes all added requests.
    """
    # threadsafe way to count how many requests ran (itertools.count
    # advances atomically under CPython's GIL)
    import itertools
    result_counter = itertools.count()

    def increase_counter():
        time.sleep(0.1)
        # Python 3: the .next() method is gone; use the next() builtin.
        next(result_counter)

    pool = RequestPool()
    for i in range(500):  # Python 3: xrange was removed
        pool.add(Request(increase_counter))
    pool.wait()

    # 500 prior next() calls consumed 0..499, so the counter now yields 500.
    # Read it only once: each next() advances it, so calling it again inside
    # the message (as the old code did) would report the wrong count.
    total = next(result_counter)
    assert total == 500, "RequestPool has not run all submitted requests {} out of 500".format(total)
Example #13
0
def test_cleanup():
    """
    Check if requests added to a RequestPool are cleaned when they are
    completed without waiting for the RequestPool itself to be cleaned.
    """
    cur_process = psutil.Process(os.getpid())

    def getMemoryUsage():
        # Collect garbage first so dead objects don't inflate the reading.
        gc.collect()
        return cur_process.memory_info().vms

    starting_usage = getMemoryUsage()

    def getMemoryIncrease():
        return getMemoryUsage() - starting_usage

    num_workers = len(Request.global_thread_pool.workers)
    # maximum memory this tests should use
    # tests should not cause the machine to swap unnecessarily
    max_mem = 1 << 29  # 512 MiB
    # Integer division: numpy.zeros needs an integral size and Python 3's
    # '/' produces a float.  max() guards a 0-worker (synchronous) pool
    # against ZeroDivisionError.
    mem_per_req = max_mem // max(num_workers, 1)

    # some leeway
    max_allowed_mem = max_mem + 2 * mem_per_req

    def memoryhog():
        increase = getMemoryIncrease()
        assert increase < max_allowed_mem, "memory use should not go beyond {}, current use: {}".format(
            max_mem, increase)
        return numpy.zeros(mem_per_req, dtype=numpy.uint8)

    pool = RequestPool()
    for i in range(num_workers ** 2):  # Python 3: xrange was removed
        pool.add(Request(memoryhog))

    pool.wait()

    # The pool must drop its request references as each one completes.
    assert len(pool._requests) == 0, "Not all requests were executed by the RequestPool"