Example #1
    def test_base(self):
        p = ThreadPool()

        # default pools have no workers - and threading was removed entirely ... 
        assert p.size() == 0

        # SINGLE TASK SERIAL SYNC MODE
        ##############################
        # put a few unrelated tasks that we forget about - check ref counts and cleanup
        t1, t2 = FixtureThreadTask(iter(list()), "nothing1", None), FixtureThreadTask(iter(list()), "nothing2", None)
        urc1 = p.add_task(t1)
        urc2 = p.add_task(t2)
        assert p.num_tasks() == 2

        # test pool reader
        assert urc1.pool_ref()() is p
        assert urc1.task_ref()() is t1
        assert urc1.pool() == p
        assert urc1.task() == t1

        ## SINGLE TASK #################
        self._assert_single_task(p, False)
        if py2:
            assert p.num_tasks() == 2
        del urc1
        if py2:
            assert p.num_tasks() == 1

        p.remove_task(t2)
        if py2:
            assert p.num_tasks() == 0
        assert sys.getrefcount(t2) == 2

        t3 = FixtureChannelThreadTask(urc2, "channel", None)
        urc3 = p.add_task(t3)
        if py2:
            assert p.num_tasks() == 1
        del urc3
        if py2:
            assert p.num_tasks() == 0
        assert sys.getrefcount(t3) == 2


        # DEPENDENT TASKS SYNC MODE
        ###########################
        self._assert_async_dependent_tasks(p)
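
The "== 2" refcount checks above follow from how sys.getrefcount works: it counts the temporary reference created by its own argument, so an object held by exactly one name reports 2. A standalone illustration (CPython-specific, since refcounts are an implementation detail):

import sys

x = object()
print(sys.getrefcount(x))   # 2: the name x, plus getrefcount's own argument
y = x                       # add a second name for the same object
print(sys.getrefcount(x))   # 3
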
    def _assert_async_dependent_tasks(self, pool):
        # includes failure in center task, 'recursive' orphan cleanup
        # This will also verify that the channel-close mechanism works
        # t1 -> t2 -> t3

        sys.stderr.write("Threadpool: starting async dependency test in %i threads\n" % pool.size())
        null_tasks = pool.num_tasks()
        ni = 1000
        count = 3
        aic = count + 2
        def make_task(*args, **kwargs):
            return add_task_chain(pool, ni, count, *args, **kwargs)

        ts, rcs = make_task()
        assert len(ts) == aic
        assert len(rcs) == aic
        assert pool.num_tasks() == null_tasks + len(ts)

        # read(0)
        #########
        st = time.time()
        items = rcs[-1].read()
        elapsed = time.time() - st
        assert len(items) == ni
        del rcs
        if py2:
            assert pool.num_tasks() == 0        # tasks depleted, all done, no handles
        # wait a tiny moment - there could still be something unprocessed on the
        # queue, increasing the refcount
        time.sleep(0.15)
        assert sys.getrefcount(ts[-1]) == 2     # ts + call
        assert sys.getrefcount(ts[0]) == 2      # ts + call
        sys.stderr.write("Dependent Tasks: evaluated %i items of %i dependent in %f s ( %i items / s )\n" % (ni, aic, elapsed, ni / elapsed))


        # read(1)
        #########
        ts, rcs = make_task()
        st = time.time()
        for i in range(ni):
            items = rcs[-1].read(1)
            assert len(items) == 1
        # END for each item to pull
        elapsed_single = time.time() - st
        # another read yields nothing - it's empty
        assert len(rcs[-1].read()) == 0
        sys.stderr.write("Dependent Tasks: evaluated %i items with read(1) of %i dependent in %f s ( %i items / s )\n" % (ni, aic, elapsed_single, ni / elapsed_single))


        # read with min-count size
        ###########################
        # must be faster, as it will read ni / 4 chunks
        # It's enough to set one task, as it will force all others in the chain
        # to min_size as well.
        ts, rcs = make_task()
        if py2:
            assert pool.num_tasks() == len(ts)
        nri = ni // 4       # integer division - min_count must stay an int on py3
        ts[-1].min_count = nri
        st = time.time()
        for i in range(ni):
            items = rcs[-1].read(1)
            assert len(items) == 1
        # END for each item to read
        elapsed_minsize = time.time() - st
        # it's empty
        assert len(rcs[-1].read()) == 0
        sys.stderr.write("Dependent Tasks: evaluated %i items with read(1), min_size=%i, of %i dependent in %f s ( %i items / s )\n" % (ni, nri, aic, elapsed_minsize, ni / elapsed_minsize))

        # it should have been a bit faster at least, and most of the time it is.
        # Sometimes it's not, mainly because:
        # * The test tasks lock a lot, hence they slow down the system
        # * Each read will still trigger the pool to evaluate, causing some overhead
        #   even though there are enough items on the queue in that case. Keeping
        #   track of the scheduled items helped there, but it caused further unacceptable
        #   slowdown
        # assert elapsed_minsize < elapsed_single


        # read with failure
        ###################
        # it should recover and give at least fail_after items
        # t1 -> x -> t3
        fail_after = ni // 2
        ts, rcs = make_task(fail_setup=[(0, fail_after)])
        items = rcs[-1].read()
        assert len(items) == fail_after


        # MULTI-POOL
        # If two pools are connected, this should work as well.
        # The second one has just one more thread
        ts, rcs = make_task()

        # connect verifier channel as feeder of the second pool
        p2 = ThreadPool(0)      # don't spawn new threads, they have the tendency not to wake up on mutexes
        assert p2.size() == 0
        p2ts, p2rcs = add_task_chain(p2, ni, count, feeder_channel=rcs[-1], id_offset=count)
        assert p2ts[0] is None      # we have no feeder task
        assert rcs[-1].pool_ref()() is pool     # it didn't change the pool
        assert rcs[-1] is p2ts[1].reader()
        assert p2.num_tasks() == len(p2ts)-1    # first is None

        # reading from the last one will evaluate all pools correctly
        st = time.time()
        items = p2rcs[-1].read()
        elapsed = time.time() - st
        assert len(items) == ni

        sys.stderr.write("Dependent Tasks: evaluated 2 connected pools and %i items with read(0), of %i dependent tasks in %f s ( %i items / s )\n" % (ni, aic + aic-1, elapsed, ni / elapsed))


        # lose the handles of the second pool to allow others to go as well
        del p2rcs
        del p2ts
        assert p2.num_tasks() == 0

        # now we lost our old handles as well, and the tasks go away
        ts, rcs = make_task()
        if py2:
            assert pool.num_tasks() == len(ts)

        p2ts, p2rcs = add_task_chain(p2, ni, count, feeder_channel=rcs[-1], id_offset=count)
        assert p2.num_tasks() == len(p2ts) - 1

        # Test multi-read(1)
        reader = rcs[-1]
        st = time.time()
        for i in range(ni):
            items = reader.read(1)
            assert len(items) == 1
        # END for each item to get
        elapsed = time.time() - st
        del reader      # decrement refcount

        sys.stderr.write("Dependent Tasks: evaluated 2 connected pools and %i items with read(1), of %i dependent tasks in %f s ( %i items / s )\n" % (ni, aic + aic-1, elapsed, ni / elapsed))

        # another read is empty
        assert len(rcs[-1].read()) == 0

        # now that both are connected, I can drop my handle to the reader
        # without affecting the task-count, but what's more important:
        # they remove their tasks correctly once we drop our references in the
        # right order
        del p2ts
        assert p2rcs[0] is rcs[-1]
        del p2rcs
        assert p2.num_tasks() == 0
        del p2

        if py2:
            assert pool.num_tasks() == null_tasks + len(ts)


        del ts
        del rcs

        if py2:
            assert pool.num_tasks() == null_tasks
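
The pool exercised above is pull-based: nothing is computed until a reader asks for items, and reading from the last channel drives every upstream task in the t1 -> t2 -> t3 chain. A minimal single-threaded sketch of that idea using plain generators - an analogy only, assuming nothing about the real ThreadPool, add_task_chain, or channel types from the library under test:

from itertools import islice

def source(n):
    # t1: produces n items
    for i in range(n):
        yield i

def transform(upstream, label):
    # t2 / t3: consumes lazily from the upstream stage, one item per pull
    for item in upstream:
        yield (label, item)

def read(channel, count=0):
    # read(0) drains the channel; read(n) pulls exactly n items, which is
    # the behavior the reader.read(1) loops in the test exercise
    if count == 0:
        return list(channel)
    return list(islice(channel, count))

chain = transform(transform(source(1000), "t2"), "t3")
assert len(read(chain, 1)) == 1     # one pull evaluates just enough upstream
assert len(read(chain)) == 999      # a final read(0) drains the rest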