Example #1
    def test_chunk_size(self):
        log_file = os.path.join(os.path.dirname(__file__), "..", "logs",
                                "chunk.log")
        try:
            fiber.reset()
            # save the current logging config so it can be restored in `finally`
            old_log_file = fiber_config.log_file
            old_log_level = fiber_config.log_level

            fiber.init(cpu_per_job=8, log_file=log_file, log_level="debug")

            n_envs = 9
            queues = [(SimpleQueue(), SimpleQueue()) for _ in range(n_envs)]
            pool = Pool(n_envs)

            # explicitly start workers instead of lazy start
            pool.start_workers()
            print("waiting for all workers to be up")
            # block until all workers are up
            pool.wait_until_workers_up()
            print("all workers are up")

            def run_map():
                print('[master]RUN MAP')
                # chunksize=1 gives each worker exactly one queue pair; if the
                # chunk size were computed wrong, this map would get stuck
                pool.map(double_queue_worker, enumerate(queues), chunksize=1)
                print('[master]RUN MAP DONE')

            td = threading.Thread(target=run_map, daemon=True)
            td.start()

            print('Checking...')
            for i, (_, returns) in enumerate(queues):
                print(f'[master]Checking queue {i}/{n_envs}')
                assert 'READY' in returns.get()
                print(f'[master]Checking queue {i} done')

            print('All workers are ready, put HELLOs')
            for i, (instruction, _) in enumerate(queues):
                instruction.put("HELLO")
                print(f'[master]PUT HELLO {i}')

            print('All HELLOs sent, waiting for ACKs')
            for i, (_, returns) in enumerate(queues):
                assert 'ACK' in returns.get()
                print(f'[master]GOT ACK {i}')

            print('All ACKs received, send QUIT to workers')
            for i, (instruction, _) in enumerate(queues):
                instruction.put("QUIT")
                print(f'[master]PUT QUIT {i}/{n_envs}')
            pool.terminate()
            pool.join()

        finally:
            fiber.init(log_file=old_log_file, log_level=old_log_level)
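
These excerpts omit their module-level setup. Below is a minimal sketch of the imports and the `double_queue_worker` helper that Example #1 assumes, reconstructed from the READY/HELLO/ACK/QUIT protocol the test drives; the exact module paths and message payloads in the real suite may differ:

    import os
    import threading

    import fiber
    import fiber.config as fiber_config
    from fiber import Pool, SimpleQueue


    def double_queue_worker(args):
        # each task receives an index plus a queue pair: `instruction`
        # carries commands from the master, `returns` carries replies back
        i, (instruction, returns) = args
        returns.put("READY")
        while True:
            msg = instruction.get()
            if msg == "HELLO":
                returns.put("ACK")
            elif msg == "QUIT":
                break
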
Example #2
    def test_pi_estimation(self):
        pool = Pool(processes=4)
        NUM_SAMPLES = int(1e6)
        pi = 4.0 * sum(pool.map(is_inside, range(0, NUM_SAMPLES))) / NUM_SAMPLES
        assert 3 < pi < 4
        print("Pi is roughly {}".format(pi))

        pool.terminate()
        pool.join()
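
`is_inside` is not shown in these excerpts. Given the `4.0 * sum(...) / NUM_SAMPLES` estimator, a plausible sketch is a Monte Carlo test against the unit quarter-circle (the argument is ignored; it only indexes the sample):

    import random

    def is_inside(_):
        # sample a point uniformly in the unit square and report
        # whether it lands inside the quarter circle of radius 1
        x, y = random.random(), random.random()
        return 1 if x * x + y * y <= 1 else 0
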
Example #3
    def test_pool_more(self):
        pool = Pool(4)
        res = pool.map(f, [i for i in range(1000)])

        # map() has already completed here; this call just confirms
        # that all workers came up
        pool.wait_until_workers_up()

        pool.terminate()
        pool.join()
        assert res == [i**2 for i in range(1000)]
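
`f` is likewise not defined here; the `[i**2 for i in range(1000)]` assertion implies a simple squaring worker:

    def f(x):
        # square the input, matching the `res == [i**2 ...]` assertions
        return x * x
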
Example #4
    def test_pool_starmap2(self):
        pool = Pool(4)
        res = pool.starmap(f2, [(x, x) for x in range(100)], 10)  # chunksize=10
        assert res == [x * x for x in range(100)]

        async_res = pool.starmap_async(f, [(x,) for x in range(100)], 10)
        res = async_res.get()
        assert res == [x * x for x in range(100)]

        pool.terminate()
        pool.join()
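
`f2` receives two positional arguments via `starmap`, and the `[x * x for x in range(100)]` assertion suggests it multiplies them:

    def f2(x, y):
        # multiply the two unpacked arguments
        return x * y
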
Example #5
    def test_pool_more(self):
        pool = Pool(4)

        # explicitly start workers instead of lazy start
        pool.start_workers()
        res = pool.map(f, [i for i in range(1000)])

        # map() has already completed here; this call just confirms
        # that all workers came up
        pool.wait_until_workers_up()

        pool.terminate()
        pool.join()
        assert res == [i**2 for i in range(1000)]
Example #6
    def test_pool_imap(self):
        pool = Pool(4)
        res_iter = pool.imap(f, [x for x in range(100)], 1)
        res = list(res_iter)
        assert res == [x * x for x in range(100)]

        res_iter = pool.imap_unordered(f, [x for x in range(100)], 1)
        res = list(res_iter)
        assert len(res) == 100
        res.sort()
        assert res == [x * x for x in range(100)]

        pool.terminate()
        pool.join()
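
`imap` yields results in input order, while `imap_unordered` yields them as workers finish, which is why the second result list is sorted before comparing. A minimal sketch of lazy consumption, reusing the `f` assumed above:

    pool = Pool(2)
    for square in pool.imap(f, range(5), 1):
        print(square)  # prints 0, 1, 4, 9, 16, in input order
    pool.terminate()
    pool.join()
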
Example #7
    def test_pool_apply(self):
        pool = Pool(4)
        res_async = pool.apply_async(f, (42,))
        r = res_async.get()
        assert r == (42 * 42)

        res = pool.apply(f, (36,))
        assert res == (36 * 36)

        res = pool.apply(fy, (36,), {"y": 2})
        assert res == (36 * 36 * 2)

        pool.terminate()
        pool.join()
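
`fy` accepts a keyword argument: `pool.apply(fy, (36,), {"y": 2})` returning `36 * 36 * 2` implies something like:

    def fy(x, y=1):
        # square x, scaled by the keyword argument y
        return x * x * y
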
Example #8
    def test_pool_multiple_workers_inside_one_job(self):
        old_val = fiber_config.cpu_per_job
        try:
            fiber_config.cpu_per_job = 2
            pool = Pool(4)
            # explicitly start workers instead of lazy start
            pool.start_workers()
            # wait for all the workers to start
            pool.wait_until_workers_up()

            res = pool.map(get_proc_name, [i for i in range(4)], chunksize=1)
            pool.terminate()
            pool.join()
            res.sort()
            # 4 workers spread across 2 jobs: each job hosts its own
            # `ForkProcess-1` and `ForkProcess-2`
            assert res == ['ForkProcess-1', 'ForkProcess-1', 'ForkProcess-2', 'ForkProcess-2'], res
        finally:
            fiber_config.cpu_per_job = old_val
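
`get_proc_name` reports which worker process ran the task. Assuming fiber mirrors `multiprocessing.current_process()`, a plausible sketch:

    def get_proc_name(_):
        # return the name of the fiber process executing this task
        return fiber.current_process().name
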
Example #9
    def test_pool_basic(self):
        pool = Pool(2)
        res = pool.map(f, [1, 2, 3])
        pool.terminate()
        pool.join()
        assert res == [1, 4, 9]