def test_chunk_size(self):
    log_file = os.path.join(os.path.dirname(__file__), "..", "logs", "chunk.log")
    try:
        fiber.reset()
        # save the current config so it can be restored in `finally`
        old_log_file = fiber_config.log_file
        old_log_level = fiber_config.log_level
        fiber.init(cpu_per_job=8, log_file=log_file, log_level="debug")
        n_envs = 9
        queues = [(SimpleQueue(), SimpleQueue()) for _ in range(n_envs)]
        pool = Pool(n_envs)
        # explicitly start workers instead of lazy start
        pool.start_workers()
        print("waiting for all workers to be up")
        # wait for workers to start
        pool.wait_until_workers_up()
        print("all workers are up")

        def run_map():
            print('[master]RUN MAP')
            # Not setting chunk size here; if the chunk size is calculated
            # wrong (anything > 1), one worker gets two queue pairs and the
            # handshake below deadlocks, so map will get stuck
            pool.map(double_queue_worker, enumerate(queues))
            print('[master]RUN MAP DONE')

        td = threading.Thread(target=run_map, daemon=True)
        td.start()

        print('Checking...')
        for i, (_, returns) in enumerate(queues):
            print('[master]Checking queue', i, n_envs)
            assert 'READY' in returns.get()
            print(f'[master]Checking queue {i} done')

        print('All workers are ready, put HELLOs')
        for i, (instruction, _) in enumerate(queues):
            instruction.put("HELLO")
            print(f'[master]PUT HELLO {i}')

        print('All HELLOs sent, waiting for ACKs')
        for i, (_, returns) in enumerate(queues):
            assert 'ACK' in returns.get()
            print(f'[master]GOT ACK {i}')

        print('All ACKs received, send QUIT to workers')
        for i, (instruction, _) in enumerate(queues):
            instruction.put("QUIT")
            print(f'[master]PUT QUIT {i}, {n_envs}')

        pool.terminate()
        pool.join()
    finally:
        fiber.init(log_file=old_log_file, log_level=old_log_level)
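
# `double_queue_worker` is assumed to be a module-level helper (Fiber must be
# able to pickle it, so it cannot be a method or closure). A minimal sketch of
# the READY/HELLO/ACK/QUIT handshake that the test above drives -- hypothetical,
# not the actual implementation:
def double_queue_worker(args):
    _idx, (instruction, returns) = args
    # announce readiness, then serve the handshake until told to quit
    returns.put("READY")
    while True:
        msg = instruction.get()
        if msg == "HELLO":
            returns.put("ACK")
        elif msg == "QUIT":
            break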
def test_pool_more(self):
    pool = Pool(4)
    # explicitly start workers instead of lazy start
    pool.start_workers()
    # wait for all the workers to start before mapping
    pool.wait_until_workers_up()
    res = pool.map(f, [i for i in range(1000)])
    pool.terminate()
    pool.join()
    assert res == [i**2 for i in range(1000)]
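
# `f` is assumed to be a module-level helper; given the assertion above it
# must square its input (sketch):
def f(x):
    return x ** 2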
def test_pool_multiple_workers_inside_one_job(self):
    old_val = fiber_config.cpu_per_job
    try:
        fiber_config.cpu_per_job = 2
        pool = Pool(4)
        # explicitly start workers instead of lazy start
        pool.start_workers()
        # wait for all the workers to start
        pool.wait_until_workers_up()
        res = pool.map(get_proc_name, [i for i in range(4)], chunksize=1)
        pool.terminate()
        pool.join()
        res.sort()
        # With cpu_per_job=2, a pool of 4 spans 2 jobs; each job forks two
        # workers, so each of the 2 jobs contributes one `ForkProcess-1`
        # and one `ForkProcess-2`
        assert res == ['ForkProcess-1', 'ForkProcess-1',
                       'ForkProcess-2', 'ForkProcess-2'], res
    finally:
        fiber_config.cpu_per_job = old_val
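
# `get_proc_name` is assumed to report the name of the worker process that
# handles the task; a sketch via the standard multiprocessing API
# (hypothetical -- the real helper may differ):
import multiprocessing

def get_proc_name(_i):
    return multiprocessing.current_process().name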