def test_chunk_size(self):
    """Regression test: Pool.map must not hang when chunk size is computed.

    Starts a Pool of ``n_envs`` workers, runs ``pool.map`` in a background
    thread, and drives each worker through a READY/HELLO/ACK/QUIT handshake
    over per-worker SimpleQueue pairs.  If the chunk size were calculated
    wrong, the map call would get stuck and the handshake would never finish.
    """
    # Log file dedicated to this test run.
    new_log_file = os.path.join(
        os.path.dirname(__file__), "..", "logs", "chunk.log")
    try:
        fiber.reset()
        # Save the current logging config so it can be restored in `finally`.
        # BUG FIX: the original reused the name `log_file` for the saved
        # value, clobbering the chunk.log path computed above, so fiber was
        # initialized with the old log file instead of chunk.log.
        orig_log_file = fiber_config.log_file
        orig_log_level = fiber_config.log_level
        fiber.init(cpu_per_job=8, log_file=new_log_file, log_level="debug")

        n_envs = 9
        # One (instruction, returns) queue pair per worker.
        queues = [(SimpleQueue(), SimpleQueue()) for _ in range(n_envs)]
        pool = Pool(n_envs)
        # explicitly start workers instead of lazy start
        pool.start_workers()
        print("waiting for all workers to be up")
        # wait some time for workers to start
        pool.wait_until_workers_up()
        #time.sleep(20)
        print("all workers are up")

        def run_map():
            print('[master]RUN MAP')
            # Not setting chunk size 1 here, if chunk size is calculated
            # wrong, map will get stuck
            pool.map(double_queue_worker, enumerate(queues), chunksize=1)
            print('[master]RUN MAP DONE')

        # Daemon thread so a stuck map cannot prevent interpreter exit.
        td = threading.Thread(target=run_map, daemon=True)
        td.start()

        print('Checking...')
        # Phase 1: every worker reports READY.
        for i, (_, returns) in enumerate(queues):
            print('[master]Checking queue', i, n_envs)
            assert 'READY' in returns.get()
            print(f'[master]Checking queue {i} done')
        print('All workers are ready, put HELLOs')
        # Phase 2: send HELLO to every worker.
        for i, (instruction, _) in enumerate(queues):
            instruction.put("HELLO")
            print(f'[master]PUT HELLO {i}')
        print('All HELLOs sent, waiting for ACKs')
        # Phase 3: every worker acknowledges.
        for i, (_, returns) in enumerate(queues):
            assert 'ACK' in returns.get()
            print(f'[master]GOT ACK {i}')
        print('All ACKs sent, send QUIT to workers')
        # Phase 4: tell workers to exit so pool.map can return.
        for i, (instruction, _) in enumerate(queues):
            instruction.put("QUIT")
            print(f'[master]PUT QUIT {i}, {n_envs}')

        pool.terminate()
        pool.join()
    finally:
        # Restore the logging configuration captured before the test.
        fiber.init(log_file=orig_log_file, log_level=orig_log_level)
def test_image_not_found(self):
    """Starting a process from a nonexistent Docker image must raise
    multiprocessing.ProcessError (docker backend only)."""
    if fiber.config.default_backend != "docker":
        pytest.skip("skipped because current backend is not docker")
    try:
        with pytest.raises(multiprocessing.ProcessError):
            # Point fiber at an image that is guaranteed to be missing.
            fiber.init(
                image='this-image-does-not-exist-and-is-only-used-for-testing'
            )
            proc = fiber.Process(name="test_image_not_found")
            # The failure is expected to surface when the process starts.
            proc.start()
    finally:
        # Always undo the bogus-image configuration.
        fiber.reset()
def test_init_image(self, mock_create_job):
    """The image passed to fiber.init must end up in the submitted job spec
    (docker backend only; job creation is mocked via `mock_create_job`)."""
    if fiber.config.default_backend != "docker":
        pytest.skip("skipped because current backend is not docker")
    try:
        fiber.init(image='python:3.6-alpine', backend='docker')
        worker = fiber.Process(name="test_init_image")
        worker.start()
    except NotImplementedError:
        # Some environments cannot actually launch the job; the call into
        # the (mocked) job-creation API is all this test needs.
        pass
    finally:
        fiber.reset()
    # https://docs.python.org/3/library/unittest.mock.html#unittest.mock.Mock.call_args # noqa E501
    # First positional argument of the recorded call is the job spec.
    submitted_spec = mock_create_job.call_args[0][0]
    assert submitted_spec.image == "python:3.6-alpine"
def test_no_python3_inside_image(self):
    """Starting a fiber process inside an image without python3 must raise
    multiprocessing.ProcessError (docker backend only)."""
    if fiber.config.default_backend != "docker":
        pytest.skip("skipped because current backend is not docker")
    # NOTE(review): the triple-quoted block below is dead code (an old
    # log-capture setup kept as a string literal) — candidate for deletion.
    '''
    fp = io.StringIO()
    handler = logging.StreamHandler(fp)
    logger = logging.getLogger("fiber")
    logger.setLevel(level=logging.DEBUG)
    logger.addHandler(handler)
    '''
    with pytest.raises(multiprocessing.ProcessError):
        try:
            # ubuntu:18.04 ships without python3 on PATH, so the worker
            # process cannot bootstrap.
            fiber.init(image='ubuntu:18.04')
            p = fiber.Process(name="test_no_python3_inside_image")
            p.start()
        finally:
            fiber.reset()
            #logger.removeHandler(handler)
# NOTE(review): the dangling ''' below appears to open a string literal that
# comments out the duplicate test_no_python3_inside_image definition that
# follows (its closing quotes are outside this chunk) — confirm before
# touching it; removing it would activate the duplicate and shadow this test.
'''
def test_no_python3_inside_image(self):
    """Variant that also captures fiber's log output and checks the
    process-start failure is reported exactly once (docker backend only)."""
    if fiber.config.default_backend != "docker":
        pytest.skip("skipped because current backend is not docker")
    # Attach an in-memory handler so fiber's log output can be inspected.
    log_stream = io.StringIO()
    capture_handler = logging.StreamHandler(log_stream)
    fiber_logger = logging.getLogger("fiber")
    fiber_logger.setLevel(level=logging.DEBUG)
    fiber_logger.addHandler(capture_handler)
    try:
        # ubuntu:18.04 has no python3, so starting the worker must fail.
        fiber.init(image='ubuntu:18.04')
        worker = fiber.Process(name="test_no_python3_inside_image")
        worker.start()
        captured = log_stream.getvalue()
        failure_count = sum(
            1 for _ in re.finditer('Failed to start Fiber process', captured)
        )
        # The failure should be logged exactly once — no retries, no spam.
        assert failure_count == 1
    finally:
        fiber.reset()
        fiber_logger.removeHandler(capture_handler)