async def test_pass_data_between_subprocesses_outer_fails(run, num_workers):
    """Pipe data between two subprocesses where the outer/src one fails.

    The job itself must end with the src process' CalledProcessError(1),
    since the src subprocess is started with check=True.
    """
    if num_workers < 2:
        pytest.skip('need Scheduler with at least 2 workers')
    argv_src = mock_argv('FOO', 'BAR', 'exit:1')
    argv_dst = mock_argv('foo', 'in:', 'bar', 'in:', 'baz', 'in:')

    # Outer/src process does exit 1 while inner process is waiting for input.
    # Inner/dst process gets EOF, prints empty string, and does exit 0.

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv_src, result=1):
            async with ctx.subprocess(
                    argv_src, stdout=PIPE, check=True) as proc_src:
                with ctx.tjob.subprocess_xevents(argv_dst, result=0):
                    async with ctx.subprocess(
                            argv_dst, stdin=PIPE, stdout=PIPE,
                            check=True) as proc_dst:
                        await copy_lines(proc_src.stdout, proc_dst.stdin)
                        output = await proc_dst.stdout.read()
                        await proc_dst.wait()
                await proc_src.wait()
        return output

    job = TJob('foo', coro=coro)
    done = await run([job])
    assert verify_tasks(done, {'foo': CalledProcessError(1, argv_src)})
async def test_abort_job_with_two_non_terminating_kills_both(run, num_workers):
    """Aborting a job whose two children ignore SIGTERM/SIGINT kills both.

    Each subprocess is started with kill_delay=0.1, so after the ignored
    termination attempt both should be SIGKILLed, keeping total runtime
    well under a second.
    """
    if num_workers < 2:
        pytest.skip('need Scheduler with at least 2 workers')
    argv1 = mock_argv('ignore:SIGTERM', 'ignore:SIGINT', 'FOO', 'sleep:5')
    argv2 = mock_argv('ignore:SIGTERM', 'ignore:SIGINT', 'BAR', 'sleep:5')

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv1, result='kill'):
            async with ctx.subprocess(
                    argv1, stdout=PIPE, kill_delay=0.1) as proc1:
                assert b'FOO\n' == await proc1.stdout.readline()
                with ctx.tjob.subprocess_xevents(argv2, result='kill'):
                    async with ctx.subprocess(
                            argv2, stdout=PIPE, kill_delay=0.1) as proc2:
                        assert b'BAR\n' == await proc2.stdout.readline()
                        # Both subprocesses will now ignore SIGTERM and we can
                        # proceed with cancelling.
                        async with abort_in(0.1):
                            await proc2.wait()
                            await proc1.wait()

    todo = [TJob('foo', coro=coro)]
    with assert_elapsed_time(lambda t: t < 1):
        done = await run(todo)
    assert verify_tasks(done, {'foo': Cancelled})
async def test_pass_data_between_subprocesses_inner_fails(run, num_workers):
    """Pipe data between two subprocesses where the inner/dst one fails.

    The job returns the dst process' returncode (2) together with whatever
    output was produced before it exited.
    """
    if num_workers < 2:
        pytest.skip('need Scheduler with at least 2 workers')
    argv_src = mock_argv('FOO', 'BAR', 'BAZ')
    argv_dst = mock_argv('foo', 'in:', 'bar', 'in:', 'exit:2')

    # Outer/src process produces all its output and does exit 0.
    # Inner/dst process does exit 2 without consuming all input.
    # Adding check=True here complicates the test as the outer process
    # may or may not be terminated by the inner's CalledProcessError(2).

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv_src, result=0):
            async with ctx.subprocess(
                    argv_src, stdout=PIPE, check=True) as proc_src:
                with ctx.tjob.subprocess_xevents(argv_dst, result=2):
                    async with ctx.subprocess(
                            argv_dst, stdin=PIPE, stdout=PIPE) as proc_dst:
                        output = b''
                        await copy_lines(proc_src.stdout, proc_dst.stdin)
                        output += await proc_dst.stdout.read()
                        await proc_dst.wait()
                await proc_src.wait()
        return proc_dst.returncode, output

    job = TJob('foo', coro=coro)
    done = await run([job])
    assert verify_tasks(done, {'foo': (2, b'foo\nFOO\nbar\nBAR\n')})
async def job(ctx):
    """Run a mock child that writes to stdout/stderr and logs; return stdout."""
    messages = [
        'Printing to stdout', 'err:', 'Printing to stderr', 'log:Logging!']
    argv = mock_argv(*messages)
    async with ctx.subprocess(argv, stdout=PIPE) as proc:
        captured = await proc.stdout.read()
        await proc.wait()
    return captured
async def test_two_subprocesses_concurrently(run, num_workers):
    """Start two subprocesses concurrently from one job via gather().

    With >=2 workers both run and the job returns [0, 0]; with only one
    worker the second reservation fails with a RuntimeError.
    """
    argv = mock_argv()

    async def coro(ctx):
        results = await asyncio.gather(
            ctx.run_in_subprocess(argv),
            ctx.run_in_subprocess(argv),
            return_exceptions=True,
        )
        # Re-raise the first failure (if any) so the job ends with it.
        for result in results:
            if isinstance(result, Exception):
                raise result
        return results

    job = TJob('foo', coro=coro)
    if num_workers < 2:  # Cannot reserve second worker
        with job.subprocess_xevents(argv, result=0):  # Only one subprocess
            expect_result = RuntimeError(
                f'Cannot reserve >={num_workers} worker(s)!')
    else:
        with job.subprocess_xevents(argv, result=0):
            with job.subprocess_xevents(argv, result=0):
                expect_result = [0, 0]
    done = await run([job])
    assert verify_tasks(done, {'foo': expect_result})
async def test_abort_many_jobs_in_subprocs_returns_immediately(num_jobs, run):
    """Aborting many sleeping subprocess jobs must not wait out their sleeps."""
    jobs = [
        TJob(f'foo #{n}', argv=mock_argv('sleep:30'))
        for n in range(num_jobs)
    ]
    with assert_elapsed_time(lambda elapsed: elapsed < 10.0):
        done = await run(jobs, abort_after=0.1)
    expected = {f'foo #{n}': Cancelled for n in range(num_jobs)}
    assert verify_tasks(done, expected)
async def test_one_failed_between_two_in_subprocs_cancels_last(run, tmp_path):
    """A failing middle job lets its predecessor finish but cancels its dependent."""
    foo_path = tmp_path / 'foo'
    baz_path = tmp_path / 'baz'
    jobs = [
        TJob('foo', before={'bar'}, argv=mock_argv(f'touch:{foo_path}')),
        TJob('bar', {'foo'}, before={'baz'}, argv=mock_argv('exit:1')),
        TJob('baz', {'bar'}, argv=mock_argv(f'touch:{baz_path}')),
    ]
    done = await run(jobs)
    expected = {
        'foo': 0,
        'bar': CalledProcessError(1, mock_argv('exit:1')),
        'baz': Cancelled,
    }
    assert verify_tasks(done, expected)
    # Only the first touch-file should have been created.
    assert foo_path.is_file()
    assert not baz_path.exists()
async def test_not_awaiting_subprocess_terminates_it(run):
    """Leaving the subprocess context without wait() must terminate the child."""
    argv = mock_argv('FOO', 'sleep:5')

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv, result='terminate'):
            async with ctx.subprocess(argv, stdout=PIPE) as proc:
                first_line = await proc.stdout.readline()
                # Deliberately no `await proc.wait()`: exiting the context
                # while the child is still sleeping triggers its termination.
        return first_line

    done = await run([TJob('foo', coro=coro)])
    assert verify_tasks(done, {'foo': b'FOO\n'})
async def test_one_ok_subprocess_with_error_output(run):
    """Capture stderr from a subprocess that exits successfully."""
    argv = mock_argv('err:', 'FOO')

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv, result=0):
            async with ctx.subprocess(argv, stderr=PIPE) as proc:
                captured = await proc.stderr.read()
                await proc.wait()
        return captured

    done = await run([TJob('foo', coro=coro)])
    assert verify_tasks(done, {'foo': b'FOO\n'})
async def test_pass_data_between_subprocesses(run, num_workers):
    """Pipe stdout from one subprocess into stdin of another, both succeed.

    The dst process interleaves its own words with the lines it reads from
    the src process, producing foo/FOO/bar/BAR/baz/BAZ.
    """
    if num_workers < 2:
        pytest.skip('need Scheduler with at least 2 workers')
    argv_src = mock_argv('FOO', 'BAR', 'BAZ')
    argv_dst = mock_argv('foo', 'in:', 'bar', 'in:', 'baz', 'in:')

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv_src, result=0):
            async with ctx.subprocess(argv_src, stdout=PIPE) as proc_src:
                with ctx.tjob.subprocess_xevents(argv_dst, result=0):
                    async with ctx.subprocess(
                            argv_dst, stdin=PIPE, stdout=PIPE) as proc_dst:
                        await copy_lines(proc_src.stdout, proc_dst.stdin)
                        output = await proc_dst.stdout.read()
                        await proc_dst.wait()
                await proc_src.wait()
        return output

    job = TJob('foo', coro=coro)
    done = await run([job])
    assert verify_tasks(done, {'foo': b'foo\nFOO\nbar\nBAR\nbaz\nBAZ\n'})
async def test_one_failing_subprocess_with_output_disable_check(run):
    """With check=False a non-zero exit must not raise; output is returned."""
    argv = mock_argv('FOO', 'exit:1')

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv, result=1):
            async with ctx.subprocess(argv, stdout=PIPE, check=False) as proc:
                captured = await proc.stdout.read()
                await proc.wait()
                assert proc.returncode == 1
        return captured

    done = await run([TJob('foo', coro=coro)])
    assert verify_tasks(done, {'foo': b'FOO\n'})
async def test_two_subprocesses_with_output_concurrently(run, num_workers):
    """Run two output-producing subprocesses from one job, nested contexts.

    With >=2 workers both outputs are concatenated; with only one worker
    the second subprocess cannot reserve a worker, the job fails with a
    RuntimeError, and the first subprocess gets terminated.
    """
    argv1 = mock_argv('FOO')
    argv2 = mock_argv('BAR')

    async def coro(ctx):
        async with ctx.subprocess(argv1, stdout=PIPE) as proc1:
            async with ctx.subprocess(argv2, stdout=PIPE) as proc2:
                output2 = await proc2.stdout.read()
                await proc2.wait()
            output1 = await proc1.stdout.read()
            await proc1.wait()
        return output1 + output2

    job = TJob('foo', coro=coro)
    if num_workers < 2:  # Cannot reserve second worker
        with job.subprocess_xevents(argv1, result='terminate'):
            expect_result = RuntimeError(
                f'Cannot reserve >={num_workers} worker(s)!')
    else:
        with job.subprocess_xevents(argv1, result=0):
            with job.subprocess_xevents(argv2, result=0):
                expect_result = b'FOO\nBAR\n'
    done = await run([job])
    assert verify_tasks(done, {'foo': expect_result})
async def test_two_subprocesses_sequentially_on_same_ticket_succeeds(run):
    """One reserved worker ticket may be reused for sequential subprocesses."""
    argv = mock_argv()

    async def coro(ctx):
        async with ctx.reserve_worker() as ticket:
            with ctx.tjob.subprocess_xevents(argv, result=0):
                result = await ctx.run_in_subprocess(argv, ticket=ticket)
            # Second run reuses the same ticket; both exit 0, so 0 + 0 == 0.
            with ctx.tjob.subprocess_xevents(
                    argv, result=0, reuse_ticket=True):
                result += await ctx.run_in_subprocess(argv, ticket=ticket)
        return result

    job = TJob('foo', coro=coro)
    done = await run([job])
    assert verify_tasks(done, {'foo': 0})
async def test_non_terminating_subprocess_is_killed(run):
    """A child ignoring SIGTERM must be SIGKILLed after kill_delay."""
    argv = mock_argv('ignore:SIGTERM', 'FOO', 'sleep:5')

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv, result='kill'):
            async with ctx.subprocess(
                    argv, stdout=PIPE, kill_delay=0.1) as proc:
                first_line = await proc.stdout.readline()
                # Deliberately no `await proc.wait()`: leaving the context
                # attempts termination (ignored), then kills after kill_delay.
        return first_line

    jobs = [TJob('foo', coro=coro)]
    with assert_elapsed_time(lambda elapsed: elapsed < 0.5):
        done = await run(jobs)
    assert verify_tasks(done, {'foo': b'FOO\n'})
async def test_subprocess_with_custom_env(run):
    """A custom environment dict is passed through to the subprocess."""
    argv = mock_argv('env:FOOBAR')
    custom_env = dict(os.environ, FOOBAR='BAZZLE')

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv, result=0):
            async with ctx.subprocess(
                    argv, stdout=PIPE, env=custom_env) as proc:
                captured = await proc.stdout.read()
                await proc.wait()
        return captured

    done = await run([TJob('foo', coro=coro)])
    assert verify_tasks(done, {'foo': b'BAZZLE\n'})
async def test_subprocess_with_custom_cwd(run, tmp_path):
    """The subprocess runs in — and reports — a custom working directory."""
    argv = mock_argv('cwd:')
    workdir = tmp_path / 'custom'
    workdir.mkdir()

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv, result=0):
            async with ctx.subprocess(argv, stdout=PIPE, cwd=workdir) as proc:
                raw = await proc.stdout.read()
                await proc.wait()
        # The child prints its cwd; decode it back into a Path for comparison.
        return Path(os.fsdecode(raw.rstrip()))

    done = await run([TJob('foo', coro=coro)])
    assert verify_tasks(done, {'foo': workdir})
def mock_argv(self, extra_args=None):
    """Return a suitable mock_argv for doing this job in a subprocess."""
    assert self.out.endswith('\n')  # newline-terminated string
    assert self.err.endswith('\n')  # newline-terminated string
    extras = list(extra_args or [])
    # Any 'exit:...' extras must come last; all other extras go between
    # the stdout and stderr arguments.
    exit_extras = [arg for arg in extras if arg.startswith('exit:')]
    other_extras = [arg for arg in extras if not arg.startswith('exit:')]
    args = ['out:', self.out.rstrip()]
    args += other_extras
    args += ['err:', self.err.rstrip()]
    if self.log:
        args.append(f'log:{self.log}')
    args += exit_extras
    return mock_argv(*args)
async def test_one_failing_subprocess_with_output_enable_check(run):
    """With check=True a non-zero exit raises CalledProcessError on exit.

    The error is raised when the subprocess context is exited, so code
    after the `async with` block must be unreachable.
    """
    argv = mock_argv('FOO', 'exit:1')

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv, result=1):
            async with ctx.subprocess(argv, stdout=PIPE, check=True) as proc:
                output = await proc.stdout.read()
                assert output == b'FOO\n'
                await proc.wait()
                assert proc.returncode == 1
            assert False  # skipped due to CalledProcessError on context exit

    todo = [TJob('foo', coro=coro)]
    done = await run(todo)
    assert verify_tasks(done, {'foo': CalledProcessError(1, argv)})
async def test_abort_one_non_terminating_job_teminates_then_kills(run):
    """Abort first tries SIGTERM; when ignored, SIGKILL follows kill_delay."""
    argv = mock_argv('ignore:SIGTERM', 'ignore:SIGINT', 'FOO', 'sleep:5')

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv, result='kill'):
            async with ctx.subprocess(
                    argv, stdout=PIPE, kill_delay=0.1) as proc:
                assert b'FOO\n' == await proc.stdout.readline()
                # Subprocess will now ignore SIGTERM when we are cancelled
                async with abort_in(0.1):
                    await proc.wait()

    todo = [TJob('foo', coro=coro)]
    with assert_elapsed_time(lambda t: t < 2):
        done = await run(todo)
    assert verify_tasks(done, {'foo': Cancelled})
async def test_abort_job_with_two_subprocs_terminates_both(run, num_workers):
    """Aborting a job with two nested sleeping subprocesses terminates both."""
    if num_workers < 2:
        pytest.skip('need Scheduler with at least 2 workers')
    argv = mock_argv('sleep:5')

    async def coro(ctx):
        with ctx.tjob.subprocess_xevents(argv, result='terminate'):
            async with ctx.subprocess(argv) as proc1:
                with ctx.tjob.subprocess_xevents(argv, result='terminate'):
                    async with ctx.subprocess(argv) as proc2:
                        await proc2.wait()
                await proc1.wait()

    todo = [TJob('foo', coro=coro)]
    with assert_elapsed_time(lambda t: t < 1):
        done = await run(todo, abort_after=0.2)
    assert verify_tasks(done, {'foo': Cancelled})
async def test_two_subprocesses_concurrently_on_same_ticket_fails(run):
    """One worker ticket cannot back two subprocesses at the same time.

    One of the gathered runs wins the ticket; the other must fail with a
    ValueError about the ticket already being in use.
    """
    argv = mock_argv()

    async def coro(ctx):
        async with ctx.reserve_worker() as ticket:
            with ctx.tjob.subprocess_xevents(argv, result=0):  # Only one
                results = await asyncio.gather(
                    ctx.run_in_subprocess(argv, ticket=ticket),
                    ctx.run_in_subprocess(argv, ticket=ticket),
                    return_exceptions=True,
                )
                # Re-raise the first failure (if any) so the job ends with it.
                for result in results:
                    if isinstance(result, BaseException):
                        raise result
                return results

    job = TJob('foo', coro=coro)
    done = await run([job])
    assert verify_tasks(done, {'foo': expect_ValueError_already_in_use})
async def test_abort_one_job_in_subproc_returns_immediately(run):
    """Aborting a job stuck in a 5s sleep must finish well under a second."""
    jobs = [TJob('foo', argv=mock_argv('sleep:5'))]
    with assert_elapsed_time(lambda elapsed: elapsed < 1):
        done = await run(jobs, abort_after=0.1)
    assert verify_tasks(done, {'foo': Cancelled})
async def job(ctx):
    """Run a mock child that writes to stdout/stderr and logs; just wait."""
    messages = [
        'Printing to stdout', 'err:', 'Printing to stderr', 'log:Logging!']
    argv = mock_argv(*messages)
    async with ctx.subprocess(argv) as child:
        await child.wait()
async def test_one_ok_job_in_subproc(run, tmp_path):
    """A single touch-file subprocess job succeeds and creates the file."""
    target = tmp_path / 'foo'
    done = await run([TJob('foo', argv=mock_argv(f'touch:{target}'))])
    assert verify_tasks(done, {'foo': 0})
    assert target.is_file()