def test_capsys(testdir_setup, global_cap_method):
    testdir_setup.makepyfile(r"""
        def test_print(capsys):
            import sys
            print("a:out")
            print("a:err", file=sys.stderr)
            assert list(capsys.readouterr()) == ["a:out\n", "a:err\n"]
            assert True  # line 6
            print("b:out", flush=True)
            print("b:err", flush=True, file=sys.stderr)
            assert list(capsys.readouterr()) == ["b:out\n", "b:err\n"]
    """)  # raw string \n
    pe = testdir_setup.spawn_pytest("--break=test_capsys.py:6 "
                                    "--capture=%s" % global_cap_method)
    pe.expect(prompt_re)
    befs = unansi(pe.before)
    assert "a:out" not in befs
    lbefs = LineMatcher(befs)
    lbefs.fnmatch_lines(("*>*/test_capsys.py(6)test_print()", "->*# line 6"))
    pe.sendline("c")
    pe.expect(EOF)
    befs = unansi(pe.before)
    assert "b:out" not in befs
    lbefs = LineMatcher(befs)
    lbefs.fnmatch_lines(["*[[]100%[]]*", "*1 passed*"])

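# The pdb-break tests in this file assume a few helpers from the suite's
# conftest. Below is a minimal sketch of what `prompt_re` and `unansi` might
# look like (the names are used above; the bodies here are assumptions, not
# the project's actual definitions):
import re

# pexpect pattern matching the (Pdb) / (Pdb++) prompt
prompt_re = r"\(Pdb[+]*\)\s?"

_ansi_esc = re.compile(r"\x1b\[[0-9;]*[A-Za-z]")

def unansi(data, as_list=True):
    """Decode captured pexpect bytes and strip ANSI escape sequences."""
    text = _ansi_esc.sub("", data.decode("utf-8", errors="replace"))
    text = text.replace("\r\n", "\n")
    return text.split("\n") if as_list else text
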
def test_list_repos_with_skipped_project(cli_runner, project_tree, piped_shell_execute):
    """
    :type cli_runner: click.testing.CliRunner
    :type project_tree: py.path.local
    :type piped_shell_execute: mocker.patch
    """
    root = str(project_tree.join("root_c"))
    base_args = ["-p", root, "--repos"]

    # Test pretty print when skipping dep_c1.3.
    command_args = base_args + ["--skip-project=dep_c1.3", "-pp"]
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exit_code == 0, result.output
    matcher = LineMatcher(result.output.splitlines())
    matcher.fnmatch_lines([
        "*[\\/]test_projects0[\\/]root_c",
        " *[\\/]test_projects0[\\/]cs1",
        " (*[\\/]test_projects0[\\/]cs1)",
        " *[\\/]test_projects0[\\/]cs2",
        " (*[\\/]test_projects0[\\/]cs1)",
    ])

    # Test pretty print when skipping dep_c2.1.
    command_args = base_args + ["--skip-project=dep_c2.1", "-pp"]
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exit_code == 0, result.output
    matcher = LineMatcher(result.output.splitlines())
    matcher.fnmatch_lines([
        "*[\\/]test_projects0[\\/]root_c",
        " *[\\/]test_projects0[\\/]cs1",
        " (*[\\/]test_projects0[\\/]cs1)",
        " {*[\\/]test_projects0[\\/]cs2}",
        " (*[\\/]test_projects0[\\/]cs1)",
    ])

def test_linematcher_match_failure() -> None:
    lm = LineMatcher(["foo", "foo", "bar"])
    with pytest.raises(Failed) as e:
        lm.fnmatch_lines(["foo", "f*", "baz"])
    assert e.value.msg is not None
    assert e.value.msg.splitlines() == [
        "exact match: 'foo'",
        "fnmatch: 'f*'",
        "   with: 'foo'",
        "nomatch: 'baz'",
        "    and: 'bar'",
        "remains unmatched: 'baz'",
    ]

    lm = LineMatcher(["foo", "foo", "bar"])
    with pytest.raises(Failed) as e:
        lm.re_match_lines(["foo", "^f.*", "baz"])
    assert e.value.msg is not None
    assert e.value.msg.splitlines() == [
        "exact match: 'foo'",
        "re.match: '^f.*'",
        "    with: 'foo'",
        " nomatch: 'baz'",
        "     and: 'bar'",
        "remains unmatched: 'baz'",
    ]

def test_list_repos_conflict_skipped_ignored_project(cli_runner, project_tree, piped_shell_execute):
    """
    :type cli_runner: click.testing.CliRunner
    :type project_tree: py.path.local
    :type piped_shell_execute: mocker.patch
    """
    root = str(project_tree.join('root_c'))
    base_args = ['-p', root, '--repos']

    # Test pretty print when dep_c1.3 is both skipped and ignored.
    command_args = base_args + ['--skip-project=dep_c1.3', '--ignore-project=dep_c1.3', '-pp']
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exit_code == 0, result.output
    matcher = LineMatcher(result.output.splitlines())
    matcher.fnmatch_lines([
        '*[\\/]test_projects0[\\/]root_c',
        ' *[\\/]test_projects0[\\/]cs1',
        ' (*[\\/]test_projects0[\\/]cs1)',
    ])

    # Test pretty print when dep_c2.1 is both skipped and ignored.
    command_args = base_args + ['--skip-project=dep_c2.1', '--ignore-project=dep_c2.1', '-pp']
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exit_code == 0, result.output
    matcher = LineMatcher(result.output.splitlines())
    matcher.fnmatch_lines([
        '*[\\/]test_projects0[\\/]root_c',
        ' *[\\/]test_projects0[\\/]cs1',
        ' (*[\\/]test_projects0[\\/]cs1)',
        ' <*[\\/]test_projects0[\\/]cs2>',
    ])

def test_elsewhere_fixture_parametrize(testdir_setup):
    # TODO try hard-coding coordinates to show ordering is predictable
    testdir_setup.makepyfile(test_file="""
        import pytest

        @pytest.fixture(params=["a", "b"])
        def fixie(request):
            spam = request.param  # <- line 5
            yield spam.upper()
            del spam

        def test_foo(fixie):
            assert fixie in "AB"

        def test_bar(fixie):
            assert fixie in "AB"
    """)
    pe = testdir_setup.spawn_pytest("--break=test_file.py:5")
    pairs = [("foo", "a"), ("foo", "b"), ("bar", "a"), ("bar", "b")]
    for word, letter in pairs:
        pe.expect(prompt_re)
        befs = LineMatcher(unansi(pe.before))
        befs.fnmatch_lines(["*>*/test_file.py(5)fixie()", "->*# <- line 5"])
        pe.sendline("request.node.name, request.param")
        pe.expect(prompt_re)
        befs = LineMatcher(unansi(pe.before))
        befs.fnmatch_lines([f"*test_{word}*{letter}*{letter}*"])
        pe.sendline("c")
    pe.expect(EOF)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines("*4 passed*")

def test_elsewhere_fixture_with_yield_nested(testdir_setup):
    testdir_setup.makepyfile(test_file="""
        import pytest

        @pytest.fixture
        def fixie():
            class C:
                def __init__(self):
                    print("in init")  # <- line 7

                def ran(self):
                    return True
            c = C()
            yield c
            del c

        def test_foo(fixie):
            assert fixie.ran()
    """)
    pe = testdir_setup.spawn_pytest("--break=test_file.py:7")
    pe.expect(prompt_re)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines(["*>*/test_file.py(7)__init__()", "->*# <- line 7"])
    pe.sendline("c")
    pe.expect(EOF)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines("*1 passed*")

def test_marked_module_func_nested(testdir):
    filename = testdir.copy_example("asyncio/test_marked_mod.py")
    assert filename.exists()
    pe = testdir.spawn_pytest("--break=test_marked_mod.py:24 "
                              "test_marked_mod.py::test_baz")
    pe.expect(prompt_re)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines(["*>*(24)inner()*", "*assert x*"])

    # Ensure previous frames are properly hidden
    pe.sendline("w")
    pe.expect(prompt_re)
    assert b")runcall_until" not in pe.before
    assert b"runcall_until_async" not in pe.before
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines([
        "*(26)test_baz()*",
        "*->*await inner*",
        "*(24)inner()*",
    ])
    pe.sendline("c")
    pe.expect(EOF)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines([
        "*1 passed*",
    ])

def test_elsewhere_fixture_double(testdir_setup):
    testdir_setup.makepyfile(test_file="""
        import pytest

        @pytest.fixture
        def fixie():
            return True  # <- line 5

        def test_foo(fixie):
            assert fixie

        def test_bar(fixie):
            assert fixie
    """)
    pe = testdir_setup.spawn_pytest("--break=test_file.py:5")
    pe.expect(prompt_re)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines(["*>*/test_file.py(5)fixie()", "->*# <- line 5"])
    pe.sendline("c")
    pe.expect(prompt_re)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines(["*>*/test_file.py(5)fixie()", "->*# <- line 5"])
    pe.sendline("c")
    pe.expect(EOF)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines("*2 passed*")

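# The `testdir_setup` fixture used throughout these tests is not shown in
# this excerpt. A hypothetical sketch of what it might do (the real fixture
# likely installs more, e.g. default options or a sample test file):
import pytest

@pytest.fixture
def testdir_setup(testdir):
    # Register the plugin under test in the tmp dir's conftest so
    # spawn_pytest picks it up in the subprocess.
    testdir.makeconftest('pytest_plugins = ["pytest_pdb_break"]')
    return testdir
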
def test_cant_find_root(cli_runner, project_tree, piped_shell_execute):
    """
    :type cli_runner: click.testing.CliRunner
    :type project_tree: py.path.local
    :type piped_shell_execute: mocker.patch
    """
    proj_dir = str(project_tree.join('not_a_project'))
    command_args = ['-p', proj_dir, 'echo', 'Hi', '{name}!']
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exception is None or isinstance(result.exception, SystemExit)
    assert result.exit_code != 0
    matcher = LineMatcher(result.output.splitlines())
    matcher.fnmatch_lines([
        'deps: error: could not find "environment.devenv.yml" for "*[\\/]test_projects0[\\/]not_a_project".',
    ])

    proj_dir = str(project_tree.join('not_a_valid_folder'))
    command_args = ['-p', proj_dir, 'echo', 'Hi', '{name}!']
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exception is None or isinstance(result.exception, SystemExit)
    assert result.exit_code != 0
    matcher = LineMatcher(result.output.splitlines())
    matcher.fnmatch_lines([
        'deps: error: could not find "environment.devenv.yml" for "*[\\/]test_projects0[\\/]not_a_valid_folder".',
    ])

def test_registered_local_popt(testdir, source, ext_fake):
    # Without the -p option, this behaves identically to the importable,
    # non-repo case
    repo = new_repo(testdir)
    pyexe = get_pyexe("base")
    localfile = repo / "pytest_pdb_break.py"
    localfile.write_binary(ext_fake.join("pytest_pdb_break.py").read_binary())
    rc, out, errs = run_with_input([pyexe, "-", "-p", "pytest_pdb_break"],
                                   source, repo)
    assert rc == 0 and out and not errs
    assert json.loads(out) == {"rootdir": repo, "registered": True}

    # Plugin shows up in help
    rc, out, errs = run_with_input(
        [pyexe, "-mpytest", "-p", "pytest_pdb_break", "--help"],
        "", repo, "pt_help")
    assert rc == 0 and out and not errs
    LineMatcher(out.splitlines()).fnmatch_lines("*--break=*")

    # Plugin not mentioned in report but still runs
    rc, out, errs = run_with_input(
        [pyexe, "-mpytest", "-p", "pytest_pdb_break", "--break=99"],
        "", repo, "pt_opt")
    assert rc == 0 and out and not errs
    assert "plugins: pdb-break" not in out
    LineMatcher(out.splitlines()).fnmatch_lines("*fake-pdb-break*99*")

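# `run_with_input` above is another helper not shown in this excerpt. A rough
# sketch under the assumption that it wraps a subprocess call and that the
# optional last argument is just a label for logging (both assumptions):
import subprocess

def run_with_input(cmd, instr, cwd, label=None):
    """Run cmd in cwd, feeding `instr` on stdin; return (rc, stdout, stderr)."""
    proc = subprocess.run(
        [str(c) for c in cmd],
        input=instr,
        cwd=str(cwd),
        capture_output=True,
        text=True,
    )
    return proc.returncode, proc.stdout, proc.stderr
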
def test_compare(testdir, name):
    result = testdir.run(
        'py.test-benchmark', '--storage', STORAGE, 'compare', '0001', '0002', '0003',
        '--sort', 'min',
        '--columns', 'min,max',
        '--name', name,
        '--histogram', 'foobar',
        '--csv', 'foobar',
    )
    result.stderr.fnmatch_lines(['Generated csv: *foobar.csv'])
    LineMatcher(
        testdir.tmpdir.join('foobar.csv').readlines(cr=0)
    ).fnmatch_lines([
        "name,min,max",
        "tests/test_normal.py::test_xfast_parametrized[[]0[]],2.15628567*e-07,1.03186158*e-05",
        "tests/test_normal.py::test_xfast_parametrized[[]0[]],2.16902756*e-07,7.73929968*e-06",
        "tests/test_normal.py::test_xfast_parametrized[[]0[]],2.17314542*e-07,1.14473891*e-05",
        "",
    ])
    result.stdout.fnmatch_lines([
        'Computing stats ...',
        '---*--- benchmark: 3 tests ---*---',
        'Name (time in ns) * Min * Max ',
        '---*---',
        '*xfast_parametrized[[]0[]] (0003*) 215.6286 (1.0) 10*318.6159 (1.33) ',
        '*xfast_parametrized[[]0[]] (0002*) 216.9028 (1.01) 7*739.2997 (1.0) ',
        '*xfast_parametrized[[]0[]] (0001*) 217.3145 (1.01) 11*447.3891 (1.48) ',
        '---*---',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
    ])
    assert result.ret == 0

def test_deps_parallel(cli_runner, project_tree, monkeypatch):
    """
    :type cli_runner: click.testing.CliRunner
    :type project_tree: py.path.local
    :type monkeypatch: _pytest.monkeypatch
    """
    monkeypatch.chdir(project_tree.join("root_b"))
    command_args = ["--jobs=2", "--", "python", "-c", '"name: {name}"']
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exit_code == 0, result.output
    matcher = LineMatcher(result.output.splitlines())
    matcher.fnmatch_lines_random([
        "=======================================================================================================================",
        "dep_z (1/4)",
        "Finished: dep_z in *",
        "=======================================================================================================================",
        "dep_b.1.1 (2/4)",
        "Finished: dep_b.1.1 in *",
        "=======================================================================================================================",
        "dep_b.1 (3/4)",
        "Finished: dep_b.1 in *",
        "=======================================================================================================================",
        "root_b (4/4)",
        "Finished: root_b in *",
    ])

def test_deps_parallel_2(cli_runner, project_tree, monkeypatch):
    """
    :type cli_runner: click.testing.CliRunner
    :type project_tree: py.path.local
    :type monkeypatch: _pytest.monkeypatch
    """
    monkeypatch.chdir(project_tree.join('root_a'))
    command_args = ['--jobs=2', '--', 'python', '-c', '"name: {name}"']
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exit_code == 0, result.output
    matcher = LineMatcher(result.output.splitlines())
    matcher.fnmatch_lines_random([
        '=======================================================================================================================',
        'dep_z (1/6)',
        'Finished: dep_z in *',
        '=======================================================================================================================',
        'dep_a.2, dep_a.1.1, dep_a.1.2 (4/6)',
        'Finished: dep_a.2 in *',
        'Finished: dep_a.1.1 in *',
        'Finished: dep_a.1.2 in *',
        '=======================================================================================================================',
        'dep_a.1 (5/6)',
        'Finished: dep_a.1 in *',
        '=======================================================================================================================',
        'root_a (6/6)',
        'Finished: root_a in *',
    ])

def test_marked_module_class(testdir):
    filename = testdir.copy_example("asyncio/test_marked_mod.py")
    assert filename.exists()
    pe = testdir.spawn_pytest("--break=test_marked_mod.py:16 "
                              "test_marked_mod.py::TestClass::test_bar")
    pe.expect(prompt_re)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines(
        ["*>*(16)test_bar()*", '*assert "asyncio" in request.keywords*'])
    pe.sendline("c")
    pe.expect(EOF)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines([
        "*1 passed*",
    ])

def test_exception_repr_extraction_error_on_recursion():
    """
    Ensure we can properly detect a recursion error even if some locals
    raise an error on comparison (#2459).
    """
    class numpy_like(object):

        def __eq__(self, other):
            if type(other) is numpy_like:
                raise ValueError('The truth value of an array '
                                 'with more than one element is ambiguous.')

    def a(x):
        return b(numpy_like())

    def b(x):
        return a(numpy_like())

    try:
        a(numpy_like())
    except:
        from _pytest._code.code import ExceptionInfo
        from _pytest.pytester import LineMatcher

        exc_info = ExceptionInfo()
        matcher = LineMatcher(str(exc_info.getrepr()).splitlines())
        matcher.fnmatch_lines([
            '!!! Recursion error detected, but an error occurred locating the origin of recursion.',
            '*The following exception happened*',
            '*ValueError: The truth value of an array*',
        ])

def test_all(self, testdir, pastebinlist):
    from _pytest.pytester import LineMatcher

    testpath = testdir.makepyfile(
        """
        import pytest
        def test_pass():
            pass

        def test_fail():
            assert 0

        def test_skip():
            pytest.skip("")
        """
    )
    reprec = testdir.inline_run(testpath, "--pastebin=all", "-v")
    assert reprec.countoutcomes() == [1, 1, 1]
    assert len(pastebinlist) == 1
    contents = pastebinlist[0].decode("utf-8")
    matcher = LineMatcher(contents.splitlines())
    matcher.fnmatch_lines(
        [
            "*test_pass PASSED*",
            "*test_fail FAILED*",
            "*test_skip SKIPPED*",
            "*== 1 failed, 1 passed, 1 skipped in *",
        ]
    )

async it "cleans up tests properly on interrupt": directory = os.path.join(this_dir, "interrupt_test") expected_file = os.path.join(directory, "expected") assert os.path.isfile(expected_file) with open(expected_file, "r") as fle: expected = fle.read().strip() p = await asyncio.create_subprocess_exec( shutil.which("pytest"), cwd=directory, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) await asyncio.sleep(2) p.send_signal(signal.SIGINT) await p.wait() got = (await p.stdout.read()).decode().strip().split("\n") while got and not got[0].startswith("collected"): got.pop(0) want = expected.strip().split("\n") if len(got) != len(want): print("\n".join(got)) assert False, "expected different number of lines in output" matcher = LineMatcher(got) matcher.fnmatch_lines(want)
def test_obtain_branches_to_delete(mock_stash_api, github_api, branch_name, expected_message, success):
    branches_to_delete = list()
    stash_api = StashAPI("https://myserver.com/stash", username="******", password="******")
    lines = list(
        obtain_branches_to_delete(
            stash_api,
            github_api,
            ["PROJ-A", "PROJ-B"],
            ["esss"],
            branch_name,
            branches_to_delete,
        )
    )

    from _pytest.pytester import LineMatcher

    matcher = LineMatcher(sorted(lines))
    if success:
        matching_lines = [
            f"Found branch `{branch_name}` in these repositories:"
        ]
        matching_lines.extend(expected_message)
        matching_lines.append(r"*To confirm, please repeat the command*")
        assert len(branches_to_delete) == len(expected_message)
    else:
        matching_lines = expected_message
    matcher.re_match_lines(sorted(matching_lines))

def call_merge(branch_text, matching_lines, *, force=False, github_organizations=None):
    try:
        lines = list(
            merge(
                "https://myserver.com/stash",
                ["PROJ-A", "PROJ-B"],
                stash_username="******",
                stash_password="******",
                github_username_or_token="",
                github_password="",
                github_organizations=list() if github_organizations is None else github_organizations,
                branch_text=branch_text,
                confirm=True,
                force=force,
            )
        )
    except CheckError as e:
        lines = list(e.lines)

    from _pytest.pytester import LineMatcher

    matcher = LineMatcher(sorted(lines))
    matcher.re_match_lines(sorted(matching_lines))

def test_exception_repr_extraction_error_on_recursion():
    """
    Ensure we can properly detect a recursion error even if some locals
    raise an error on comparison (#2459).
    """
    from _pytest.pytester import LineMatcher

    class numpy_like(object):

        def __eq__(self, other):
            if type(other) is numpy_like:
                raise ValueError("The truth value of an array "
                                 "with more than one element is ambiguous.")

    def a(x):
        return b(numpy_like())

    def b(x):
        return a(numpy_like())

    with pytest.raises(RuntimeError) as excinfo:
        a(numpy_like())

    matcher = LineMatcher(str(excinfo.getrepr()).splitlines())
    matcher.fnmatch_lines([
        "!!! Recursion error detected, but an error occurred locating the origin of recursion.",
        "*The following exception happened*",
        "*ValueError: The truth value of an array*",
    ])

def test_here_flag(cli_runner, project_tree, monkeypatch):
    """
    :type cli_runner: click.testing.CliRunner
    :type project_tree: py.path.local
    :type monkeypatch: _pytest.monkeypatch
    """
    monkeypatch.chdir(project_tree.join('root_b'))
    command_args = ['-v', '--here', '--', 'python', '-c', '"name: {name}"']
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exit_code == 0, result.output
    matcher = LineMatcher(result.output.splitlines())
    # The current working directory is not changed.
    matcher.fnmatch_lines([
        'dep_z (1/4)',
        'deps: executing: python -c "name:\\ dep_z"',
        'deps: return code: 0',
        'dep_b.1.1 (2/4)',
        'deps: executing: python -c "name:\\ dep_b.1.1"',
        'deps: return code: 0',
        'dep_b.1 (3/4)',
        'deps: executing: python -c "name:\\ dep_b.1"',
        'deps: return code: 0',
        'root_b (4/4)',
        'deps: executing: python -c "name:\\ root_b"',
        'deps: return code: 0',
    ])

def test_marked_module_fixture_after(testdir):
    filename = testdir.copy_example("asyncio/test_marked_mod.py")
    assert filename.exists()
    pe = testdir.spawn_pytest("--break=test_marked_mod.py:38 "
                              "test_marked_mod.py::test_spam")
    pe.expect(prompt_re)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines(["*>*(38)somefix()*", "*del spam*"])
    pe.sendline("c")
    pe.expect(EOF)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines([
        "*1 passed*",
    ])

def test_execution_on_project_dir(cli_runner, project_tree, monkeypatch):
    """
    :type cli_runner: click.testing.CliRunner
    :type project_tree: py.path.local
    :type monkeypatch: _pytest.monkeypatch
    """
    monkeypatch.chdir(project_tree.join('root_b'))
    command_args = ['-v', '--', 'python', '-c', '"name: {name}"']
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exit_code == 0, result.output
    matcher = LineMatcher(result.output.splitlines())
    matcher.fnmatch_lines([
        'dep_z (1/4)',
        'deps: executing: python -c "name:\\ dep_z"',
        'deps: from: *[\\/]test_projects0[\\/]dep_z',
        'deps: return code: 0',
        'dep_b.1.1 (2/4)',
        'deps: executing: python -c "name:\\ dep_b.1.1"',
        'deps: from: *[\\/]test_projects0[\\/]bs[\\/]dep_b.1.1',
        'deps: return code: 0',
        'dep_b.1 (3/4)',
        'deps: executing: python -c "name:\\ dep_b.1"',
        'deps: from: *[\\/]test_projects0[\\/]bs[\\/]dep_b.1',
        'deps: return code: 0',
        'root_b (4/4)',
        'deps: executing: python -c "name:\\ root_b"',
        'deps: from: *[\\/]test_projects0[\\/]root_b',
        'deps: return code: 0',
    ])

def test_no_expected_env_file(cli_runner, tmpdir_factory, piped_shell_execute):
    test_projects = tmpdir_factory.mktemp("test_projects")
    projects = {
        "expected_env_file": (
            "environment.devenv.yml",
            ["../unexpected_env_file/foo_environment.devenv.yml"],
        ),
        "unexpected_env_file": ("foo_environment.devenv.yml", []),
    }
    for proj, (env_filename, deps) in projects.items():
        proj_path = proj.split("/")
        proj_dir = test_projects.ensure(*proj_path, dir=True)
        env_yml = proj_dir.join(env_filename)
        env_content = ["name: {}".format(proj), ""]
        if len(deps) > 0:
            env_content.append("includes:")
            env_content.extend(
                [" - {{{{ root }}}}/{}".format(dep) for dep in deps])
            env_content.append("")
        env_yml.write("\n".join(env_content))
    root = str(test_projects.join("expected_env_file"))

    # Prepare the invocation.
    command_args = ["-p", root, "echo", "test", "{name}"]
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exit_code == 0, result.output
    matcher = LineMatcher(result.output.splitlines())
    matcher.fnmatch_lines([
        "unexpected_env_file (1/2)",
        "test unexpected_env_file",
        "expected_env_file (2/2)",
        "test expected_env_file",
    ])

def test_script_execution(cli_runner, project_tree, piped_shell_execute):
    """
    :type cli_runner: click.testing.CliRunner
    :type project_tree: py.path.local
    :type piped_shell_execute: mocker.patch
    """
    root_b = str(project_tree.join('root_b'))
    task_script = os.path.join('tasks', 'asd')
    command_args = ['-p', root_b, '-v', '-f', 'tasks/asd', task_script, '{name}', '{abs}']
    result = cli_runner.invoke(deps_cli.cli, command_args, catch_exceptions=False)
    assert result.exit_code == 0, result.output
    matcher = LineMatcher(result.output.splitlines())
    matcher.fnmatch_lines([
        'dep_z (1/4)',
        'deps: executing: tasks[\\/]asd dep_z *[\\/]test_projects0[\\/]dep_z',
        'deps: from: *[\\/]test_projects0[\\/]dep_z',
        'Sample script dep_z *[\\/]test_projects0[\\/]dep_z',
        '',
        'deps: return code: 0',
        'dep_b.1.1: skipping since "*[\\/]tasks[\\/]asd" does not exist',
        'dep_b.1: skipping since "*[\\/]tasks[\\/]asd" does not exist',
        'root_b (4/4)',
        'deps: executing: tasks[\\/]asd root_b *[\\/]test_projects0[\\/]root_b',
        'deps: from: *[\\/]test_projects0[\\/]root_b',
        'Sample script root_b *[\\/]test_projects0[\\/]root_b',
        '',
        'deps: return code: 0',
    ])

def test_class_gap(testdir_class):
    # If a requested line precedes the start of the first test item, an error
    # is raised; the same goes for intervals between items, as shown here.
    pe = testdir_class.spawn_pytest("--break=test_class_gap.py:10")
    pe.expect(EOF)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines(["*unable to determine*", "*no tests ran*"])

def test_list_repos_precedence(mode, cli_runner, project_tree, piped_shell_execute):
    """
    :type cli_runner: click.testing.CliRunner
    :type project_tree: py.path.local
    :type piped_shell_execute: mocker.patch
    """
    root = str(project_tree.join('root_d'))
    base_args = ['-p', root, '--repos']
    command_args = base_args + ['-pp', '--ignore-project=d1', '--ignore-project=d2']
    if mode == 'skipped':
        command_args.append('--skip-project=d3')
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exit_code == 0, result.output
    matcher = LineMatcher(result.output.splitlines())
    if mode == 'skipped':
        matcher.fnmatch_lines([
            '*[\\/]test_projects0[\\/]root_d',
            ' {*[\\/]test_projects0[\\/]d}',
        ])
    else:
        assert mode == 'normal'
        matcher.fnmatch_lines([
            '*[\\/]test_projects0[\\/]root_d',
            ' *[\\/]test_projects0[\\/]d',
        ])

def test_class_gap_named(testdir_class):
    pe = testdir_class.spawn_pytest(
        "--break=test_class_gap_named.py:10 "
        "test_class_gap_named.py::TestClass::test_two")
    pe.expect(EOF)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines(["*unable to determine*", "*no tests ran*"])

def test_no_expected_env_file(cli_runner, tmpdir_factory, piped_shell_execute):
    test_projects = tmpdir_factory.mktemp('test_projects')
    projects = {
        'expected_env_file': (
            'environment.devenv.yml',
            ['../unexpected_env_file/foo_environment.devenv.yml'],
        ),
        'unexpected_env_file': ('foo_environment.devenv.yml', []),
    }
    for proj, (env_filename, deps) in projects.items():
        proj_path = proj.split('/')
        proj_dir = test_projects.ensure(*proj_path, dir=True)
        env_yml = proj_dir.join(env_filename)
        env_content = ['name: {}'.format(proj), '']
        if len(deps) > 0:
            env_content.append('includes:')
            env_content.extend(
                [' - {{{{ root }}}}/{}'.format(dep) for dep in deps])
            env_content.append('')
        env_yml.write('\n'.join(env_content))
    root = str(test_projects.join('expected_env_file'))

    # Prepare the invocation.
    command_args = ['-p', root, 'echo', 'test', '{name}']
    result = cli_runner.invoke(deps_cli.cli, command_args)
    assert result.exit_code == 0, result.output
    matcher = LineMatcher(result.output.splitlines())
    matcher.fnmatch_lines([
        'unexpected_env_file (1/2)',
        'test unexpected_env_file',
        'expected_env_file (2/2)',
        'test expected_env_file',
    ])

def test_no_bt_all(testdir_setup):
    testdir_setup.makepyfile(test_file="""
        def test_foo():
            assert True
    """)
    pe = testdir_setup.spawn_pytest("--break=test_file.py:2")
    pe.expect(prompt_re)
    befs = LineMatcher(unansi(pe.before))
    befs.fnmatch_lines("*>*/test_file.py(2)test_foo()")
    pe.sendline("w")
    pe.expect(prompt_re)
    befs = LineMatcher(unansi(pe.before))
    assert "runcall_until" not in befs.str()
    befs.fnmatch_lines("*>*/test_file.py(2)test_foo()")
    pe.sendline("c")
    pe.expect(EOF)