def _():
    log = []

    @fixture(scope=Scope.Module)
    def a():
        log.append("resolve a")
        yield "a"
        log.append("teardown a")

    @testable_test
    def test_1(a=each(a, "second", a)):
        log.append("running test")

    suite = Suite(tests=[Test(fn=test_1, module_name="module1")])
    list(suite.generate_test_runs())

    # Every parameterised instance of the final test in the module must run
    # before the module-scoped fixture is torn down.
    assert log == [
        "resolve a",
        "running test",
        "running test",
        "running test",
        "teardown a",
    ]
def _(module=module):
    log = []

    @fixture
    def a():
        log.append(1)

    # Both 'b' and 'c' depend on 'a'; the suite must resolve 'a' exactly once.
    @fixture
    def b(a=a):
        log.append(2)

    @fixture
    def c(a=a):
        log.append(3)

    @testable_test
    def test(b=b, c=c):
        pass

    list(Suite(tests=[Test(fn=test, module_name=module)]).generate_test_runs())
    assert log == [1, 2, 3]
def _(module=module):
    recorded = []

    @fixture
    def fix_a():
        recorded.append(1)
        yield "a"
        recorded.append(4)

    @fixture
    def fix_b():
        recorded.append(2)
        return "b"

    @fixture
    def fix_c(fix_b=fix_b):
        recorded.append(3)
        yield "c"
        recorded.append(5)

    @testable_test
    def my_test(fix_a=fix_a, fix_c=fix_c):
        assert fix_a == "a"
        assert fix_c == "c"

    suite = Suite(tests=[Test(fn=my_test, module_name=module)])
    # Drain the run generator so all fixtures get resolved and torn down.
    list(suite.generate_test_runs())

    # Fixtures resolve in dependency order and tear down in the same order.
    assert recorded == [1, 2, 3, 4, 5]
def _(module=module):
    log = []
    error_message = "example"
    teardown_err = ZeroDivisionError(error_message)

    @fixture
    def fix_a():
        log.append(1)
        yield "a"
        # Blow up during fixture teardown.
        raise teardown_err

    @testable_test
    def my_test(fix_a=fix_a):
        assert fix_a == "a"

    test_obj = Test(fn=my_test, module_name=module)
    outcomes = list(Suite(tests=[test_obj]).generate_test_runs())

    # An error raised in fixture teardown is reported as a failure of the test.
    assert outcomes == [
        TestResult(
            test=test_obj,
            outcome=TestOutcome.FAIL,
            error=teardown_err,
            message=error_message,
        )
    ]
def _(skipped=skipped_test, example=example_test):
    runs = list(Suite(tests=[example, skipped]).generate_test_runs())

    # A normal test passes while a skipped test is reported as SKIP.
    assert runs == [
        TestResult(example, TestOutcome.PASS, None, ""),
        TestResult(skipped, TestOutcome.SKIP, None, ""),
    ]
def _(skipped=skipped_test, example=example_test):
    runs = list(Suite(tests=[example, skipped]).generate_test_runs(dry_run=True))

    # Under a dry run every test - even a skipped one - reports DRYRUN.
    assert runs == [
        TestResult(example, TestOutcome.DRYRUN, None, ""),
        TestResult(skipped, TestOutcome.DRYRUN, None, ""),
    ]
def _(module=module):
    @testable_test
    def _():
        assert False

    failing = Test(fn=_, module_name=module)
    run_iter = Suite(tests=[failing]).generate_test_runs()
    outcome = next(run_iter)

    # The failure surfaces as a FAIL result carrying the raised AssertionError.
    assert outcome == TestResult(
        test=failing, outcome=TestOutcome.FAIL, error=mock.ANY, message=""
    )
    assert isinstance(outcome.error, AssertionError)
def _():
    # A module-scoped fixture must be resolved once per module and torn down
    # after the final test of that module, then resolved again for the next
    # module that uses it.
    events = []

    @fixture(scope=Scope.Module)
    def a():
        events.append("resolve")
        yield "a"
        events.append("teardown")

    @testable_test
    def test1(a=a):
        events.append("test1")

    @testable_test
    def test2(a=a):
        events.append("test2")

    @testable_test
    def test3(a=a):
        events.append("test3")

    # For testing purposes we need to assign paths ourselves,
    # since our test functions are all defined at the same path
    test1.ward_meta.path = "module1"
    test2.ward_meta.path = "module2"
    test3.ward_meta.path = "module2"

    suite = Suite(tests=[
        Test(fn=test1, module_name="module1"),
        Test(fn=test2, module_name="module2"),
        Test(fn=test3, module_name="module2"),
    ])

    # Exhaust the generator so the whole suite (and all teardowns) runs.
    list(suite.generate_test_runs())

    assert events == [
        "resolve",  # Resolve at start of module1
        "test1",
        "teardown",  # Teardown at end of module1
        "resolve",  # Resolve at start of module2
        "test2",
        "test3",
        "teardown",  # Teardown at end of module2
    ]
def _():
    @testable_test
    def test_1(a=each(1, 2), b=each(1, 2, 3)):
        pass

    @testable_test
    def test_2(a=each(1, 2), b=each(1, 2)):
        pass

    suite = Suite(tests=[
        Test(fn=test_1, module_name="module1"),
        Test(fn=test_2, module_name="module2"),
    ])
    outcomes = list(suite.generate_test_runs())

    # test_1 has mismatched each() lengths so it cannot be expanded: it yields
    # a single errored result; test_2 expands cleanly into 2 runs.
    assert len(outcomes) == 1 + 2
    assert type(outcomes[0].error) == ParameterisationError
def _():
    trace = []

    @fixture(scope=Scope.Global)
    def a():
        trace.append("resolve")
        yield "a"
        trace.append("teardown")

    @testable_test
    def test1(a=a):
        trace.append("test1")

    @testable_test
    def test2(a=a):
        trace.append("test2")

    @testable_test
    def test3(a=a):
        trace.append("test3")

    suite = Suite(tests=[
        Test(fn=test1, module_name="module1"),
        Test(fn=test2, module_name="module2"),
        Test(fn=test3, module_name="module2"),
    ])
    list(suite.generate_test_runs())

    # A global fixture resolves exactly once, before the first test, and is
    # torn down only after the entire run completes.
    assert trace == [
        "resolve",
        "test1",
        "test2",
        "test3",
        "teardown",
    ]
def _():
    # Exercises all three fixture scopes (Global, Module, Test) with async
    # fixtures and async tests, with module order deliberately interleaved.
    events = []

    @fixture(scope=Scope.Global)
    async def a():
        events.append("resolve a")
        yield "a"
        events.append("teardown a")

    @fixture(scope=Scope.Module)
    async def b():
        events.append("resolve b")
        yield "b"
        events.append("teardown b")

    @fixture(scope=Scope.Test)
    async def c():
        events.append("resolve c")
        yield "c"
        events.append("teardown c")

    @testable_test
    async def test_1(a=a, b=b, c=c):
        events.append("test1")

    @testable_test
    async def test_2(a=a, b=b, c=c):
        events.append("test2")

    @testable_test
    async def test_3(a=a, b=b, c=c):
        events.append("test3")

    # Assign paths manually: all three functions are defined at the same path,
    # but module scoping is keyed on the test's path.
    test_1.ward_meta.path = "module1"
    test_2.ward_meta.path = "module2"
    test_3.ward_meta.path = "module1"

    suite = Suite(tests=[
        # Module ordering is intentionally off here, to ensure correct
        # interaction between module-scope fixtures and random ordering
        Test(fn=test_1, module_name="module1"),
        Test(fn=test_2, module_name="module2"),
        Test(fn=test_3, module_name="module1"),
    ])

    list(suite.generate_test_runs())

    assert events == [
        "resolve a",  # global fixture so resolved at start
        "resolve b",  # module fixture resolved at start of module1
        "resolve c",  # test fixture resolved at start of test1
        "test1",
        "teardown c",  # test fixture torn down at end of test1
        "resolve b",  # module fixture resolved at start of module2
        "resolve c",  # test fixture resolved at start of test2
        "test2",
        "teardown c",  # test fixture torn down at end of test2
        "teardown b",  # module fixture teardown at end of module2
        "resolve c",  # test fixture resolved at start of test3
        "test3",
        "teardown c",  # test fixture teardown at end of test3
        "teardown b",  # module fixture teardown at end of module1
        "teardown a",  # global fixtures are torn down at the very end
    ]
def test(
    ctx: click.Context,
    config: Optional[Path],
    project_root: Optional[Path],  # None if the project root can't be found
    config_path: Optional[Path],  # added by callback on '--config' option
    path: Tuple[str],
    exclude: Tuple[str],
    search: Optional[str],
    tags: Optional[Expression],
    fail_limit: Optional[int],
    test_output_style: str,
    progress_style: List[str],
    order: str,
    capture_output: bool,
    show_slowest: int,
    show_diff_symbols: bool,
    dry_run: bool,
    hook_module: Tuple[str],
):
    """Run tests."""
    # Build the Config from the click parameters; the raw '--config' path is
    # removed so it isn't forwarded as a Config field.
    config_params = ctx.params.copy()
    config_params.pop("config")
    config = Config(**config_params, plugin_config=config_params.get("plugins", {}))
    # Normalise the raw string options into their enum representations.
    test_output_style = TestOutputStyle(test_output_style)
    progress_styles = [TestProgressStyle(ps) for ps in progress_style]
    init_breakpointhooks(pdb, sys)
    # Started before collection so time_to_collect_secs covers the whole
    # collection phase.
    start_run = default_timer()
    # Give plugins a chance to produce renderables shown before the session.
    print_before: Tuple[ConsoleRenderable] = plugins.hook.before_session(config=config)
    configure_path(project_root)
    # Collection: discover module files, import them, and extract the tests.
    paths = [Path(p) for p in path]
    mod_infos = get_info_for_modules(paths, exclude)
    modules = load_modules(mod_infos)
    unfiltered_tests = get_tests_in_modules(modules, capture_output)
    # Plugins may mutate the collected test list in place before filtering.
    plugins.hook.preprocess_tests(config=config, collected_tests=unfiltered_tests)
    filtered_tests = filter_tests(unfiltered_tests, query=search, tag_expr=tags)
    if config.order == "random":
        shuffle(filtered_tests)
    tests = rewrite_assertions_in_tests(filtered_tests)
    time_to_collect_secs = default_timer() - start_run
    suite = Suite(tests=tests)
    # Lazy generator: tests only execute as the writer consumes results below.
    test_results = suite.generate_test_runs(dry_run=dry_run)
    rich_console.print(
        SessionPrelude(
            time_to_collect_secs=time_to_collect_secs,
            num_tests_collected=suite.num_tests_with_parameterisation,
            num_fixtures_collected=len(_DEFINED_FIXTURES),
            config_path=config_path,
        )
    )
    writer = TestResultWriter(
        console=rich_console,
        suite=suite,
        test_output_style=test_output_style,
        progress_styles=progress_styles,
        config_path=config_path,
        show_diff_symbols=show_diff_symbols,
    )
    for renderable in print_before:
        rich_console.print(renderable)
    # Drives the suite: rebinds test_results from a generator to the
    # materialised list of results (stopping early if fail_limit is hit).
    test_results = writer.output_all_test_results(test_results, fail_limit=fail_limit)
    exit_code = get_exit_code(test_results)
    time_taken = default_timer() - start_run
    # Let plugins render output after the session, with full results available.
    render_afters: Tuple[ConsoleRenderable] = plugins.hook.after_session(
        config=config, test_results=test_results, status_code=exit_code
    )
    for renderable in render_afters:
        rich_console.print(renderable)
    writer.output_test_result_summary(test_results, time_taken, show_slowest)
    sys.exit(exit_code.value)