def compute_diff(old_suites, new_suites):
    """Compute the differences between two test trees.

    Returns a Diff object with:
      - removed: tests present in old_suites but absent from new_suites
      - status_changed: nested mapping {old_status: {new_status: [tests]}}
        for tests whose status differs between the two trees
      - added: tests present in new_suites but absent from old_suites
    """
    diff = Diff()

    # Index the new tests by path once, so each old test is matched with a
    # single O(1) dict lookup instead of a per-test find_test() tree search
    # (the original built this index but only used it for "added" tests).
    new_tests = {test.path: test for test in flatten_tests(new_suites)}

    for old_test in flatten_tests(old_suites):
        # handle removed tests: pop() both finds the matching new test and
        # drops it from the index, so whatever remains afterward is "added"
        try:
            new_test = new_tests.pop(old_test.path)
        except KeyError:
            diff.removed.append(old_test)
            continue

        # handle status-changed tests
        if new_test.status != old_test.status:
            diff.status_changed \
                .setdefault(old_test.status, {}) \
                .setdefault(new_test.status, []) \
                .append(new_test)

    # handle added tests
    diff.added.extend(new_tests.values())

    return diff
def run_cmd(self, cli_args):
    """Show the project fixtures grouped by scope, with their usage counts."""
    colorama.init()

    project = load_project()
    suites = project.get_suites()
    fixtures = project.get_fixtures()

    # Group fixtures by their scope.
    fixtures_by_scope = {}
    for fixture in fixtures:
        fixtures_by_scope.setdefault(fixture.scope, []).append(fixture)

    # Count how many tests use each fixture.
    used_by_tests = {}
    for test in flatten_tests(suites):
        for name in test.get_fixtures():
            used_by_tests[name] = used_by_tests.get(name, 0) + 1

    # Count how many fixtures use each fixture (through their params).
    used_by_fixtures = {}
    for fixture in fixtures:
        for param in fixture.params:
            used_by_fixtures[param] = used_by_fixtures.get(param, 0) + 1

    for scope in ("session_prerun", "session", "suite", "test"):
        self.show_fixtures(
            scope, fixtures_by_scope.get(scope, []), used_by_tests, used_by_fixtures
        )
        print()

    return 0
def print_report(report, filtr=None, max_width=None, explicit=False):
    """Render a report on the console, optionally restricted by a filter."""
    ###
    # Setup terminal
    ###
    colorama.init()
    if not max_width:
        max_width, _ = terminalsize.get_terminal_size()

    ###
    # Get a generator over data to be printed on the console
    ###
    renderer = Renderer(max_width=max_width, explicit=explicit)
    if filtr and not filtr.is_empty():
        # Only render the tests that match the filter.
        matching_suites = filter_suites(report.get_suites(), filtr)
        if not matching_suites:
            print("The filter does not match any test in the report")
            return
        data = renderer.render_tests(flatten_tests(matching_suites))
    else:
        # No filter: render the whole report.
        if report.nb_tests == 0:
            print("No tests found in report")
            return
        data = renderer.render_report(report)

    ###
    # Do the actual job
    ###
    _print_data(data)
def test_flatten_tests_on_nested_suites():
    """flatten_tests must also walk the tests of nested sub-suites, in order."""
    @lcc.suite("My suite 1")
    class mysuite1:
        @lcc.test("Test 1")
        def test1(self):
            pass

        @lcc.suite("My suite 2")
        class mysuite2:
            @lcc.test("Test 2")
            def test2(self):
                pass

    @lcc.suite("My suite 3")
    class mysuite3:
        @lcc.test("Test 3")
        def test3(self):
            pass

        @lcc.suite("My suite 4")
        class mysuite4:
            @lcc.test("Test 4")
            def test4(self):
                pass

    # Only the two top-level suites are loaded; their nested sub-suites
    # must be discovered through them.
    suites = load_suites_from_classes([mysuite1, mysuite3])
    flattened = list(flatten_tests(suites))
    assert [test.name for test in flattened] == ["test1", "test2", "test3", "test4"]
def build_tasks(suites, fixture_registry, session_scheduled_fixtures):
    """Build the full task graph for a test run.

    Produces, in order: the optional session setup task, the per-suite tasks,
    and the optional session teardown task, then wires inter-test dependency
    edges into the test tasks.

    Raises UserError if a test declares a dependency on a test that is not
    part of the tasks to be run.
    """
    ###
    # Build test session setup task
    ###
    # May be None when no session-scoped setup is needed.
    test_session_setup_task = build_test_session_setup_task(
        session_scheduled_fixtures)

    ###
    # Build suite tasks
    ###
    suite_tasks = []
    for suite in suites:
        suite_tasks.extend(
            build_suite_tasks(suite, fixture_registry,
                              session_scheduled_fixtures,
                              test_session_setup_task))

    ###
    # Build test session teardown task
    ###
    if test_session_setup_task:
        # The session teardown must wait for the ending task of every
        # top-level suite (nested suites' ending tasks are excluded by
        # the `task.suite in suites` test).
        test_session_teardown_dependencies = [
            task for task in suite_tasks
            if isinstance(task, SuiteEndingTask) and task.suite in suites
        ]
        test_session_teardown_task = build_test_session_teardown_task(
            test_session_setup_task, test_session_teardown_dependencies)
    else:
        test_session_teardown_task = None

    ###
    # Get all effective tasks (task != None)
    ###
    task_iter = itertools.chain((test_session_setup_task, ), suite_tasks,
                                (test_session_teardown_task, ))
    tasks = list(filter(bool, task_iter))

    ###
    # Add extra dependencies in tasks for tests that depend on other tests
    ###
    for test in flatten_tests(suites):
        if not test.dependencies:
            continue
        test_task = lookup_test_task(tasks, test.path)
        for dep_test_path in test.dependencies:
            try:
                dep_test = lookup_test_task(tasks, dep_test_path)
            except LookupError:
                # The dependency path points at a test that is not scheduled
                # (missing or filtered out): this is a user mistake.
                raise UserError(
                    "Cannot find dependency test '%s' for '%s', "
                    "either the test does not exist or is not going to be run"
                    % (dep_test_path, test.path))
            test_task.dependencies.append(dep_test)

    ###
    # Return tasks
    ###
    return tasks
def initialize_event_manager(suites, reporting_backends, report_dir,
                             report_saving_strategy, nb_threads):
    """Create the event manager wired with the report writer, the runtime
    and one reporting session per reporting backend."""
    event_manager = events.AsyncEventManager.load()

    report = Report()
    report.nb_threads = nb_threads
    event_manager.add_listener(ReportWriter(report))

    initialize_runtime(event_manager, report_dir, report)

    # The run counts as parallelized only when there is more than one
    # thread AND more than one test to dispatch.
    nb_tests = sum(1 for _ in flatten_tests(suites))
    parallelized = nb_threads > 1 and nb_tests > 1

    for backend in reporting_backends:
        session = backend.create_reporting_session(
            report_dir, report, parallelized, report_saving_strategy)
        event_manager.add_listener(session)

    return event_manager
def _test_filter(suites, filtr, expected_test_paths):
    """Assert that applying *filtr* to *suites* keeps exactly the tests
    whose paths are listed in *expected_test_paths* (order-insensitive)."""
    remaining_tests = flatten_tests(filter_suites(suites, filtr))
    actual_paths = sorted(test.path for test in remaining_tests)
    assert actual_paths == sorted(expected_test_paths)
def _make_from_report_filter(cli_args, only_executed_tests=False):
    """Build a FromTestsFilter from the tests matched in a previously
    saved report (cli_args.from_report, falling back to the default dir)."""
    report_path = cli_args.from_report or DEFAULT_REPORT_DIR_NAME
    report = load_report(report_path)
    report_filter = _make_report_filter(
        cli_args, only_executed_tests=only_executed_tests)
    matching_suites = filter_suites(report.get_suites(), report_filter)
    return FromTestsFilter(flatten_tests(matching_suites))
def get_top_tests(suites):
    """Return the formatted "top tests" entries, ordered by duration."""
    ordered_tests = TopTests.get_tests_ordered_by_duration(flatten_tests(suites))
    total = get_total_duration(ordered_tests)
    return [TopTests.format_test_entry(test, total) for test in ordered_tests]