def _GetAllPossiblePageTestInstances():
  page_test_instances = []
  measurements_dir = os.path.dirname(__file__)
  top_level_dir = os.path.dirname(measurements_dir)
  benchmarks_dir = os.path.join(top_level_dir, 'benchmarks')

  # Get all page test instances from measurement classes that are directly
  # constructible.
  all_measurement_classes = discover.DiscoverClasses(
      measurements_dir, top_level_dir, legacy_page_test.LegacyPageTest,
      index_by_class_name=True, directly_constructable=True).values()
  for measurement_class in all_measurement_classes:
    page_test_instances.append(measurement_class())

  all_benchmarks_classes = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark).values()

  # Get all page test instances from defined benchmarks.
  # Note: since this depends on the command line options, there is no
  # guarantee that this will generate all possible page test instances, but
  # it is good enough for smoke-test purposes.
  for benchmark_cls in all_benchmarks_classes:
    options = options_for_unittests.GetRunOptions(benchmark_cls=benchmark_cls)
    pt = benchmark_cls().CreatePageTest(options)
    if not isinstance(pt, timeline_based_measurement.TimelineBasedMeasurement):
      page_test_instances.append(pt)

  return page_test_instances
def GetAllStorySetClasses(self, story_sets_dir, top_level_dir):
  # We can't test story sets that aren't directly constructible since we
  # don't know what arguments to pass to the constructor.
  return discover.DiscoverClasses(story_sets_dir, top_level_dir,
                                  story_module.StorySet,
                                  directly_constructable=True).values()
def FindPageSetDependencies(base_dir):
  logging.info('Finding page sets in %s', base_dir)

  # Add base_dir to path so our imports relative to base_dir will work.
  sys.path.append(base_dir)
  tests = discover.DiscoverClasses(base_dir, base_dir, benchmark.Benchmark,
                                   index_by_class_name=True)

  for test_class in tests.itervalues():
    test_obj = test_class()

    # Ensure the test's default options are set if needed.
    parser = optparse.OptionParser()
    test_obj.AddCommandLineArgs(parser, None)
    options = optparse.Values()
    for k, v in parser.get_default_values().__dict__.iteritems():
      options.ensure_value(k, v)

    # Page set paths are relative to their runner script, not relative to us.
    path.GetBaseDir = lambda: base_dir
    # TODO: Loading the page set will automatically download its Cloud Storage
    # deps. This is really expensive, and we don't want to do this by default.
    story_set = test_obj.CreateStorySet(options)

    # Add all of its serving_dirs as dependencies.
    for serving_dir in story_set.serving_dirs:
      yield serving_dir
def _MatchPageSetName(story_set_name, story_set_base_dir):
  story_sets = discover.DiscoverClasses(story_set_base_dir,
                                        story_set_base_dir,
                                        story.StorySet).values()
  for s in story_sets:
    if story_set_name == s.Name():
      return s
  return None
def ListOfValuesFromListOfDicts(value_dicts, page_dict):
  """Converts a list of value dicts to values.

  Given a list of value dicts produced by AsDict, this method deserializes
  the dicts given a dict mapping page IDs to pages. This method performs
  memoization for deserializing a list of values efficiently, whereas
  FromDict is meant to handle one-offs.

  values: a list of value dicts produced by AsDict() on a value subclass.
  page_dict: a dictionary mapping IDs to page objects.
  """
  value_dir = os.path.dirname(__file__)
  value_classes = discover.DiscoverClasses(
      value_dir, util.GetTelemetryDir(), Value, index_by_class_name=True)
  value_json_types = dict(
      (value_classes[x].GetJSONTypeName(), x) for x in value_classes)
  values = []
  for value_dict in value_dicts:
    value_class = value_classes[value_json_types[value_dict['type']]]
    assert 'FromDict' in value_class.__dict__, \
        'Subclass doesn\'t override FromDict'
    values.append(value_class.FromDict(value_dict, page_dict))
  return values
def getAllExpectationsInDirectory(self):
  all_expectations = discover.DiscoverClasses(
      self._base_dir, self._base_dir,
      gpu_test_expectations.GpuTestExpectations)
  return [
      e for e in all_expectations.itervalues()
      if not self.getFullClassName(e) in self._skipped_expectations
  ]
def _Benchmarks(environment):
  benchmarks = []
  for search_dir in environment.benchmark_dirs:
    benchmarks += discover.DiscoverClasses(search_dir,
                                           environment.top_level_dir,
                                           benchmark.Benchmark,
                                           index_by_class_name=True).values()
  return benchmarks
def _IterAllRenderingStoryClasses():
  start_dir = os.path.dirname(os.path.abspath(__file__))
  # Sort the classes by their names so that their order is stable and
  # deterministic.
  for _, cls in sorted(discover.DiscoverClasses(
      start_dir=start_dir,
      top_level_dir=os.path.dirname(start_dir),
      base_class=rendering_story.RenderingStory).iteritems()):
    yield cls
def _FindTestCases():
  test_cases = []
  for start_dir in gpu_project_config.CONFIG.start_dirs:
    modules_to_classes = discover.DiscoverClasses(
        start_dir, gpu_project_config.CONFIG.top_level_dir,
        base_class=gpu_integration_test.GpuIntegrationTest)
    test_cases.extend(modules_to_classes.values())
  return test_cases
def GetDiagnosticType(typename):
  if not Diagnostic._subtypes:
    Diagnostic._subtypes = discover.DiscoverClasses(
        os.path.join(tracing_project.TracingProject.tracing_src_path, 'value'),
        tracing_project.TracingProject.tracing_root_path,
        Diagnostic, index_by_class_name=True)
  # TODO(eakuefner): Add camelcase mode to discover.DiscoverClasses.
  return Diagnostic._subtypes.get(camel_case.ToUnderscore(typename))
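# Hypothetical usage sketch for GetDiagnosticType above (the 'GenericSet'
# type name is an assumption used only for illustration): the camelCase type
# name is converted to underscore form to look up the matching Diagnostic
# subclass discovered under tracing's value/ directory.
diagnostic_cls = GetDiagnosticType('GenericSet')
if diagnostic_cls is not None:
  print(diagnostic_cls.__name__)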
def _IterAllSystemHealthStoryClasses():
  start_dir = os.path.dirname(os.path.abspath(__file__))
  # Sort the classes by their names so that their order is stable and
  # deterministic.
  for unused_cls_name, cls in sorted(
      discover.DiscoverClasses(
          start_dir=start_dir,
          top_level_dir=os.path.dirname(start_dir),
          base_class=system_health_story.SystemHealthStory).iteritems()):
    yield cls
def _GetAllPossiblePageTestInstances():
  page_test_instances = []
  measurements_dir = os.path.dirname(__file__)
  top_level_dir = os.path.dirname(measurements_dir)
  benchmarks_dir = os.path.join(top_level_dir, 'benchmarks')

  # Get all page test instances from measurement classes that are directly
  # constructible.
  all_measurement_classes = discover.DiscoverClasses(
      measurements_dir, top_level_dir, legacy_page_test.LegacyPageTest,
      index_by_class_name=True, directly_constructable=True).values()
  for measurement_class in all_measurement_classes:
    page_test_instances.append(measurement_class())

  all_benchmarks_classes = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark).values()

  # Get all page test instances from defined benchmarks.
  # Note: since this depends on the command line options, there is no
  # guarantee that this will generate all possible page test instances, but
  # it is good enough for smoke-test purposes.
  for benchmark_class in all_benchmarks_classes:
    options = options_for_unittests.GetCopy()
    parser = optparse.OptionParser()
    browser_options.BrowserOptions.AddCommandLineArgs(parser)
    try:
      benchmark_class.AddCommandLineArgs(parser)
      benchmark_module.AddCommandLineArgs(parser)
      benchmark_class.SetArgumentDefaults(parser)
    except Exception:
      logging.error('Exception raised when processing benchmark %s',
                    benchmark_class)
      raise

    options.MergeDefaultValues(parser.get_default_values())

    pt = benchmark_class().CreatePageTest(options)
    if not isinstance(pt, timeline_based_measurement.TimelineBasedMeasurement):
      page_test_instances.append(pt)

  return page_test_instances
def _GetAllSystemHealthBenchmarks():
  all_perf_benchmarks = discover.DiscoverClasses(
      path_util.GetOfficialBenchmarksDir(), path_util.GetPerfDir(),
      benchmark_module.Benchmark, index_by_class_name=True).values()
  return [
      b for b in all_perf_benchmarks
      if sys.modules[b.__module__] == system_health_benchmark
  ]
def FindTestCase(test_name):
  for start_dir in gpu_project_config.CONFIG.start_dirs:
    modules_to_classes = discover.DiscoverClasses(
        start_dir, gpu_project_config.CONFIG.top_level_dir,
        base_class=serially_executed_browser_test_case.
        SeriallyExecutedBrowserTestCase)
    for cl in modules_to_classes.values():
      if cl.Name() == test_name:
        return cl
def current_benchmarks():
  benchmarks_dir = os.path.join(
      path_util.GetChromiumSrcDir(), 'tools', 'perf', 'benchmarks')
  top_level_dir = os.path.dirname(benchmarks_dir)

  all_benchmarks = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=True).values()

  return sorted(all_benchmarks, key=lambda b: b.Name())
def _GetSubclasses(base_dir, cls):
  """Returns all subclasses of |cls| in |base_dir|.

  Args:
    base_dir: directory to search for subclasses.
    cls: a class.

  Returns:
    dict of {underscored_class_name: benchmark class}
  """
  return discover.DiscoverClasses(base_dir, base_dir, cls,
                                  index_by_class_name=True)
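# Minimal usage sketch for _GetSubclasses above (the directory path and the
# `benchmark` module reference are assumptions for illustration): discovery
# keys are the underscored class names, so a class named MyBenchmark is
# indexed as 'my_benchmark'.
for name, cls in sorted(_GetSubclasses('/path/to/benchmarks',
                                       benchmark.Benchmark).items()):
  print('%s -> %s' % (name, cls.__name__))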
def GetContribBenchmarks():
  """Returns the list of all contrib benchmarks.

  The benchmarks are sorted by name.
  """
  benchmarks = discover.DiscoverClasses(
      start_dir=path_util.GetContribDir(),
      top_level_dir=path_util.GetPerfDir(),
      base_class=benchmark_module.Benchmark,
      index_by_class_name=True).values()
  benchmarks.sort(key=lambda b: b.Name())
  return benchmarks
def _FindTestCases():
  test_cases = []
  for start_dir in gpu_project_config.CONFIG.start_dirs:
    # Note we deliberately only scan the integration tests as a
    # workaround for http://crbug.com/1195465 .
    modules_to_classes = discover.DiscoverClasses(
        start_dir,
        gpu_project_config.CONFIG.top_level_dir,
        base_class=gpu_integration_test.GpuIntegrationTest,
        pattern='*_integration_test.py')
    test_cases.extend(modules_to_classes.values())
  return test_cases
def testDiscoverClassesWithPatternAndIndexByModule(self):
  classes = discover.DiscoverClasses(self._start_dir,
                                     self._base_dir,
                                     self._base_class,
                                     pattern='another*',
                                     index_by_class_name=False)
  actual_classes = dict(
      (name, cls.__name__) for name, cls in six.iteritems(classes))
  expected_classes = {
      'another_discover_dummyclass': 'DummyExceptionWithParameterImpl1'
  }
  self.assertEqual(actual_classes, expected_classes)
def GetBenchmarks(self):
  """Return a list of all benchmark classes found in this configuration."""
  if self._benchmarks is None:
    benchmarks = []
    for search_dir in self.benchmark_dirs:
      benchmarks.extend(
          discover.DiscoverClasses(
              search_dir, self.top_level_dir, benchmark.Benchmark,
              index_by_class_name=True).values())
    self._benchmarks = benchmarks
  return list(self._benchmarks)
def load_tests(loader, standard_tests, pattern):
  del loader, standard_tests, pattern  # unused
  suite = progress_reporter.TestSuite()

  benchmarks_dir = os.path.dirname(__file__)
  top_level_dir = os.path.dirname(benchmarks_dir)

  # Using the default of |index_by_class_name=False| means that if a module
  # has multiple benchmarks, only the last one is returned.
  all_benchmarks = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=False).values()
  for benchmark in all_benchmarks:
    if sys.modules[benchmark.__module__] in _BLACK_LIST_TEST_MODULES:
      continue
    if benchmark.Name() in _BLACK_LIST_TEST_NAMES:
      continue

    class BenchmarkSmokeTest(unittest.TestCase):
      pass

    # tab_switching needs more than one page to test correctly.
    if 'tab_switching' in benchmark.Name():
      method = SmokeTestGenerator(benchmark, num_pages=2)
    else:
      method = SmokeTestGenerator(benchmark)

    # Make sure any decorators are propagated from the original declaration.
    # (access to protected members) pylint: disable=protected-access
    # TODO(dpranke): Since we only pick the first test from every class
    # (above), if that test is disabled, we'll end up not running *any*
    # test from the class. We should probably discover all of the tests
    # in a class, and then throw the ones we don't need away instead.
    disabled_benchmark_attr = decorators.DisabledAttributeName(benchmark)
    disabled_method_attr = decorators.DisabledAttributeName(method)
    enabled_benchmark_attr = decorators.EnabledAttributeName(benchmark)
    enabled_method_attr = decorators.EnabledAttributeName(method)

    MergeDecorators(method, disabled_method_attr, benchmark,
                    disabled_benchmark_attr)
    MergeDecorators(method, enabled_method_attr, benchmark,
                    enabled_benchmark_attr)

    setattr(BenchmarkSmokeTest, benchmark.Name(), method)

    suite.addTest(BenchmarkSmokeTest(benchmark.Name()))

  return suite
def current_benchmarks():
  benchmarks_dir = os.path.join(
      path_util.GetChromiumSrcDir(), 'tools', 'perf', 'benchmarks')
  top_level_dir = os.path.dirname(benchmarks_dir)

  all_benchmarks = []
  for b in discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=True).values():
    if not b.Name() in _UNSCHEDULED_TELEMETRY_BENCHMARKS:
      all_benchmarks.append(b)

  return sorted(all_benchmarks, key=lambda b: b.Name())
def testDiscoverDirectlyConstructableClassesWithIndexByClassName(self):
  classes = discover.DiscoverClasses(self._start_dir,
                                     self._base_dir,
                                     self._base_class,
                                     directly_constructable=True)
  actual_classes = dict(
      (name, cls.__name__) for name, cls in six.iteritems(classes))
  expected_classes = {
      'dummy_exception': 'DummyException',
      'dummy_exception_impl1': 'DummyExceptionImpl1',
      'dummy_exception_impl2': 'DummyExceptionImpl2',
  }
  self.assertEqual(actual_classes, expected_classes)
def testDiscoverClassesWithIndexByModuleName(self):
  classes = discover.DiscoverClasses(self._start_dir,
                                     self._base_dir,
                                     self._base_class,
                                     index_by_class_name=False)
  actual_classes = dict((name, cls.__name__)
                        for name, cls in classes.iteritems())
  expected_classes = {
      'another_discover_dummyclass': 'DummyExceptionWithParameterImpl1',
      'discover_dummyclass': 'DummyException',
      'parameter_discover_dummyclass': 'DummyExceptionWithParameterImpl2'
  }
  self.assertEqual(actual_classes, expected_classes)
def IterAllSystemHealthStoryClasses():
  """Generator for system health stories.

  Yields:
    All appropriate SystemHealthStory subclasses defining stories.
  """
  start_dir = os.path.dirname(os.path.abspath(__file__))
  # Sort the classes by their names so that their order is stable and
  # deterministic.
  for unused_cls_name, cls in sorted(
      discover.DiscoverClasses(
          start_dir=start_dir,
          top_level_dir=os.path.dirname(start_dir),
          base_class=system_health_story.SystemHealthStory).items()):
    yield cls
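# Illustrative call site (not part of the original snippet): materialize the
# generator above to count the discovered system health story classes.
story_classes = list(IterAllSystemHealthStoryClasses())
print('%d system health story classes discovered' % len(story_classes))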
def _DiscoverProfileExtenderClasses():
  profile_extenders_dir = os.path.abspath(
      os.path.join(util.GetBaseDir(), '..', 'perf', 'profile_creators'))
  base_dir = os.path.abspath(os.path.join(profile_extenders_dir, '..'))

  profile_extenders_unfiltered = discover.DiscoverClasses(
      profile_extenders_dir, base_dir, profile_extender.ProfileExtender)

  # Remove the '_extender' suffix from the keys.
  profile_extenders = {}
  for test_name, test_class in profile_extenders_unfiltered.iteritems():
    assert test_name.endswith('_extender')
    test_name = test_name[:-len('_extender')]
    profile_extenders[test_name] = test_class
  return profile_extenders
def testDiscoverClassesWithPatternAndIndexByClassName(self):
  classes = discover.DiscoverClasses(self._start_dir,
                                     self._base_dir,
                                     self._base_class,
                                     pattern='another*')
  actual_classes = dict((name, cls.__name__)
                        for name, cls in classes.iteritems())
  expected_classes = {
      'dummy_exception_impl1': 'DummyExceptionImpl1',
      'dummy_exception_impl2': 'DummyExceptionImpl2',
      'dummy_exception_with_parameter_impl1':
          'DummyExceptionWithParameterImpl1',
  }
  self.assertEqual(actual_classes, expected_classes)
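# The discover tests above use self._start_dir, self._base_dir and
# self._base_class without showing their setUp. A plausible fixture is
# sketched below; the 'discoverable_classes' test-data directory and the
# Exception base class are assumptions inferred from the expected class
# names, not taken from the snippets.
def setUp(self):
  self._base_dir = util.GetUnittestDataDir()
  self._start_dir = os.path.join(self._base_dir, 'discoverable_classes')
  self._base_class = Exception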
def RunTests(args):
  parser = _CreateTestArgParsers()
  try:
    options, extra_args = parser.parse_known_args(args)
  except arg_parser._Bailout:
    PrintTelemetryHelp()
    return parser.exit_status
  binary_manager.InitDependencyManager(options.client_configs)
  for start_dir in options.start_dirs:
    modules_to_classes = discover.DiscoverClasses(
        start_dir,
        options.top_level_dir,
        base_class=serially_executed_browser_test_case.
        SeriallyExecutedBrowserTestCase)
    browser_test_classes = modules_to_classes.values()

  _ValidateDistinctNames(browser_test_classes)

  test_class = None
  for cl in browser_test_classes:
    if cl.Name() == options.test:
      test_class = cl
      break

  if not test_class:
    print('Cannot find test class with name matching %s' % options.test)
    print('Available tests: %s' % '\n'.join(
        cl.Name() for cl in browser_test_classes))
    return 1

  test_class._typ_runner = typ_runner = typ.Runner()

  # Create test context.
  typ_runner.context = browser_test_context.TypTestContext()
  for c in options.client_configs:
    typ_runner.context.client_configs.append(c)
  typ_runner.context.finder_options = ProcessCommandLineOptions(
      test_class, options, extra_args)
  typ_runner.context.test_class = test_class
  typ_runner.context.expectations_files = options.expectations_files
  test_times = None
  if options.read_abbreviated_json_results_from:
    with open(options.read_abbreviated_json_results_from, 'r') as f:
      abbr_results = json.load(f)
      test_times = abbr_results.get('times')

  # Set up the typ.Runner instance.
  typ_runner.args.all = options.all
  typ_runner.args.expectations_files = options.expectations_files
  typ_runner.args.jobs = options.jobs
  typ_runner.args.list_only = options.list_only
  typ_runner.args.metadata = options.metadata
  typ_runner.args.passthrough = options.passthrough
  typ_runner.args.path = options.path
  typ_runner.args.quiet = options.quiet
  typ_runner.args.repeat = options.repeat
  typ_runner.args.repository_absolute_path = options.repository_absolute_path
  typ_runner.args.retry_limit = options.retry_limit
  typ_runner.args.retry_only_retry_on_failure_tests = (
      options.retry_only_retry_on_failure_tests)
  typ_runner.args.skip = options.skip
  typ_runner.args.suffixes = TEST_SUFFIXES
  typ_runner.args.tags = options.tags
  typ_runner.args.test_name_prefix = options.test_name_prefix
  typ_runner.args.test_filter = options.test_filter
  typ_runner.args.test_results_server = options.test_results_server
  typ_runner.args.test_type = options.test_type
  typ_runner.args.top_level_dir = options.top_level_dir
  typ_runner.args.write_full_results_to = options.write_full_results_to
  typ_runner.args.write_trace_to = options.write_trace_to
  typ_runner.args.disable_resultsink = options.disable_resultsink

  typ_runner.classifier = _GetClassifier(typ_runner)
  typ_runner.path_delimiter = test_class.GetJSONResultsDelimiter()
  typ_runner.setup_fn = _SetUpProcess
  typ_runner.teardown_fn = _TearDownProcess

  tests_to_run = LoadTestCasesToBeRun(
      test_class=test_class,
      finder_options=typ_runner.context.finder_options,
      filter_tests_after_sharding=options.filter_tests_after_sharding,
      total_shards=options.total_shards,
      shard_index=options.shard_index,
      test_times=test_times,
      debug_shard_distributions=options.debug_shard_distributions,
      typ_runner=typ_runner)
  for t in tests_to_run:
    typ_runner.context.test_case_ids_to_run.add(t.id())
  typ_runner.context.Freeze()
  browser_test_context._global_test_context = typ_runner.context

  # Several class-level variables are set for GPU tests when
  # LoadTestCasesToBeRun is called. Functions like ExpectationsFiles and
  # GenerateTags, which use these variables, should be called after
  # LoadTestCasesToBeRun.
  test_class_expectations_files = test_class.ExpectationsFiles()
  # All file paths in test_class_expectations_files must be absolute.
  assert all(os.path.isabs(path) for path in test_class_expectations_files)
  typ_runner.args.expectations_files.extend(
      test_class_expectations_files)
  typ_runner.args.ignored_tags.extend(test_class.IgnoredTags())

  # Since the sharding logic is handled by the browser_test_runner harness by
  # passing browser_test_context.test_case_ids_to_run to the subprocess to
  # indicate which test cases to run, we explicitly disable sharding in typ.
  typ_runner.args.total_shards = 1
  typ_runner.args.shard_index = 0

  typ_runner.args.timing = True
  typ_runner.args.verbose = options.verbose
  typ_runner.win_multiprocessing = typ.WinMultiprocessing.importable
  try:
    ret, _, _ = typ_runner.run()
  except KeyboardInterrupt:
    print("interrupted, exiting", file=sys.stderr)
    ret = 130
  return ret
def GetBenchmarksInSubDirectory(directory):
  return discover.DiscoverClasses(
      start_dir=directory,
      top_level_dir=path_util.GetPerfDir(),
      base_class=benchmark_module.Benchmark,
      index_by_class_name=True).values()
def _IterAllPlatformBackendClasses():
  platform_dir = os.path.dirname(
      os.path.realpath(platform_backend_module.__file__))
  return discover.DiscoverClasses(
      platform_dir, util.GetTelemetryDir(),
      platform_backend_module.PlatformBackend).itervalues()