def _MatchPageSetName(story_set_name, story_set_base_dir):
  """Returns the discovered StorySet class whose Name() matches, else None."""
  candidates = discover.DiscoverClasses(
      story_set_base_dir, story_set_base_dir, story.StorySet).values()
  for candidate in candidates:
    if candidate.Name() == story_set_name:
      return candidate
  return None
def load_tests(loader, standard_tests, pattern): del loader, standard_tests, pattern # unused suite = progress_reporter.TestSuite() benchmarks_dir = os.path.dirname(__file__) top_level_dir = os.path.dirname(benchmarks_dir) # Using the default of |index_by_class_name=False| means that if a module # has multiple benchmarks, only the last one is returned. all_benchmarks = discover.DiscoverClasses( benchmarks_dir, top_level_dir, benchmark_module.Benchmark, index_by_class_name=False).values() for benchmark in all_benchmarks: if sys.modules[benchmark.__module__] in _BLACK_LIST_TEST_MODULES: continue # TODO(tonyg): Smoke doesn't work with session_restore yet. if (benchmark.Name().startswith('session_restore') or benchmark.Name().startswith('skpicture_printer')): continue if hasattr(benchmark, 'generated_profile_archive'): # We'd like to test these, but don't know how yet. continue class BenchmarkSmokeTest(unittest.TestCase): pass method = SmokeTestGenerator(benchmark) # Make sure any decorators are propagated from the original declaration. # (access to protected members) pylint: disable=W0212 # TODO(dpranke): Since we only pick the first test from every class # (above), if that test is disabled, we'll end up not running *any* # test from the class. We should probably discover all of the tests # in a class, and then throw the ones we don't need away instead. # Merge decorators. for attribute in ['_enabled_strings', '_disabled_strings']: # Do set union of attributes to eliminate duplicates. merged_attributes = list( set( getattr(method, attribute, []) + getattr(benchmark, attribute, []))) if merged_attributes: setattr(method, attribute, merged_attributes) # Handle the case where the benchmark is Enabled/Disabled everywhere. if (getattr(method, attribute, None) == [] or getattr(benchmark, attribute, None) == []): setattr(method, attribute, []) setattr(BenchmarkSmokeTest, benchmark.Name(), method) suite.addTest(BenchmarkSmokeTest(benchmark.Name())) return suite
def _GetTests():
  # Lazily discover once, caching the result on the function object itself.
  cached = getattr(_GetTests, 'tests', None)
  if cached is None:
    root = util.GetBaseDir()
    cached = discover.DiscoverClasses(root, root, test.Test,
                                      index_by_class_name=True)
    _GetTests.tests = cached
  return cached
def _GetTests():
  # Lazy load and cache results on the function object.
  if not hasattr(_GetTests, 'tests'):
    base_dir = util.GetBaseDir()
    discovered = discover.DiscoverClasses(base_dir, base_dir, test.Test,
                                          index_by_class_name=True)
    # Re-key the discovered classes by their display names. Loop variable
    # renamed from |test| to avoid shadowing the imported |test| module.
    _GetTests.tests = dict(
        (t.GetName(), t) for t in discovered.itervalues())
  return _GetTests.tests
def _MatchPageSetName(page_set_name, page_set_base_dir):
  """Returns the discovered PageSet class named |page_set_name|, else None."""
  discovered = discover.DiscoverClasses(
      page_set_base_dir, page_set_base_dir, page_set.PageSet,
      index_by_class_name=True)
  for candidate in discovered.values():
    if candidate.Name() == page_set_name:
      return candidate
  return None
def testDiscoverClassesOneClassPerModule(self):
  classes = discover.DiscoverClasses(
      self._start_dir, self._base_dir, self._base_class,
      one_class_per_module=True)
  actual_classes = [cls.__name__ for cls in classes]
  expected_classes = [
      'DummyExceptionImpl1',
      'DummyException',
      'DummyExceptionWithParameterImpl2',
  ]
  self.assertItemsEqual(actual_classes, expected_classes)
def testDiscoverOneDirectlyConstructableClassPerModuleWithPattern(self):
  classes = discover.DiscoverClasses(
      self._start_dir, self._base_dir, self._base_class, pattern='another*',
      directly_constructable=True, one_class_per_module=True)
  actual_classes = [cls.__name__ for cls in classes]
  self.assertItemsEqual(actual_classes, ['DummyExceptionImpl1'])
def load_tests(loader, standard_tests, pattern):
  """Builds a unittest suite with one generated smoke test per benchmark.

  Discovers every Benchmark under this directory, skips blacklisted and
  known-broken ones, and attaches a generated test method (with the
  benchmark's Enabled/Disabled decorators merged in via MergeDecorators)
  to a fresh TestCase class.
  """
  del loader, standard_tests, pattern  # unused
  suite = progress_reporter.TestSuite()

  benchmarks_dir = os.path.dirname(__file__)
  top_level_dir = os.path.dirname(benchmarks_dir)

  # Using the default of |index_by_class_name=False| means that if a module
  # has multiple benchmarks, only the last one is returned.
  all_benchmarks = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=False).values()
  for benchmark in all_benchmarks:
    if sys.modules[benchmark.__module__] in _BLACK_LIST_TEST_MODULES:
      continue
    # TODO(tonyg): Smoke doesn't work with session_restore yet.
    if (benchmark.Name().startswith('session_restore') or
        benchmark.Name().startswith('skpicture_printer')):
      continue

    if hasattr(benchmark, 'generated_profile_archive'):
      # We'd like to test these, but don't know how yet.
      continue

    # A fresh class per benchmark so each generated method gets its own
    # TestCase container.
    class BenchmarkSmokeTest(unittest.TestCase):
      pass

    # tab_switching needs more than one page to test correctly.
    if 'tab_switching' in benchmark.Name():
      method = SmokeTestGenerator(benchmark, num_pages=2)
    else:
      method = SmokeTestGenerator(benchmark)

    # Make sure any decorators are propagated from the original declaration.
    # (access to protected members) pylint: disable=protected-access
    # TODO(dpranke): Since we only pick the first test from every class
    # (above), if that test is disabled, we'll end up not running *any*
    # test from the class. We should probably discover all of the tests
    # in a class, and then throw the ones we don't need away instead.
    disabled_benchmark_attr = decorators.DisabledAttributeName(benchmark)
    disabled_method_attr = decorators.DisabledAttributeName(method)
    enabled_benchmark_attr = decorators.EnabledAttributeName(benchmark)
    enabled_method_attr = decorators.EnabledAttributeName(method)

    MergeDecorators(method, disabled_method_attr, benchmark,
                    disabled_benchmark_attr)
    MergeDecorators(method, enabled_method_attr, benchmark,
                    enabled_benchmark_attr)

    setattr(BenchmarkSmokeTest, benchmark.Name(), method)
    suite.addTest(BenchmarkSmokeTest(benchmark.Name()))

  return suite
def _IterAllSystemHealthStoryClasses():
  """Yields every discovered SystemHealthStory subclass in stable order."""
  start_dir = os.path.dirname(os.path.abspath(__file__))
  discovered = discover.DiscoverClasses(
      start_dir=start_dir,
      top_level_dir=os.path.dirname(start_dir),
      base_class=system_health_story.SystemHealthStory)
  # Sort the classes by their names so that their order is stable and
  # deterministic.
  for _, story_class in sorted(discovered.iteritems()):
    yield story_class
def _GetSubclasses(base_dir, cls):
  """Returns all subclasses of |cls| discovered under |base_dir|.

  Args:
    base_dir: directory to scan; also used as the top-level directory for
        discovery.
    cls: the base class whose subclasses should be discovered.

  Returns:
    A dict mapping each discovery key (underscored class name, since
    index_by_class_name=True) to the discovered subclass.
  """
  return discover.DiscoverClasses(base_dir, base_dir, cls,
                                  index_by_class_name=True)
def _MatchPageSetName(input_name):
  """Returns the PageSet class whose Name() equals |input_name|, else None."""
  # Searching dir-by-dir and returning on the first hit yields the same
  # first match as concatenating all dirs and then searching.
  for base_dir in config.base_paths:
    discovered = discover.DiscoverClasses(
        base_dir, base_dir, page_set.PageSet,
        index_by_class_name=True).values()
    for candidate in discovered:
      if candidate.Name() == input_name:
        return candidate
  return None
def current_benchmarks():
  """Returns every Benchmark class under the local benchmarks/ directory."""
  benchmarks_dir = os.path.join(os.path.dirname(__file__), 'benchmarks')
  # The parent of benchmarks/ is this file's directory.
  top_level_dir = os.path.dirname(benchmarks_dir)
  discovered = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=True)
  return discovered.values()
def _Benchmarks(environment):
  """Collects Benchmark classes from every configured benchmark directory."""
  found = []
  for search_dir in environment.benchmark_dirs:
    discovered = discover.DiscoverClasses(
        search_dir, environment.top_level_dir, benchmark.Benchmark,
        index_by_class_name=True)
    found.extend(discovered.values())
  return found
def testDiscoverClassesWithPattern(self):
  classes = discover.DiscoverClasses(
      self._start_dir, self._base_dir, self._base_class, pattern='another*')
  actual_classes = [cls.__name__ for cls in classes]
  expected_classes = [
      'DummyExceptionImpl1',
      'DummyExceptionImpl2',
      'DummyExceptionWithParameterImpl1',
  ]
  self.assertItemsEqual(actual_classes, expected_classes)
def _GetAllPossiblePageTestInstances():
  """Instantiates every page test reachable from measurements and benchmarks.

  Returns:
    A list of page test instances: one per directly-constructable
    LegacyPageTest measurement, plus one per benchmark (excluding
    TimelineBasedMeasurement tests).
  """
  page_test_instances = []
  measurements_dir = os.path.dirname(__file__)
  top_level_dir = os.path.dirname(measurements_dir)
  benchmarks_dir = os.path.join(top_level_dir, 'benchmarks')

  # Get all page test instances from measurement classes that are directly
  # constructible
  all_measurement_classes = discover.DiscoverClasses(
      measurements_dir, top_level_dir, legacy_page_test.LegacyPageTest,
      index_by_class_name=True, directly_constructable=True).values()
  for measurement_class in all_measurement_classes:
    page_test_instances.append(measurement_class())

  all_benchmarks_classes = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark).values()

  # Get all page test instances from defined benchmarks.
  # Note: since this depends on the command line options, there is no guaranteed
  # that this will generate all possible page test instances but it's worth
  # enough for smoke test purpose.
  for benchmark_class in all_benchmarks_classes:
    options = options_for_unittests.GetCopy()
    parser = optparse.OptionParser()
    browser_options.BrowserOptions.AddCommandLineArgs(parser)
    try:
      benchmark_class.AddCommandLineArgs(parser)
      benchmark_module.AddCommandLineArgs(parser)
      benchmark_class.SetArgumentDefaults(parser)
    except Exception:
      # Re-raise after logging so the failing benchmark is identifiable.
      logging.error('Exception raised when processing benchmark %s',
                    benchmark_class)
      raise

    options.MergeDefaultValues(parser.get_default_values())

    pt = benchmark_class().CreatePageTest(options)
    if not isinstance(pt, timeline_based_measurement.TimelineBasedMeasurement):
      page_test_instances.append(pt)

  return page_test_instances
def _GetAllSystemHealthBenchmarks():
  """Returns the perf benchmarks defined in the system_health_benchmark module."""
  all_perf_benchmarks = discover.DiscoverClasses(
      path_util.GetPerfBenchmarksDir(), path_util.GetPerfDir(),
      benchmark_module.Benchmark, index_by_class_name=True).values()
  # Keep only benchmarks whose defining module is system_health_benchmark.
  return [candidate for candidate in all_perf_benchmarks
          if sys.modules[candidate.__module__] == system_health_benchmark]
def testDiscoverDirectlyConstructableClasses(self):
  classes = discover.DiscoverClasses(
      self._start_dir, self._base_dir, self._base_class,
      directly_constructable=True)
  actual_classes = [cls.__name__ for cls in classes]
  expected_classes = [
      'DummyException',
      'DummyExceptionImpl1',
      'DummyExceptionImpl2',
  ]
  self.assertItemsEqual(actual_classes, expected_classes)
def FindAllActionNames(base_dir):
  """Returns a set of all action names used in our measurements."""
  action_names = set()
  # Get all PageTests except for ProfileCreators (see crbug.com/319573)
  for _, cls in discover.DiscoverClasses(
      base_dir, base_dir, page_test.PageTest).items():
    if not issubclass(cls, profile_creator.ProfileCreator):
      # Instantiate to read the instance attribute; empty/None names are
      # skipped.
      action_name = cls().action_name_to_run
      if action_name:
        action_names.add(action_name)
  return action_names
def testDiscoverClassesWithPatternAndIndexByModule(self):
  classes = discover.DiscoverClasses(
      self._start_dir, self._base_dir, self._base_class,
      pattern='another*', index_by_class_name=False)
  actual_classes = {}
  for module_name, cls in classes.iteritems():
    actual_classes[module_name] = cls.__name__
  self.assertEqual(actual_classes, {
      'another_discover_dummyclass': 'DummyExceptionImpl2',
  })
def testDiscoverClassesBasic(self):
  classes = discover.DiscoverClasses(self._start_dir, self._base_dir,
                                     self._base_class)
  actual_classes = {module_name: cls.__name__
                    for module_name, cls in classes.iteritems()}
  self.assertEqual(actual_classes, {
      'discover_dummyclass': 'DummyException',
      'another_discover_dummyclass': 'DummyExceptionImpl2',
  })
def _GetSubclasses(base_dir, cls):
  """Discovers every subclass of |cls| found under |base_dir|.

  Args:
    cls: a class

  Returns:
    dict of {underscored_class_name: benchmark class}
  """
  discovered = discover.DiscoverClasses(
      base_dir, base_dir, cls, index_by_class_name=True)
  return discovered
def current_benchmarks():
  """Returns all perf benchmarks, sorted by display name."""
  benchmarks_dir = os.path.join(
      path_util.GetChromiumSrcDir(), 'tools', 'perf', 'benchmarks')
  top_level_dir = os.path.dirname(benchmarks_dir)
  discovered = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=True).values()
  # In-place sort of the values() list; same ordering as sorted(...).
  discovered.sort(key=lambda b: b.Name())
  return discovered
def testDiscoverClassesWithPatternAndIndexByClassName(self):
  classes = discover.DiscoverClasses(
      self._start_dir, self._base_dir, self._base_class, pattern='another*')
  actual_classes = {}
  for key, cls in classes.iteritems():
    actual_classes[key] = cls.__name__
  self.assertEqual(actual_classes, {
      'dummy_exception_impl1': 'DummyExceptionImpl1',
      'dummy_exception_impl2': 'DummyExceptionImpl2',
  })
def load_tests(_, _2, _3):
  """Builds a suite containing one smoke test per composable benchmark.

  Only benchmarks whose page test class is among the discovered measurement
  classes ("composable" benchmarks) are included.
  """
  suite = progress_reporter.TestSuite()

  benchmarks_dir = os.path.dirname(__file__)
  top_level_dir = os.path.dirname(benchmarks_dir)
  measurements_dir = os.path.join(top_level_dir, 'measurements')

  all_measurements = discover.DiscoverClasses(
      measurements_dir, top_level_dir, page_test.PageTest,
      pattern='*.py').values()
  all_benchmarks = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      pattern='*.py').values()
  for benchmark in all_benchmarks:
    if benchmark.PageTestClass() not in all_measurements:
      # If the benchmark is not in measurements, then it is not composable.
      # Ideally we'd like to test these as well, but the non-composable
      # benchmarks are usually long-running benchmarks.
      continue

    # TODO(tonyg): Smoke doesn't work with session_restore yet.
    if benchmark.Name().startswith('session_restore'):
      continue

    if hasattr(benchmark, 'generated_profile_archive'):
      # We'd like to test these, but don't know how yet.
      continue

    # A fresh class per benchmark so each generated method gets its own
    # TestCase container.
    class BenchmarkSmokeTest(unittest.TestCase):
      pass

    setattr(BenchmarkSmokeTest, benchmark.Name(),
            SmokeTestGenerator(benchmark))
    suite.addTest(BenchmarkSmokeTest(benchmark.Name()))

  return suite
def testDiscoverClassesByClassName(self):
  classes = discover.DiscoverClasses(self._start_dir, self._base_dir,
                                     self._base_class,
                                     index_by_class_name=True)
  actual_classes = {key: value.__name__
                    for key, value in classes.iteritems()}
  self.assertEqual(actual_classes, {
      'dummy_exception': 'DummyException',
      'dummy_exception_impl1': 'DummyExceptionImpl1',
      'dummy_exception_impl2': 'DummyExceptionImpl2',
  })
def RunSmokeTest(self, page_sets_dir, top_level_dir):
  """Smoke-tests every page set discovered in |page_sets_dir|.

  Subclasses of PageSetSmokeTest call this from a test method to validate
  each page set's archive, credentials, and types.
  """
  discovered = discover.DiscoverClasses(page_sets_dir, top_level_dir,
                                        page_set_module.PageSet).values()
  for page_set_class in discovered:
    page_set = page_set_class()
    logging.info('Testing %s', page_set.file_path)
    self.CheckArchive(page_set)
    self.CheckCredentials(page_set)
    self.CheckTypes(page_set)
def testDiscoverClassesWithIndexByModuleName(self):
  classes = discover.DiscoverClasses(self._start_dir, self._base_dir,
                                     self._base_class,
                                     index_by_class_name=False)
  actual_classes = {mod_name: cls.__name__
                    for mod_name, cls in classes.iteritems()}
  self.assertEqual(actual_classes, {
      'another_discover_dummyclass': 'DummyExceptionWithParameterImpl1',
      'discover_dummyclass': 'DummyException',
      'parameter_discover_dummyclass': 'DummyExceptionWithParameterImpl2',
  })
def Main(base_dir):
  """Records a WPR archive for the page set named on the command line.

  Returns:
    The number of failed pages, capped at 255 (suitable as an exit code).
  """
  measurements = discover.DiscoverClasses(base_dir, base_dir,
                                          page_measurement.PageMeasurement)
  options = browser_options.BrowserOptions()
  parser = options.CreateParser('%prog <page_set>')
  page_runner.AddCommandLineOptions(parser)

  recorder = RecordPage(measurements)
  recorder.AddCommandLineOptions(parser)
  recorder.AddOutputOptions(parser)

  _, args = parser.parse_args()

  if len(args) != 1:
    parser.print_usage()
    sys.exit(1)

  ps = page_set.PageSet.FromFile(args[0])

  # Set the archive path to something temporary.
  temp_target_wpr_file_path = tempfile.mkstemp()[1]
  ps.wpr_archive_info.AddNewTemporaryRecording(temp_target_wpr_file_path)

  # Do the actual recording.
  options.wpr_mode = wpr_modes.WPR_RECORD
  options.no_proxy_server = True
  recorder.CustomizeBrowserOptions(options)
  results = page_runner.Run(recorder, ps, options)

  if results.errors or results.failures:
    logging.warning(
        'Some pages failed. The recording has not been updated for '
        'these pages.')
    # zip(*pairs)[0] extracts the page from each (page, message) tuple.
    logging.warning('Failed pages:\n%s',
                    '\n'.join(zip(*results.errors + results.failures)[0]))

  if results.skipped:
    logging.warning('Some pages were skipped. The recording has not been '
                    'updated for these pages.')
    logging.warning('Skipped pages:\n%s', '\n'.join(zip(*results.skipped)[0]))

  if results.successes:
    # Update the metadata for the pages which were recorded.
    ps.wpr_archive_info.AddRecordedPages(results.successes)
  else:
    # Nothing was recorded; discard the temporary archive file.
    os.remove(temp_target_wpr_file_path)

  return min(255, len(results.failures))
def Run(project_config, test_run_options, args):
  """Discovers and runs the browser test suite named on the command line.

  Returns:
    The number of test failures, or 1 if no suite matches the given name.
  """
  binary_manager.InitDependencyManager(project_config.client_configs)
  parser = argparse.ArgumentParser(description='Run a browser test suite')
  parser.add_argument('test', type=str, help='Name of the test suite to run')
  parser.add_argument(
      '--write-abbreviated-json-results-to', metavar='FILENAME',
      action='store', help=(
          'If specified, writes the full results to that path in json form.'))
  option, extra_args = parser.parse_known_args(args)

  # NOTE(review): |modules_to_classes| and |browser_test_classes| are
  # reassigned each iteration, so only classes from the last start_dir
  # survive this loop — looks unintended; confirm against callers.
  for start_dir in project_config.start_dirs:
    modules_to_classes = discover.DiscoverClasses(
        start_dir, project_config.top_level_dir,
        base_class=serially_executed_browser_test_case.
        SeriallyBrowserTestCase)
    browser_test_classes = modules_to_classes.values()

  ValidateDistinctNames(browser_test_classes)

  test_class = None
  for cl in browser_test_classes:
    if cl.Name() == option.test:
      test_class = cl

  if not test_class:
    print 'Cannot find test class with name matched %s' % option.test
    print 'Available tests: %s' % '\n'.join(
        cl.Name() for cl in browser_test_classes)
    return 1

  options = ProcessCommandLineOptions(test_class, extra_args)

  suite = unittest.TestSuite()
  for test in LoadTests(test_class, options):
    suite.addTest(test)

  results = unittest.TextTestRunner(
      verbosity=test_run_options.verbosity).run(suite)
  if option.write_abbreviated_json_results_to:
    with open(option.write_abbreviated_json_results_to, 'w') as f:
      json_results = {'failures': [], 'valid': True}
      for (failed_test_case, _) in results.failures:
        json_results['failures'].append(failed_test_case.id())
      json.dump(json_results, f)
  return len(results.failures)
def _DiscoverProfileCreatorClasses():
  """Discovers ProfileCreator subclasses, keyed without the '_creator' suffix."""
  profile_creators_dir = os.path.abspath(os.path.join(
      util.GetBaseDir(), os.pardir, 'perf', 'profile_creators'))
  base_dir = os.path.abspath(os.path.join(profile_creators_dir, os.pardir))

  unfiltered = discover.DiscoverClasses(
      profile_creators_dir, base_dir, profile_creator.ProfileCreator)

  # Remove '_creator' suffix from keys.
  suffix = '_creator'
  profile_creators = {}
  for name, creator_class in unfiltered.iteritems():
    assert name.endswith(suffix)
    profile_creators[name[:-len(suffix)]] = creator_class
  return profile_creators