def GenerateBenchmarkOptions(benchmark_class):
  # Set the benchmark's default arguments.
  options = options_for_unittests.GetCopy()
  options.output_format = 'none'
  parser = options.CreateParser()

  # TODO(nednguyen): probably this logic of setting up the benchmark options
  # parser & processing the options should be sharable with telemetry's
  # core.
  benchmark_class.AddCommandLineArgs(parser)
  benchmark_module.AddCommandLineArgs(parser)
  benchmark_class.SetArgumentDefaults(parser)
  options.MergeDefaultValues(parser.get_default_values())

  benchmark_class.ProcessCommandLineArgs(None, options)
  benchmark_module.ProcessCommandLineArgs(None, options)
  # Only measure a single story so that this test cycles reasonably quickly.
  options.pageset_repeat = 1
  options.page_repeat = 1
  return options
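
# Hypothetical usage sketch (assumption, not from the original file): a smoke
# test could build options via GenerateBenchmarkOptions and run the benchmark
# class once, asserting a zero return code. `_RunBenchmarkOnce` and its
# parameters are placeholders introduced here for illustration only.
def _RunBenchmarkOnce(test_case, benchmark_class):
  options = GenerateBenchmarkOptions(benchmark_class)
  test_case.assertEqual(0, benchmark_class().Run(options),
                        msg='Failed: %s' % benchmark_class.Name())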
def BenchmarkSmokeTest(self):
  # Only measure a single page so that this test cycles reasonably quickly.
  benchmark.options['pageset_repeat'] = 1
  benchmark.options['page_repeat'] = 1

  class SinglePageBenchmark(benchmark):  # pylint: disable=no-init
    def CreateStorySet(self, options):
      # pylint: disable=super-on-old-class
      story_set = super(SinglePageBenchmark, self).CreateStorySet(options)
      # Only smoke test the first story since smoke testing everything takes
      # too long.
      for s in story_set.stories[num_pages:]:
        story_set.RemoveStory(s)
      return story_set

  # Set the benchmark's default arguments.
  options = options_for_unittests.GetCopy()
  options.output_formats = ['none']
  parser = options.CreateParser()

  benchmark.AddCommandLineArgs(parser)
  benchmark_module.AddCommandLineArgs(parser)
  benchmark.SetArgumentDefaults(parser)
  options.MergeDefaultValues(parser.get_default_values())

  benchmark.ProcessCommandLineArgs(None, options)
  benchmark_module.ProcessCommandLineArgs(None, options)

  possible_browser = browser_finder.FindBrowser(options)
  if SinglePageBenchmark.ShouldDisable(possible_browser):
    self.skipTest('Benchmark %s has ShouldDisable return True' %
                  SinglePageBenchmark.Name())

  self.assertEqual(0, SinglePageBenchmark().Run(options),
                   msg='Failed: %s' % benchmark)
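
# Note (assumption, not shown in this excerpt): BenchmarkSmokeTest references
# `benchmark` and `num_pages` as free variables, so it is meant to be defined
# inside a generator that closes over them and returns the bound test method,
# roughly along these lines (the generator's name is a placeholder):
#
#   def SmokeTestGenerator(benchmark, num_pages=1):
#     def BenchmarkSmokeTest(self):
#       ...  # body as above; `benchmark` / `num_pages` come from the closure
#     return BenchmarkSmokeTest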