def BenchmarkSmokeTest(self):
  """Runs the benchmark end-to-end once and verifies result processing.

  Skips the test when the benchmark did not actually run any stories, and
  fails if either the benchmark run or the result processing returns a
  non-zero exit code.
  """
  # Benchmarks that normally loop over multiple iterations only need a
  # single pass for smoke-testing purposes.
  if hasattr(benchmark_class, 'enable_smoke_test_mode'):
    benchmark_class.enable_smoke_test_mode = True

  with tempfile_ext.NamedTemporaryDirectory() as output_dir:
    options = testing.GetRunOptions(
        output_dir=output_dir,
        benchmark_cls=benchmark_class,
        overrides={'story_shard_end_index': num_pages},
        environment=chromium_config.GetDefaultChromiumConfig())
    # One repeat per page is sufficient when smoke testing.
    options.pageset_repeat = 1
    options.output_formats = ['histograms']
    options.max_values_per_test_case = MAX_VALUES_PER_TEST_CASE
    results_processor.ProcessOptions(options)

    return_code = benchmark_class().Run(options)
    # TODO(crbug.com/1019139): Make 111 be the exit code that means
    # "no stories were run.".
    if return_code == -1 or return_code == 111:
      self.skipTest('The benchmark was not run.')
    self.assertEqual(
        return_code, 0,
        msg='Benchmark run failed: %s' % benchmark_class.Name())

    return_code = results_processor.ProcessResults(options)
    self.assertEqual(
        return_code, 0,
        msg='Result processing failed: %s' % benchmark_class.Name())
def main(config, args=None):
  """Parses args, runs the benchmark command, and processes its results.

  Args:
    config: A project environment/config object forwarded to the arg parser.
    args: Optional list of command line arguments; defaults to sys.argv.

  Returns:
    A non-zero exit code if either the benchmark run or the result
    processing failed; 0 on full success.
  """
  options = command_line.ParseArgs(
      environment=config, args=args,
      results_arg_parser=results_processor.ArgumentParser())
  results_processor.ProcessOptions(options)
  run_return_code = command_line.RunCommand(options)
  process_return_code = results_processor.ProcessResults(options)
  # NOTE: previously this returned max(run_return_code, process_return_code),
  # which masks failures when a return code is negative (e.g. a -1 sentinel
  # or a signal-induced code): max(-1, 0) == 0 reports success. Use the same
  # explicit non-zero check as the other entry points in this project.
  if process_return_code != 0:
    return process_return_code
  return run_return_code
def main(config, args=None):
  """Runs the requested command and processes the produced results.

  Args:
    config: The project environment/config forwarded to argument parsing.
    args: Optional argument list; None lets the parser use sys.argv.

  Returns:
    The result-processing exit code when it is non-zero, otherwise the
    exit code of the command run itself.
  """
  options = command_line.ParseArgs(
      environment=config,
      args=args,
      results_arg_parser=results_processor.ArgumentParser())
  results_processor.ProcessOptions(options)
  run_code = command_line.RunCommand(options)
  process_code = results_processor.ProcessResults(options)
  return process_code if process_code != 0 else run_code
def main(args):
  """Runs a gtest executable standalone and processes its results.

  Args:
    args: Command line arguments; any arguments not recognized here are
      forwarded to the gtest executable itself.

  Returns:
    The result-processing exit code when non-zero, otherwise the exit
    code of the gtest run.
  """
  parser = argparse.ArgumentParser(
      parents=[results_processor.ArgumentParser()])
  parser.add_argument('executable', help='The name of the executable to run.')
  # Unrecognized flags are passed through to the gtest binary.
  options, leftover_args = parser.parse_known_args(args)
  results_processor.ProcessOptions(options, standalone=True)

  run_code = RunGTest(options, leftover_args)
  process_code = ProcessResults(options)
  return process_code if process_code != 0 else run_code
def GenerateBenchmarkOptions(output_dir, benchmark_cls):
  """Builds fully-processed run options for smoke testing a benchmark.

  Args:
    output_dir: Directory where benchmark output artifacts are written.
    benchmark_cls: The benchmark class to generate options for.

  Returns:
    A processed options object ready to run the benchmark with.
  """
  options = testing.GetRunOptions(
      output_dir=output_dir,
      benchmark_cls=benchmark_cls,
      overrides={'run_full_story_set': True},
      environment=chromium_config.GetDefaultChromiumConfig())
  options.output_formats = ['histograms']
  # A single repeat of each page is enough for a smoke run.
  options.pageset_repeat = 1
  options.max_values_per_test_case = MAX_VALUES_PER_TEST_CASE
  # Browser logging is enabled only in the smoke test. The hope is that it
  # catches all crashes here, so the actual perf benchmarks never need to
  # turn logging on.
  options.browser_options.logging_verbosity = 'non-verbose'
  options.target_platforms = benchmark_cls.GetSupportedPlatformNames(
      benchmark_cls.SUPPORTED_PLATFORMS)
  results_processor.ProcessOptions(options)
  return options
def setUp(self):
  """Creates processed run options writing histograms into a temp dir."""
  import shutil  # Local import; the file-level import block is not visible.
  # tempfile.mkdtemp() does not clean up after itself, so the previous code
  # leaked one directory per test run. Register a cleanup so each test
  # removes its scratch directory (assumes this is a unittest.TestCase,
  # which provides addCleanup — consistent with the sibling smoke tests).
  output_dir = tempfile.mkdtemp()
  self.addCleanup(shutil.rmtree, output_dir, ignore_errors=True)
  self.options = testing.GetRunOptions(output_dir=output_dir)
  self.options.output_formats = ['histograms']
  results_processor.ProcessOptions(self.options)