def ProcessCommandLineArgs(cls, parser, options, environment):
  all_benchmarks = environment.GetBenchmarks()
  if not options.positional_args:
    possible_browser = (browser_finder.FindBrowser(options)
                        if options.browser_type else None)
    PrintBenchmarkList(all_benchmarks, possible_browser)
    parser.error('missing required argument: benchmark_name')

  benchmark_name = options.positional_args[0]
  benchmark_class = environment.GetBenchmarkByName(benchmark_name)
  if benchmark_class is None:
    most_likely_matched_benchmarks = matching.GetMostLikelyMatchedObject(
        all_benchmarks, benchmark_name, lambda x: x.Name())
    if most_likely_matched_benchmarks:
      print('Do you mean any of these benchmarks below?', file=sys.stderr)
      PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
    parser.error('no such benchmark: %s' % benchmark_name)

  if len(options.positional_args) > 1:
    parser.error('unrecognized arguments: %s' %
                 ' '.join(options.positional_args[1:]))

  assert issubclass(benchmark_class, benchmark.Benchmark), (
      'Trying to run a non-Benchmark?!')

  story_runner.ProcessCommandLineArgs(parser, options, environment)
  benchmark_class.ProcessCommandLineArgs(parser, options)
  cls._benchmark = benchmark_class
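
# PrintBenchmarkList is called above but not defined in this excerpt. Below is
# a minimal sketch of a compatible helper, assuming it lists benchmark names
# and descriptions on the given stream; this is illustrative only (the real
# implementation also reports browser support, and later versions shown
# further down additionally take an expectations_file argument):
def PrintBenchmarkList(benchmarks, possible_browser, output_stream=None):
  del possible_browser  # Browser-specific support checks omitted in sketch.
  output_stream = output_stream or sys.stdout
  if not benchmarks:
    output_stream.write('No benchmarks found!\n')
    return
  for b in sorted(benchmarks, key=lambda b: b.Name()):
    output_stream.write('  %-30s %s\n' % (b.Name(), b.Description()))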
def ProcessCommandLineArgs(cls, parser, options, extra_args, environment):
  del environment  # unused
  for arg in extra_args:
    if arg == '--browser' or arg.startswith('--browser='):
      parser.error('--browser=... is not allowed when running trybot.')
  all_benchmarks = discover.DiscoverClasses(
      start_dir=path_util.GetPerfBenchmarksDir(),
      top_level_dir=path_util.GetPerfDir(),
      base_class=benchmark.Benchmark).values()
  all_benchmark_names = [b.Name() for b in all_benchmarks]
  all_benchmarks_by_names = {b.Name(): b for b in all_benchmarks}
  benchmark_class = all_benchmarks_by_names.get(options.benchmark_name, None)
  if not benchmark_class:
    possible_benchmark_names = matching.GetMostLikelyMatchedObject(
        all_benchmark_names, options.benchmark_name)
    parser.error(
        'No benchmark named "%s". Do you mean any of these benchmarks '
        'below?\n%s' % (options.benchmark_name,
                        '\n'.join(possible_benchmark_names)))

  is_benchmark_disabled, reason = cls.IsBenchmarkDisabledOnTrybotPlatform(
      benchmark_class, options.trybot)
  also_run_disabled_option = '--also-run-disabled-tests'
  if is_benchmark_disabled and also_run_disabled_option not in extra_args:
    parser.error('%s To run the benchmark on trybot anyway, add the '
                 '%s option.' % (reason, also_run_disabled_option))
def _Impl(all_items, category_name):
  # Note: `target` is not a parameter here; in the original source this is a
  # nested helper that captures `target` from the enclosing function's scope.
  candidates = matching.GetMostLikelyMatchedObject(
      all_items.items(), target, name_func=lambda kv: kv[1].Name())
  if candidates:
    sys.stderr.write('\nDo you mean any of these %s below?\n' % category_name)
    _PrintPairs([(k, v.Description()) for k, v in candidates], sys.stderr)
    return True
  return False
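
# _PrintPairs is defined elsewhere in the original module. A minimal sketch,
# assuming it writes (name, description) pairs as two aligned columns:
def _PrintPairs(pairs, output_stream):
  if not pairs:
    return
  first_column_width = max(len(name) for name, _ in pairs) + 2
  for name, description in pairs:
    output_stream.write('  %s%s\n' % (name.ljust(first_column_width),
                                      description or ''))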
def testGetMostLikelyMatchedObject(self):
  # Test moved from telemetry/benchmark_runner_unittest.py
  all_benchmarks = [BenchmarkFoo, BenchmarkBar, UnusualBenchmark]
  self.assertEqual(
      [BenchmarkFoo, BenchmarkBar],
      matching.GetMostLikelyMatchedObject(
          all_benchmarks, 'BenchmarkFooz', name_func=lambda x: x.Name()))
  self.assertEqual(
      [BenchmarkBar, BenchmarkFoo],
      matching.GetMostLikelyMatchedObject(
          all_benchmarks, 'BarBenchmark', name_func=lambda x: x.Name()))
  self.assertEqual(
      [UnusualBenchmark],
      matching.GetMostLikelyMatchedObject(
          all_benchmarks, 'unusual', name_func=lambda x: x.Name()))
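
# For reference, a self-contained sketch of what
# matching.GetMostLikelyMatchedObject could look like, built on difflib.
# The scoring function and the 0.4 cut-off are assumptions for illustration,
# not necessarily the real module's values:
import difflib

def GetMostLikelyMatchedObject(objects, target_name,
                               name_func=lambda x: x,
                               matched_score_threshold=0.4):
  # Score each object by its name's similarity to the target, keep only
  # those above the threshold, and return them best match first.
  def MatchScore(obj):
    return difflib.SequenceMatcher(
        isjunk=None, a=name_func(obj).lower(), b=target_name.lower()).ratio()

  scored = [(MatchScore(obj), obj) for obj in objects]
  scored = [(score, obj) for score, obj in scored
            if score > matched_score_threshold]
  scored.sort(key=lambda pair: pair[0], reverse=True)
  return [obj for _, obj in scored]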
def ProcessCommandLineArgs(cls, parser, options, environment):
  all_benchmarks = _Benchmarks(environment)
  if environment.expectations_files:
    assert len(environment.expectations_files) == 1
    expectations_file = environment.expectations_files[0]
  else:
    expectations_file = None
  if not options.positional_args:
    possible_browser = (browser_finder.FindBrowser(options)
                        if options.browser_type else None)
    PrintBenchmarkList(all_benchmarks, possible_browser, expectations_file)
    sys.exit(-1)

  input_benchmark_name = options.positional_args[0]
  matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
  if not matching_benchmarks:
    print('No benchmark named "%s".' % input_benchmark_name, file=sys.stderr)
    print(file=sys.stderr)
    most_likely_matched_benchmarks = matching.GetMostLikelyMatchedObject(
        all_benchmarks, input_benchmark_name, lambda x: x.Name())
    if most_likely_matched_benchmarks:
      print('Do you mean any of these benchmarks below?', file=sys.stderr)
      PrintBenchmarkList(most_likely_matched_benchmarks, None,
                         expectations_file, sys.stderr)
    sys.exit(-1)
  if len(matching_benchmarks) > 1:
    print('Multiple benchmarks named "%s".' % input_benchmark_name,
          file=sys.stderr)
    print('Did you mean one of these?', file=sys.stderr)
    print(file=sys.stderr)
    PrintBenchmarkList(matching_benchmarks, None, expectations_file,
                       sys.stderr)
    sys.exit(-1)

  benchmark_class = matching_benchmarks.pop()
  if len(options.positional_args) > 1:
    parser.error('Too many arguments.')

  assert issubclass(benchmark_class, benchmark.Benchmark), (
      'Trying to run a non-Benchmark?!')

  benchmark.ProcessCommandLineArgs(parser, options)
  benchmark_class.ProcessCommandLineArgs(parser, options)

  cls._benchmark = benchmark_class
  cls._expectations_path = expectations_file
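
# _MatchBenchmarkName is defined elsewhere. A rough sketch of the contract it
# must satisfy for the code above: an exact-name lookup returning a list (the
# real helper may additionally resolve aliases or partial names, which is why
# multiple matches are possible):
def _MatchBenchmarkName(input_benchmark_name, environment):
  return [benchmark_class for benchmark_class in _Benchmarks(environment)
          if benchmark_class.Name() == input_benchmark_name]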
def ProcessCommandLineArgs(cls, parser, options, extra_args, environment):
  del environment  # unused
  for arg in extra_args:
    if arg == '--browser' or arg.startswith('--browser='):
      parser.error('--browser=... is not allowed when running trybot.')
  all_benchmarks = discover.DiscoverClasses(
      start_dir=path_util.GetPerfBenchmarksDir(),
      top_level_dir=path_util.GetPerfDir(),
      base_class=benchmark.Benchmark).values()
  all_benchmark_names = [b.Name() for b in all_benchmarks]
  if options.benchmark_name not in all_benchmark_names:
    possible_benchmark_names = matching.GetMostLikelyMatchedObject(
        all_benchmark_names, options.benchmark_name)
    parser.error(
        'No benchmark named "%s". Do you mean any of these benchmarks '
        'below?\n%s' % (options.benchmark_name,
                        '\n'.join(possible_benchmark_names)))
def _FindBenchmark(cls, parser, args, environment):
  all_benchmarks = _Benchmarks(environment)
  if not args.positional_args:
    possible_browser = (browser_finder.FindBrowser(args)
                        if args.browser_type else None)
    PrintBenchmarkList(all_benchmarks, possible_browser)
    sys.exit(-1)

  input_benchmark_name = args.positional_args[0]
  matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
  if not matching_benchmarks:
    print('No benchmark named "%s".' % input_benchmark_name, file=sys.stderr)
    print(file=sys.stderr)
    most_likely_matched_benchmarks = matching.GetMostLikelyMatchedObject(
        all_benchmarks, input_benchmark_name, lambda x: x.Name())
    if most_likely_matched_benchmarks:
      print('Do you mean any of these benchmarks below?', file=sys.stderr)
      PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
    sys.exit(-1)
  if len(matching_benchmarks) > 1:
    print('Multiple benchmarks named "%s".' % input_benchmark_name,
          file=sys.stderr)
    print('Did you mean one of these?', file=sys.stderr)
    print(file=sys.stderr)
    PrintBenchmarkList(matching_benchmarks, None, sys.stderr)
    sys.exit(-1)

  benchmark_class = matching_benchmarks.pop()
  if len(args.positional_args) > 1:
    parser.error('Too many arguments.')

  assert issubclass(benchmark_class, benchmark.Benchmark), (
      'Trying to run a non-Benchmark?!')

  return benchmark_class