Example #1
def GenerateBenchmarkOptions(benchmark_class):
    # Set the benchmark's default arguments.
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    parser = options.CreateParser()

    # TODO(nednguyen): probably this logic of setting up the benchmark options
    # parser & processing the options should be sharable with telemetry's
    # core.
    benchmark_class.AddCommandLineArgs(parser)
    benchmark_module.AddCommandLineArgs(parser)
    benchmark_class.SetArgumentDefaults(parser)
    options.MergeDefaultValues(parser.get_default_values())

    benchmark_class.ProcessCommandLineArgs(None, options)
    benchmark_module.ProcessCommandLineArgs(None, options)
    # Only measure a single story so that this test cycles reasonably quickly.
    options.pageset_repeat = 1

    # Enable browser logging in the smoke test only. Hopefully, this will detect
    # all crashes and hence remove the need to enable logging in actual perf
    # benchmarks.
    options.browser_options.logging_verbosity = 'non-verbose'
    options.target_platforms = benchmark_class.GetSupportedPlatformNames(
        benchmark_class.SUPPORTED_PLATFORMS)
    return options
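
# Usage sketch (an assumption, not part of the original example): a generated
# per-benchmark smoke test could consume these options roughly as follows,
# mirroring the Run(options) calls in the later examples. The helper name and
# the test_case parameter are hypothetical.
def RunBenchmarkSmokeTest(test_case, benchmark_class):
    options = GenerateBenchmarkOptions(benchmark_class)
    return_code = benchmark_class().Run(options)
    test_case.assertEqual(0, return_code, msg='Failed: %s' % benchmark_class)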
Example #2
    def BenchmarkSmokeTest(self):
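        # Note: `benchmark` is assumed to be the benchmark class under test,
        # supplied by an enclosing test-generator rather than defined in this
        # excerpt.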
        # Only measure a single page so that this test cycles reasonably quickly.
        benchmark.options['pageset_repeat'] = 1
        benchmark.options['page_repeat'] = 1

        class SinglePageBenchmark(benchmark):  # pylint: disable=W0232
            def CreatePageSet(self, options):
                # pylint: disable=E1002
                ps = super(SinglePageBenchmark, self).CreatePageSet(options)
                for p in ps.pages:
                    p.skip_waits = True
                    ps.user_stories = [p]
                    break
                return ps

        # Set the benchmark's default arguments.
        options = options_for_unittests.GetCopy()
        options.output_format = 'none'
        options.suppress_gtest_report = True
        parser = options.CreateParser()

        benchmark.AddCommandLineArgs(parser)
        benchmark_module.AddCommandLineArgs(parser)
        benchmark.SetArgumentDefaults(parser)
        options.MergeDefaultValues(parser.get_default_values())

        benchmark.ProcessCommandLineArgs(None, options)
        benchmark_module.ProcessCommandLineArgs(None, options)

        self.assertEqual(0,
                         SinglePageBenchmark().Run(options),
                         msg='Failed: %s' % benchmark)
Example #3
    def ProcessCommandLineArgs(cls, parser, args):
        if not args.positional_args:
            possible_browser = (browser_finder.FindBrowser(args)
                                if args.browser_type else None)
            _PrintBenchmarkList(_Benchmarks(), possible_browser)
            sys.exit(-1)

        input_benchmark_name = args.positional_args[0]
        matching_benchmarks = _MatchBenchmarkName(input_benchmark_name)
        if not matching_benchmarks:
            print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
            print >> sys.stderr
            _PrintBenchmarkList(_Benchmarks(), None)
            sys.exit(-1)

        if len(matching_benchmarks) > 1:
            print >> sys.stderr, ('Multiple benchmarks named "%s".' %
                                  input_benchmark_name)
            print >> sys.stderr, 'Did you mean one of these?'
            print >> sys.stderr
            _PrintBenchmarkList(matching_benchmarks, None)
            sys.exit(-1)

        benchmark_class = matching_benchmarks.pop()
        if len(args.positional_args) > 1:
            parser.error('Too many arguments.')

        assert issubclass(
            benchmark_class,
            benchmark.Benchmark), ('Trying to run a non-Benchmark?!')

        benchmark.ProcessCommandLineArgs(parser, args)
        benchmark_class.ProcessCommandLineArgs(parser, args)

        cls._benchmark = benchmark_class
Example #4
    def BenchmarkSmokeTest(self):
        # Only measure a single page so that this test cycles reasonably quickly.
        benchmark.options['pageset_repeat'] = 1
        benchmark.options['page_repeat'] = 1

        class SinglePageBenchmark(benchmark):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)
                for story in story_set.stories:
                    story.skip_waits = True
                    story_set.stories = [story]
                    break
                return story_set

        # Set the benchmark's default arguments.
        options = options_for_unittests.GetCopy()
        options.output_format = 'none'
        parser = options.CreateParser()

        benchmark.AddCommandLineArgs(parser)
        benchmark_module.AddCommandLineArgs(parser)
        benchmark.SetArgumentDefaults(parser)
        options.MergeDefaultValues(parser.get_default_values())

        benchmark.ProcessCommandLineArgs(None, options)
        benchmark_module.ProcessCommandLineArgs(None, options)

        self.assertEqual(0,
                         SinglePageBenchmark().Run(options),
                         msg='Failed: %s' % benchmark)
Example #5
    def ProcessCommandLineArgs(cls, parser, args, environment):
        benchmark_class = cls._FindBenchmark(parser, args, environment)

        benchmark.ProcessCommandLineArgs(parser, args)
        benchmark_class.ProcessCommandLineArgs(parser, args)

        cls._benchmark = benchmark_class
Example #6
    def BenchmarkSmokeTest(self):
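        # Note: `benchmark`, `num_pages`, and MAX_NUM_VALUES are assumed to be
        # supplied by the enclosing test-generation scope; they are not defined
        # in this excerpt.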
        class SinglePageBenchmark(benchmark):  # pylint: disable=no-init
            # Only measure a single page so that this test cycles reasonably quickly.
            options = benchmark.options.copy()
            options['pageset_repeat'] = 1

            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)

                # Only smoke test the first story since smoke testing everything takes
                # too long.
                for s in story_set.stories[num_pages:]:
                    story_set.RemoveStory(s)
                return story_set

        # Some benchmarks run multiple iterations, which is not needed for a
        # smoke test.
        if hasattr(SinglePageBenchmark, 'enable_smoke_test_mode'):
            SinglePageBenchmark.enable_smoke_test_mode = True

        # Set the benchmark's default arguments.
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        parser = options.CreateParser()

        SinglePageBenchmark.AddCommandLineArgs(parser)
        benchmark_module.AddCommandLineArgs(parser)
        SinglePageBenchmark.SetArgumentDefaults(parser)
        options.MergeDefaultValues(parser.get_default_values())

        # Prevent benchmarks from accidentally trying to upload too much data to the
        # chromeperf dashboard. The number of values uploaded is equal to (the
        # average number of values produced by a single story) * (1 + (the number of
        # stories)). The "1 + " accounts for values summarized across all stories.
        # We can approximate "the average number of values produced by a single
        # story" as the number of values produced by the first story.
        # pageset_repeat doesn't matter because values are summarized across
        # repetitions before uploading.
        story_set = benchmark().CreateStorySet(options)
        SinglePageBenchmark.MAX_NUM_VALUES = MAX_NUM_VALUES / len(
            story_set.stories)

        SinglePageBenchmark.ProcessCommandLineArgs(None, options)
        benchmark_module.ProcessCommandLineArgs(None, options)

        single_page_benchmark = SinglePageBenchmark()
        with open(path_util.GetExpectationsPath()) as fp:
            single_page_benchmark.AugmentExpectationsWithParser(fp.read())

        return_code = single_page_benchmark.Run(options)
        if return_code == -1:
            self.skipTest('The benchmark was not run.')
        self.assertEqual(0, return_code, msg='Failed: %s' % benchmark)
Example #7
    def ProcessCommandLineArgs(cls, parser, args):
        if not args.positional_args:
            _PrintTestList(_Tests())
            sys.exit(-1)

        input_test_name = args.positional_args[0]
        matching_tests = _MatchTestName(input_test_name)
        if not matching_tests:
            print >> sys.stderr, 'No test named "%s".' % input_test_name
            print >> sys.stderr
            _PrintTestList(_Tests())
            sys.exit(-1)

        if len(matching_tests) > 1:
            print >> sys.stderr, 'Multiple tests named "%s".' % input_test_name
            print >> sys.stderr, 'Did you mean one of these?'
            print >> sys.stderr
            _PrintTestList(matching_tests)
            sys.exit(-1)

        test_class = matching_tests.pop()
        if issubclass(test_class, page_test.PageTest):
            if len(args.positional_args) < 2:
                parser.error('Need to specify a page set for "%s".' %
                             test_class.Name())
            if len(args.positional_args) > 2:
                parser.error('Too many arguments.')
            page_set_path = args.positional_args[1]
            if not os.path.exists(page_set_path):
                parser.error('Page set not found.')
            if not (os.path.isfile(page_set_path)
                    and discover.IsPageSetFile(page_set_path)):
                parser.error('Unsupported page set file format.')

            class TestWrapper(benchmark.Benchmark):
                test = test_class

                @classmethod
                def CreatePageSet(cls, options):
                    return page_set.PageSet.FromFile(page_set_path)

            test_class = TestWrapper
        else:
            if len(args.positional_args) > 1:
                parser.error('Too many arguments.')

        assert issubclass(
            test_class,
            benchmark.Benchmark), ('Trying to run a non-Benchmark?!')

        benchmark.ProcessCommandLineArgs(parser, args)
        test_class.ProcessCommandLineArgs(parser, args)

        cls._test = test_class
Example #8
  def ProcessCommandLineArgs(cls, parser, args):
    if not args.positional_args:
      _PrintTestList(_Tests())
      sys.exit(-1)

    input_test_name = args.positional_args[0]
    matching_tests = _MatchTestName(input_test_name)
    if not matching_tests:
      print >> sys.stderr, 'No test named "%s".' % input_test_name
      print >> sys.stderr
      _PrintTestList(_Tests())
      sys.exit(-1)

    if len(matching_tests) > 1:
      print >> sys.stderr, 'Multiple tests named "%s".' % input_test_name
      print >> sys.stderr, 'Did you mean one of these?'
      print >> sys.stderr
      _PrintTestList(matching_tests)
      sys.exit(-1)

    test_class = matching_tests.pop()
    if issubclass(test_class, page_test.PageTest):
      if len(args.positional_args) < 2:
        parser.error('Need to specify a page set for "%s".' % test_class.Name())
      if len(args.positional_args) > 2:
        parser.error('Too many arguments.')
      page_set_name = args.positional_args[1]
      page_set_class = _MatchPageSetName(page_set_name)
      if page_set_class is None:
        parser.error("Page set %s not found. Available sets:\n%s" %
                     (page_set_name, _AvailablePageSetNamesString()))

      class TestWrapper(benchmark.Benchmark):
        test = test_class

        @classmethod
        def CreatePageSet(cls, options):
          return page_set_class()

      test_class = TestWrapper
    else:
      if len(args.positional_args) > 1:
        parser.error('Too many arguments.')

    assert issubclass(test_class, benchmark.Benchmark), (
        'Trying to run a non-Benchmark?!')

    benchmark.ProcessCommandLineArgs(parser, args)
    test_class.ProcessCommandLineArgs(parser, args)

    cls._test = test_class
Example #9
    def ProcessCommandLineArgs(cls, parser, options, environment):
        all_benchmarks = _Benchmarks(environment)
        if environment.expectations_files:
            assert len(environment.expectations_files) == 1
            expectations_file = environment.expectations_files[0]
        else:
            expectations_file = None
        if not options.positional_args:
            possible_browser = (browser_finder.FindBrowser(options)
                                if options.browser_type else None)
            PrintBenchmarkList(all_benchmarks, possible_browser,
                               expectations_file)
            sys.exit(-1)

        input_benchmark_name = options.positional_args[0]
        matching_benchmarks = _MatchBenchmarkName(input_benchmark_name,
                                                  environment)
        if not matching_benchmarks:
            print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
            print >> sys.stderr
            most_likely_matched_benchmarks = matching.GetMostLikelyMatchedObject(
                all_benchmarks, input_benchmark_name, lambda x: x.Name())
            if most_likely_matched_benchmarks:
                print >> sys.stderr, 'Do you mean any of those benchmarks below?'
                PrintBenchmarkList(most_likely_matched_benchmarks, None,
                                   expectations_file, sys.stderr)
            sys.exit(-1)

        if len(matching_benchmarks) > 1:
            print >> sys.stderr, ('Multiple benchmarks named "%s".' %
                                  input_benchmark_name)
            print >> sys.stderr, 'Did you mean one of these?'
            print >> sys.stderr
            PrintBenchmarkList(matching_benchmarks, None, expectations_file,
                               sys.stderr)
            sys.exit(-1)

        benchmark_class = matching_benchmarks.pop()
        if len(options.positional_args) > 1:
            parser.error('Too many arguments.')

        assert issubclass(
            benchmark_class,
            benchmark.Benchmark), ('Trying to run a non-Benchmark?!')

        benchmark.ProcessCommandLineArgs(parser, options)
        benchmark_class.ProcessCommandLineArgs(parser, options)

        cls._benchmark = benchmark_class
        cls._expectations_path = expectations_file
Example #10
    def BenchmarkSmokeTest(self):
        # Only measure a single page so that this test cycles reasonably quickly.
        benchmark.options['pageset_repeat'] = 1

        class SinglePageBenchmark(benchmark):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)

                # Only smoke test the first story since smoke testing everything takes
                # too long.
                for s in story_set.stories[num_pages:]:
                    story_set.RemoveStory(s)
                return story_set

        # Set the benchmark's default arguments.
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        parser = options.CreateParser()

        benchmark.AddCommandLineArgs(parser)
        benchmark_module.AddCommandLineArgs(parser)
        benchmark.SetArgumentDefaults(parser)
        options.MergeDefaultValues(parser.get_default_values())

        # Prevent benchmarks from accidentally trying to upload too much data to the
        # chromeperf dashboard. The number of values uploaded is equal to (the
        # average number of values produced by a single story) * (1 + (the number of
        # stories)). The "1 + " accounts for values summarized across all stories.
        # We can approximate "the average number of values produced by a single
        # story" as the number of values produced by the first story.
        # pageset_repeat doesn't matter because values are summarized across
        # repetitions before uploading.
        story_set = benchmark().CreateStorySet(options)
        SinglePageBenchmark.MAX_NUM_VALUES = MAX_NUM_VALUES / len(
            story_set.stories)

        benchmark.ProcessCommandLineArgs(None, options)
        benchmark_module.ProcessCommandLineArgs(None, options)

        possible_browser = browser_finder.FindBrowser(options)
        if SinglePageBenchmark.ShouldDisable(possible_browser):
            self.skipTest('Benchmark %s has ShouldDisable return True' %
                          SinglePageBenchmark.Name())

        self.assertEqual(0,
                         SinglePageBenchmark().Run(options),
                         msg='Failed: %s' % benchmark)
Example #11
    def setupBenchmark(self):  # pylint: disable=invalid-name
        finder_options = fakes.CreateBrowserFinderOptions()
        finder_options.browser_options.platform = fakes.FakeLinuxPlatform()
        finder_options.output_formats = ['none']
        finder_options.suppress_gtest_report = True
        finder_options.output_dir = None
        finder_options.upload_bucket = 'public'
        finder_options.upload_results = False
        benchmarkclass = FakeBenchmark
        parser = finder_options.CreateParser()
        benchmark_module.AddCommandLineArgs(parser)
        benchmarkclass.AddCommandLineArgs(parser)
        options, _ = parser.parse_args([])
        benchmark_module.ProcessCommandLineArgs(parser, options)
        benchmarkclass.ProcessCommandLineArgs(parser, options)
        benchmark = benchmarkclass()
        return benchmark, finder_options
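
    # Usage sketch (an assumption, not part of the original example): a test
    # built on the fakes above might drive the configured benchmark through the
    # same Run(options) entry point seen in the other examples. The test name
    # and the expected return code are hypothetical.
    def testBenchmarkRunsWithFakePlatform(self):
        benchmark, finder_options = self.setupBenchmark()
        return_code = benchmark.Run(finder_options)
        self.assertEqual(0, return_code)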
Example #12
    def setupTest(self):
        finder_options = fakes.CreateBrowserFinderOptions()
        finder_options.browser_options.platform = fakes.FakeLinuxPlatform()
        finder_options.output_formats = ['none']
        finder_options.suppress_gtest_report = True
        finder_options.output_dir = None
        finder_options.upload_bucket = 'public'
        finder_options.upload_results = False
        testclass = FakeTest
        parser = finder_options.CreateParser()
        benchmark.AddCommandLineArgs(parser)
        testclass.AddCommandLineArgs(parser)
        options, dummy_args = parser.parse_args([])
        benchmark.ProcessCommandLineArgs(parser, options)
        testclass.ProcessCommandLineArgs(parser, options)
        test = testclass()
        return test, finder_options
Example #13
    def ProcessCommandLineArgs(cls, parser, args, environment):
        all_benchmarks = _Benchmarks(environment)
        if not args.positional_args:
            possible_browser = (browser_finder.FindBrowser(args)
                                if args.browser_type else None)
            PrintBenchmarkList(all_benchmarks, possible_browser)
            sys.exit(-1)

        input_benchmark_name = args.positional_args[0]
        matching_benchmarks = _MatchBenchmarkName(input_benchmark_name,
                                                  environment)
        if not matching_benchmarks:
            print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
            print >> sys.stderr
            most_likely_matched_benchmarks = GetMostLikelyMatchedBenchmarks(
                all_benchmarks, input_benchmark_name)
            if most_likely_matched_benchmarks:
                print >> sys.stderr, 'Do you mean any of those benchmarks below?'
                PrintBenchmarkList(most_likely_matched_benchmarks, None,
                                   sys.stderr)
            sys.exit(-1)

        if len(matching_benchmarks) > 1:
            print >> sys.stderr, ('Multiple benchmarks named "%s".' %
                                  input_benchmark_name)
            print >> sys.stderr, 'Did you mean one of these?'
            print >> sys.stderr
            PrintBenchmarkList(matching_benchmarks, None, sys.stderr)
            sys.exit(-1)

        benchmark_class = matching_benchmarks.pop()
        if len(args.positional_args) > 1:
            parser.error('Too many arguments.')

        assert issubclass(
            benchmark_class,
            benchmark.Benchmark), ('Trying to run a non-Benchmark?!')

        benchmark.ProcessCommandLineArgs(parser, args)
        benchmark_class.ProcessCommandLineArgs(parser, args)

        cls._benchmark = benchmark_class
Example #14
def GenerateBenchmarkOptions(benchmark_class):
    # Set the benchmark's default arguments.
    options = options_for_unittests.GetCopy()
    options.output_format = 'none'
    parser = options.CreateParser()

    # TODO(nednguyen): probably this logic of setting up the benchmark options
    # parser & processing the options should be sharable with telemetry's
    # core.
    benchmark_class.AddCommandLineArgs(parser)
    benchmark_module.AddCommandLineArgs(parser)
    benchmark_class.SetArgumentDefaults(parser)
    options.MergeDefaultValues(parser.get_default_values())

    benchmark_class.ProcessCommandLineArgs(None, options)
    benchmark_module.ProcessCommandLineArgs(None, options)
    # Only measure a single story so that this test cycles reasonably quickly.
    options.pageset_repeat = 1
    options.page_repeat = 1
    return options
Example #15
    def BenchmarkSmokeTest(self):
        # Only measure a single page so that this test cycles reasonably quickly.
        benchmark.options['pageset_repeat'] = 1
        benchmark.options['page_repeat'] = 1

        class SinglePageBenchmark(benchmark):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)

                # Only smoke test the first story since smoke testing everything takes
                # too long.
                for s in story_set.stories[num_pages:]:
                    story_set.RemoveStory(s)
                return story_set

        # Set the benchmark's default arguments.
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        parser = options.CreateParser()

        benchmark.AddCommandLineArgs(parser)
        benchmark_module.AddCommandLineArgs(parser)
        benchmark.SetArgumentDefaults(parser)
        options.MergeDefaultValues(parser.get_default_values())

        benchmark.ProcessCommandLineArgs(None, options)
        benchmark_module.ProcessCommandLineArgs(None, options)

        possible_browser = browser_finder.FindBrowser(options)
        if SinglePageBenchmark.ShouldDisable(possible_browser):
            self.skipTest('Benchmark %s has ShouldDisable return True' %
                          SinglePageBenchmark.Name())

        self.assertEqual(0,
                         SinglePageBenchmark().Run(options),
                         msg='Failed: %s' % benchmark)
Example #16
    def ProcessCommandLineArgs(cls, parser, options, environment):
        all_benchmarks = environment.GetBenchmarks()
        if environment.expectations_files:
            assert len(environment.expectations_files) == 1
            expectations_file = environment.expectations_files[0]
        else:
            expectations_file = None
        if not options.positional_args:
            possible_browser = (browser_finder.FindBrowser(options)
                                if options.browser_type else None)
            PrintBenchmarkList(all_benchmarks, possible_browser,
                               expectations_file)
            parser.error('missing required argument: benchmark_name')

        benchmark_name = options.positional_args[0]
        benchmark_class = environment.GetBenchmarkByName(benchmark_name)
        if benchmark_class is None:
            most_likely_matched_benchmarks = matching.GetMostLikelyMatchedObject(
                all_benchmarks, benchmark_name, lambda x: x.Name())
            if most_likely_matched_benchmarks:
                print >> sys.stderr, 'Do you mean any of those benchmarks below?'
                PrintBenchmarkList(most_likely_matched_benchmarks, None,
                                   expectations_file, sys.stderr)
            parser.error('no such benchmark: %s' % benchmark_name)

        if len(options.positional_args) > 1:
            parser.error('unrecognized arguments: %s' %
                         ' '.join(options.positional_args[1:]))

        assert issubclass(
            benchmark_class,
            benchmark.Benchmark), ('Trying to run a non-Benchmark?!')

        benchmark.ProcessCommandLineArgs(parser, options)
        benchmark_class.ProcessCommandLineArgs(parser, options)

        cls._benchmark = benchmark_class
        cls._expectations_path = expectations_file