Example #1
def GenerateSystemHealthCSV(file_path):
    system_health_stories = list(IterAllSystemHealthStories())

    e = expectations.StoryExpectations()
    with open(path_util.GetExpectationsPath()) as fp:
        parser = expectations_parser.TestExpectationParser(fp.read())

    benchmarks = [
        'system_health.common_desktop', 'system_health.common_mobile',
        'system_health.memory_desktop', 'system_health.memory_mobile'
    ]
    for benchmark in benchmarks:
        e.GetBenchmarkExpectationsFromParser(parser.expectations, benchmark)

    disabled_platforms = PopulateExpectations([e.AsDict()['stories']])

    system_health_stories.sort(key=lambda s: s.name)
    with open(file_path, 'w') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(
            ['Story name', 'Platform', 'Description', 'Disabled Platforms'])
        for s in system_health_stories:
            p = s.SUPPORTED_PLATFORMS
            if len(p) == 2:
                p = 'all'
            else:
                p = list(p)[0]
            if s.name in disabled_platforms:
                csv_writer.writerow([
                    s.name, p,
                    s.GetStoryDescription(), disabled_platforms[s.name]
                ])
            else:
                csv_writer.writerow([s.name, p, s.GetStoryDescription(), " "])
    return 0
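
A quick way to sanity-check the generated file is to read it back with csv.DictReader and filter on the 'Disabled Platforms' column. The helper below is a minimal, hypothetical sketch (the function name and the 'system_health.csv' path are illustrative, not part of the original tooling); note that stories with no disabled platforms are written with a single space in that column, hence the strip().

import csv


def PrintDisabledStories(csv_path='system_health.csv'):  # hypothetical helper
    """Reads the CSV written by GenerateSystemHealthCSV and lists disabled stories."""
    with open(csv_path) as f:
        for row in csv.DictReader(f):
            # GenerateSystemHealthCSV writes ' ' when no platform is disabled.
            if row['Disabled Platforms'].strip():
                print('%s is disabled on: %s' %
                      (row['Story name'], row['Disabled Platforms']))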
Example #2
def GetDefaultChromiumConfig():
    return ChromiumConfig(
        benchmark_dirs=[
            path_util.GetOfficialBenchmarksDir(),
            path_util.GetContribDir()
        ],
        top_level_dir=path_util.GetPerfDir(),
        expectations_files=[path_util.GetExpectationsPath()])
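
Example #4 below builds the same configuration inline and hands it to benchmark_runner.main; reusing GetDefaultChromiumConfig for that entry point would look roughly like this (a sketch, assuming benchmark_runner is importable in the same environment as this module):

import sys


def main():
    # Mirror Example #4, but reuse the shared default config defined above.
    return benchmark_runner.main(GetDefaultChromiumConfig())


if __name__ == '__main__':
    sys.exit(main())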
Example #3
    def RunTest(self):
        class SinglePageBenchmark(benchmark_class):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)

                # We want to prevent benchmarks from accidentally trying to upload too
                # much data to the chrome perf dashboard. So this test tries to
                # estimate the number of values that the benchmark _would_ create when
                # running on the waterfall, and fails if too many values are produced.
                # As we run a single story and not the whole benchmark, the number of
                # max values allowed is scaled proportionally.
                # TODO(crbug.com/981349): This logic is only really valid for legacy
                # values, and does not take histograms into account. An alternative
                # should be implemented when using the results processor.
                type(self).MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set)
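                # Illustration (hypothetical numbers): if MAX_NUM_VALUES is 50000 and
                # the full story set holds 25 stories, this single-story run is
                # capped at 50000 / 25 = 2000 values.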

                stories_to_remove = [
                    s for s in story_set.stories if s != story_to_smoke_test
                ]
                for s in stories_to_remove:
                    story_set.RemoveStory(s)
                assert story_set.stories
                return story_set

        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            # Set the benchmark's default arguments.
            options = GenerateBenchmarkOptions(
                output_dir=temp_dir, benchmark_cls=SinglePageBenchmark)
            possible_browser = browser_finder.FindBrowser(options)
            if possible_browser is None:
                self.skipTest('Cannot find the browser to run the test.')

            simplified_test_name = self.id().replace(
                'benchmarks.system_health_smoke_test.SystemHealthBenchmarkSmokeTest.',
                '')

            # Sanity check to ensure that the substring removal was effective.
            assert len(simplified_test_name) < len(self.id())

            if (simplified_test_name in _DISABLED_TESTS
                    and not options.run_disabled_tests):
                self.skipTest('Test is explicitly disabled')

            single_page_benchmark = SinglePageBenchmark()
            # TODO(crbug.com/985103): Remove this code once
            # AugmentExpectationsWithFile is deleted and replaced with functionality
            # in story_filter.py.
            if hasattr(single_page_benchmark, 'AugmentExpectationsWithFile'):
                with open(path_util.GetExpectationsPath()) as fp:
                    single_page_benchmark.AugmentExpectationsWithFile(
                        fp.read())

            return_code = single_page_benchmark.Run(options)

        if return_code == -1:
            self.skipTest('The benchmark was not run.')
        self.assertEqual(0, return_code, msg='Failed: %s' % benchmark_class)
Example #4
def main():
    config = chromium_config.ChromiumConfig(
        benchmark_dirs=[
            path_util.GetOfficialBenchmarksDir(),
            path_util.GetContribDir()
        ],
        top_level_dir=path_util.GetPerfDir(),
        expectations_files=[path_util.GetExpectationsPath()])
    return benchmark_runner.main(config)
Example #5
    def BenchmarkSmokeTest(self):
        class SinglePageBenchmark(benchmark):  # pylint: disable=no-init
            # Only measure a single page so that this test cycles reasonably quickly.
            options = benchmark.options.copy()
            options['pageset_repeat'] = 1

            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)

                # Only smoke test the first story since smoke testing everything takes
                # too long.
                for s in story_set.stories[num_pages:]:
                    story_set.RemoveStory(s)
                return story_set

        # Some benchmarks run multiple iterations,
        # which is not needed for a smoke test.
        if hasattr(SinglePageBenchmark, 'enable_smoke_test_mode'):
            SinglePageBenchmark.enable_smoke_test_mode = True

        # Set the benchmark's default arguments.
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        parser = options.CreateParser()

        SinglePageBenchmark.AddCommandLineArgs(parser)
        benchmark_module.AddCommandLineArgs(parser)
        SinglePageBenchmark.SetArgumentDefaults(parser)
        options.MergeDefaultValues(parser.get_default_values())

        # Prevent benchmarks from accidentally trying to upload too much data to the
        # chromeperf dashboard. The number of values uploaded is equal to (the
        # average number of values produced by a single story) * (1 + (the number of
        # stories)). The "1 + " accounts for values summarized across all stories.
        # We can approximate "the average number of values produced by a single
        # story" as the number of values produced by the first story.
        # pageset_repeat doesn't matter because values are summarized across
        # repetitions before uploading.
        story_set = benchmark().CreateStorySet(options)
        SinglePageBenchmark.MAX_NUM_VALUES = MAX_NUM_VALUES / len(
            story_set.stories)
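        # Worked example (hypothetical numbers): if the first story produces 20
        # values and the benchmark has 10 stories, a full waterfall run would
        # upload roughly 20 * (1 + 10) = 220 values, so the single-story cap set
        # above is MAX_NUM_VALUES / 10.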

        SinglePageBenchmark.ProcessCommandLineArgs(None, options)
        benchmark_module.ProcessCommandLineArgs(None, options)

        single_page_benchmark = SinglePageBenchmark()
        with open(path_util.GetExpectationsPath()) as fp:
            single_page_benchmark.AugmentExpectationsWithParser(fp.read())

        return_code = single_page_benchmark.Run(options)
        if return_code == -1:
            self.skipTest('The benchmark was not run.')
        self.assertEqual(0, return_code, msg='Failed: %s' % benchmark)
Example #6
def main():
    benchmarks = benchmark_finders.GetAllBenchmarks()
    with open(path_util.GetExpectationsPath()) as fp:
        raw_expectations_data = fp.read()
    test_expectations = typ_expectations_parser.TestExpectations()
    ret, msg = test_expectations.parse_tagged_list(raw_expectations_data)
    if ret:
        logging.error(msg)
        return ret
    validate_story_names(benchmarks, test_expectations)
    validate_expectations_component_tags(test_expectations)
    return 0
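
parse_tagged_list expects typ's tagged-list expectations format. The fragment below is a minimal, self-contained sketch of that parsing step using an inline, made-up expectation (the bug URL, tags, and story name are illustrative, and the `from typ import expectations_parser` import path is an assumption about how typ_expectations_parser is bound above):

from typ import expectations_parser as typ_expectations_parser  # assumed import path

# Illustrative content only; the real data comes from path_util.GetExpectationsPath().
_SAMPLE_EXPECTATIONS = """\
# tags: [ android mac win ]
# results: [ Skip ]
crbug.com/12345 [ android ] system_health.common_mobile/browse:news:example [ Skip ]
"""

test_expectations = typ_expectations_parser.TestExpectations()
ret, msg = test_expectations.parse_tagged_list(_SAMPLE_EXPECTATIONS)
assert not ret, msg  # A non-zero return code means the expectations failed to parse.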
Example #7
    def BenchmarkSmokeTest(self):
        class SinglePageBenchmark(benchmark):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)

                # We want to prevent benchmarks from accidentally trying to upload too
                # much data to the chrome perf dashboard. So this test tries to
                # estimate the number of values that the benchmark _would_ create when
                # running on the waterfall, and fails if too many values are produced.
                # As we run a single story and not the whole benchmark, the number of
                # max values allowed is scaled proportionally.
                # TODO(crbug.com/981349): This logic is only really valid for legacy
                # values, and does not take histograms into account. An alternative
                # should be implemented when using the results processor.
                type(self).MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set)

                # Only smoke test the first story since smoke testing everything takes
                # too long.
                for s in story_set.stories[num_pages:]:
                    story_set.RemoveStory(s)

                return story_set

        # Some benchmarks run multiple iterations,
        # which is not needed for a smoke test.
        if hasattr(SinglePageBenchmark, 'enable_smoke_test_mode'):
            SinglePageBenchmark.enable_smoke_test_mode = True

        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            # Set the benchmark's default arguments.
            options = options_for_unittests.GetRunOptions(
                output_dir=temp_dir,
                benchmark_cls=SinglePageBenchmark,
                environment=chromium_config.GetDefaultChromiumConfig())
            options.pageset_repeat = 1  # For smoke testing only run the page once.

            single_page_benchmark = SinglePageBenchmark()
            # TODO(crbug.com/985103): Remove this code once
            # AugmentExpectationsWithFile is deleted and replaced with functionality
            # in story_filter.py.
            if hasattr(single_page_benchmark, 'AugmentExpectationsWithFile'):
                with open(path_util.GetExpectationsPath()) as fp:
                    single_page_benchmark.AugmentExpectationsWithFile(
                        fp.read())

            return_code = single_page_benchmark.Run(options)

        if return_code == -1:
            self.skipTest('The benchmark was not run.')
        self.assertEqual(0, return_code, msg='Failed: %s' % benchmark)
Example #8
    def RunTest(self):
        class SinglePageBenchmark(benchmark_class):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)
                stories_to_remove = [
                    s for s in story_set.stories if s != story_to_smoke_test
                ]
                for s in stories_to_remove:
                    story_set.RemoveStory(s)
                assert story_set.stories
                return story_set

        options = GenerateBenchmarkOptions(benchmark_class)

        # Prevent benchmarks from accidentally trying to upload too much data to the
        # chromeperf dashboard. The number of values uploaded is equal to (the
        # average number of values produced by a single story) * (1 + (the number of
        # stories)). The "1 + " accounts for values summarized across all stories.
        # We can approximate "the average number of values produced by a single
        # story" as the number of values produced by the given story.
        # pageset_repeat doesn't matter because values are summarized across
        # repetitions before uploading.
        story_set = benchmark_class().CreateStorySet(options)
        SinglePageBenchmark.MAX_NUM_VALUES = MAX_NUM_VALUES / len(
            story_set.stories)

        possible_browser = browser_finder.FindBrowser(options)
        if possible_browser is None:
            self.skipTest('Cannot find the browser to run the test.')

        simplified_test_name = self.id().replace(
            'benchmarks.system_health_smoke_test.SystemHealthBenchmarkSmokeTest.',
            '')

        # Sanity check to ensure that the substring removal was effective.
        assert len(simplified_test_name) < len(self.id())

        if (simplified_test_name in _DISABLED_TESTS
                and not options.run_disabled_tests):
            self.skipTest('Test is explicitly disabled')

        single_page_benchmark = SinglePageBenchmark()
        with open(path_util.GetExpectationsPath()) as fp:
            single_page_benchmark.AugmentExpectationsWithParser(fp.read())

        return_code = single_page_benchmark.Run(options)
        if return_code == -1:
            self.skipTest('The benchmark was not run.')
        self.assertEqual(0, return_code, msg='Failed: %s' % benchmark_class)
Example #9
def main(args):
  parser = argparse.ArgumentParser(
      description=('Tests if disabled stories exist.'))
  parser.add_argument(
      '--list', action='store_true', default=False,
      help=('Prints list of disabled stories.'))
  options = parser.parse_args(args)
  benchmarks = benchmark_finders.GetAllBenchmarks()
  with open(path_util.GetExpectationsPath()) as fp:
    raw_expectations_data = fp.read()
  if options.list:
    stories = GetDisabledStories(benchmarks, raw_expectations_data)
    print(json.dumps(stories, sort_keys=True, indent=4, separators=(',', ': ')))
  else:
    validate_story_names(benchmarks, raw_expectations_data)
  return 0
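
The usual wiring for an argparse-based main(args) like this is to forward sys.argv[1:] from the script's entry point; a sketch of that boilerplate follows (the enclosing script name is not shown in the example):

import sys

if __name__ == '__main__':
  # Forward command-line arguments; pass --list to print the disabled
  # stories as JSON instead of validating story names.
  sys.exit(main(sys.argv[1:]))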