Example #1
    def BenchmarkSmokeTest(self):
        # Some benchmarks run multiple iterations,
        # which is not needed for a smoke test.
        if hasattr(benchmark_class, 'enable_smoke_test_mode'):
            benchmark_class.enable_smoke_test_mode = True

        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            options = testing.GetRunOptions(
                output_dir=temp_dir,
                benchmark_cls=benchmark_class,
                overrides={'story_shard_end_index': num_pages},
                environment=chromium_config.GetDefaultChromiumConfig())
            options.pageset_repeat = 1  # For smoke testing, only run the page once.
            options.output_formats = ['histograms']
            options.max_values_per_test_case = MAX_VALUES_PER_TEST_CASE
            results_processor.ProcessOptions(options)

            return_code = benchmark_class().Run(options)
            # TODO(crbug.com/1019139): Make 111 the exit code that means
            # "no stories were run".
            if return_code in (-1, 111):
                self.skipTest('The benchmark was not run.')
            self.assertEqual(return_code,
                             0,
                             msg='Benchmark run failed: %s' %
                             benchmark_class.Name())
            return_code = results_processor.ProcessResults(options)
            self.assertEqual(return_code,
                             0,
                             msg='Result processing failed: %s' %
                             benchmark_class.Name())
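
A note on the free variables above: benchmark_class, num_pages, and
MAX_VALUES_PER_TEST_CASE are not defined in the snippet itself; they are closed
over from an enclosing test generator that is not shown. A minimal sketch,
assuming unittest as the test framework and a hypothetical _GenerateSmokeTest
helper, of how such a generator might bind them and attach one test per
benchmark:

import unittest

MAX_VALUES_PER_TEST_CASE = 1000  # assumed cap; the real constant lives elsewhere


def _GenerateSmokeTest(benchmark_class, num_pages=1):
    # Return a test method that closes over benchmark_class and num_pages,
    # so the BenchmarkSmokeTest body shown above can run one benchmark.
    def BenchmarkSmokeTest(self):
        pass  # body as in Example #1
    return BenchmarkSmokeTest


class BenchmarkSmokeTests(unittest.TestCase):
    pass


# Hypothetical registration loop: one generated test per discovered benchmark.
for benchmark_class in []:  # e.g. whatever benchmark discovery returns
    setattr(BenchmarkSmokeTests,
            'test_' + benchmark_class.Name(),
            _GenerateSmokeTest(benchmark_class))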
Example #2
    def BenchmarkSmokeTest(self):
        class SinglePageBenchmark(benchmark):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)

                # We want to prevent benchmarks from accidentally trying to upload too
                # much data to the chrome perf dashboard. So this test tries to
                # estimate the number of values that the benchmark _would_ create when
                # running on the waterfall, and fails if too many values are produced.
                # As we run a single story and not the whole benchmark, the maximum
                # number of values allowed is scaled proportionally (see the worked
                # sketch after this example).
                # TODO(crbug.com/981349): This logic is only really valid for legacy
                # values, and does not take histograms into account. An alternative
                # should be implemented when using the results processor.
                type(self).MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set)

                # Only smoke test the first num_pages stories, since smoke testing
                # everything takes too long.
                for s in story_set.stories[num_pages:]:
                    story_set.RemoveStory(s)

                return story_set

        # Some benchmarks run multiple iterations,
        # which is not needed for a smoke test.
        if hasattr(SinglePageBenchmark, 'enable_smoke_test_mode'):
            SinglePageBenchmark.enable_smoke_test_mode = True

        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            # Set the benchmark's default arguments.
            options = options_for_unittests.GetRunOptions(
                output_dir=temp_dir,
                benchmark_cls=SinglePageBenchmark,
                environment=chromium_config.GetDefaultChromiumConfig())
            options.pageset_repeat = 1  # For smoke testing, only run the page once.

            single_page_benchmark = SinglePageBenchmark()
            # TODO(crbug.com/985103): Remove this code once
            # AugmentExpectationsWithFile is deleted and replaced with functionality
            # in story_filter.py.
            if hasattr(single_page_benchmark, 'AugmentExpectationsWithFile'):
                with open(path_util.GetExpectationsPath()) as fp:
                    single_page_benchmark.AugmentExpectationsWithFile(
                        fp.read())

            return_code = single_page_benchmark.Run(options)

        if return_code == -1:
            self.skipTest('The benchmark was not run.')
        self.assertEqual(0, return_code, msg='Failed: %s' % benchmark)
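
The MAX_NUM_VALUES scaling in CreateStorySet above divides the benchmark-wide
value budget evenly across stories, since the smoke test only runs a slice of
the story set. A small worked sketch of that bookkeeping; the numbers are made
up for illustration and only the formula mirrors the example:

MAX_NUM_VALUES = 50000  # assumed benchmark-wide budget, not the real constant


def PerStoryValueBudget(num_stories_in_set):
    # Running 1 of num_stories_in_set stories should produce roughly
    # 1/num_stories_in_set of the benchmark's values, so the cap is scaled
    # proportionally, as SinglePageBenchmark.CreateStorySet does above.
    return MAX_NUM_VALUES / num_stories_in_set


assert PerStoryValueBudget(25) == 2000.0  # 50000 values / 25 stories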
Example #3
def GenerateBenchmarkOptions(output_dir, benchmark_cls):
    options = options_for_unittests.GetRunOptions(
        output_dir=output_dir,
        benchmark_cls=benchmark_cls,
        environment=chromium_config.GetDefaultChromiumConfig())
    options.pageset_repeat = 1  # For smoke testing, only run each page once.

    # Enable browser logging in the smoke test only. Hopefully, this will detect
    # all crashes and hence remove the need to enable logging in actual perf
    # benchmarks.
    options.browser_options.logging_verbosity = 'non-verbose'
    options.target_platforms = benchmark_cls.GetSupportedPlatformNames(
        benchmark_cls.SUPPORTED_PLATFORMS)
    return options
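
A sketch of how this GenerateBenchmarkOptions helper might be consumed by a
smoke-test body, modeled on the run/skip/assert pattern of Examples #1 and #2;
the surrounding TestCase, the tempfile_ext import path, and the benchmark_cls
parameter are assumptions, not part of this snippet:

import unittest

from py_utils import tempfile_ext  # assumed import path for the temp-dir helper


class HypotheticalSmokeTest(unittest.TestCase):
    def RunBenchmark(self, benchmark_cls):
        # Build options with the helper above, run the benchmark once, and
        # treat -1 ("not run") as a skip, mirroring Example #2.
        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            options = GenerateBenchmarkOptions(temp_dir, benchmark_cls)
            return_code = benchmark_cls().Run(options)
        if return_code == -1:
            self.skipTest('The benchmark was not run.')
        self.assertEqual(0, return_code,
                         msg='Benchmark run failed: %s' % benchmark_cls.Name())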
Example #4
def GenerateBenchmarkOptions(output_dir, benchmark_cls):
    options = testing.GetRunOptions(
        output_dir=output_dir,
        benchmark_cls=benchmark_cls,
        overrides={'run_full_story_set': True},
        environment=chromium_config.GetDefaultChromiumConfig())
    options.pageset_repeat = 1  # For smoke testing, only run each page once.
    options.output_formats = ['histograms']
    options.max_values_per_test_case = MAX_VALUES_PER_TEST_CASE

    # Enable browser logging in the smoke test only. Hopefully, this will detect
    # all crashes and hence remove the need to enable logging in actual perf
    # benchmarks.
    options.browser_options.logging_verbosity = 'non-verbose'
    options.target_platforms = benchmark_cls.GetSupportedPlatformNames(
        benchmark_cls.SUPPORTED_PLATFORMS)
    return options
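
Unlike Example #3, this variant also configures histogram output and a
per-test-case value cap, which pairs with the results_processor flow shown in
Example #1. A sketch of that pairing; the surrounding TestCase, the
tempfile_ext import path, and the benchmark_class parameter are assumptions,
and results_processor is referenced as in Example #1 (its import is not shown
in these snippets):

import unittest

from py_utils import tempfile_ext  # assumed import path for the temp-dir helper


class HypotheticalProcessedSmokeTest(unittest.TestCase):
    def RunAndProcess(self, benchmark_class):
        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            # Same ProcessOptions / Run / ProcessResults sequence as Example #1.
            options = GenerateBenchmarkOptions(temp_dir, benchmark_class)
            results_processor.ProcessOptions(options)
            return_code = benchmark_class().Run(options)
            self.assertEqual(0, return_code,
                             msg='Benchmark run failed: %s' %
                             benchmark_class.Name())
            return_code = results_processor.ProcessResults(options)
            self.assertEqual(0, return_code,
                             msg='Result processing failed: %s' %
                             benchmark_class.Name())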