Example #1
 def setUp(self):
     self.options = options_for_unittests.GetRunOptions(
         output_dir=tempfile.mkdtemp(),
         benchmark_cls=FakeBenchmark,
         fake_browser=True)
     self.options.browser_options.platform = fakes.FakeLinuxPlatform()
     self.benchmark = FakeBenchmark()
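Note: this setUp, like several of the snippets below, creates a throwaway output directory with tempfile.mkdtemp() but never shows the matching cleanup. A minimal tearDown sketch, assuming the standard-library shutil module is imported and that options.output_dir holds the directory passed to GetRunOptions (as Example #19 suggests), could look like this:

 def tearDown(self):
     # Delete the temporary output directory created in setUp so repeated
     # test runs do not accumulate directories on disk (assumes shutil is
     # imported; options.output_dir is the path given to GetRunOptions).
     shutil.rmtree(self.options.output_dir, ignore_errors=True)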
Example #2
  def testIntegrationCreateJsonTestResults(self, time_module):
    time_module.time.side_effect = [1.0, 6.0123]

    options = options_for_unittests.GetRunOptions(output_dir=self._output_dir)
    options.output_formats = ['json-test-results']
    with results_options.CreateResults(
        options, benchmark_name='test_benchmark') as results:
      results.WillRunPage(self._story_set[0])
      results.DidRunPage(self._story_set[0])

    output_file = os.path.join(self._output_dir, 'test-results.json')
    with open(output_file) as f:
      json_test_results = json.load(f)

    self.assertEquals(json_test_results['interrupted'], False)
    self.assertEquals(json_test_results['num_failures_by_type'], {'PASS': 1})
    self.assertEquals(json_test_results['path_delimiter'], '/')
    self.assertAlmostEqual(json_test_results['seconds_since_epoch'],
                           time.time(), delta=1)
    testBenchmarkFoo = json_test_results['tests']['test_benchmark']['Foo']
    self.assertEquals(testBenchmarkFoo['actual'], 'PASS')
    self.assertEquals(testBenchmarkFoo['expected'], 'PASS')
    self.assertFalse(testBenchmarkFoo['is_unexpected'])
    self.assertEquals(testBenchmarkFoo['time'], 5.0123)
    self.assertEquals(testBenchmarkFoo['times'][0], 5.0123)
    self.assertEquals(json_test_results['version'], 3)
Example #3
 def setUp(self):
     self.options = options_for_unittests.GetRunOptions(
         output_dir=tempfile.mkdtemp())
     # We use a mock platform and story set, so tests can inspect which methods
     # were called and easily override their behavior.
     self.mock_platform = test_stories.TestSharedState.mock_platform
     self.mock_story_test = mock.Mock(spec=story_test.StoryTest)
Example #4
def _GetAllPossiblePageTestInstances():
    page_test_instances = []
    measurements_dir = os.path.dirname(__file__)
    top_level_dir = os.path.dirname(measurements_dir)
    benchmarks_dir = os.path.join(top_level_dir, 'benchmarks')

    # Get all page test instances from measurement classes that are directly
    # constructable.
    all_measurement_classes = discover.DiscoverClasses(
        measurements_dir,
        top_level_dir,
        legacy_page_test.LegacyPageTest,
        index_by_class_name=True,
        directly_constructable=True).values()
    for measurement_class in all_measurement_classes:
        page_test_instances.append(measurement_class())

    all_benchmarks_classes = discover.DiscoverClasses(
        benchmarks_dir, top_level_dir, benchmark_module.Benchmark).values()

    # Get all page test instances from defined benchmarks.
    # Note: since this depends on the command line options, there is no
    # guarantee that this will generate all possible page test instances,
    # but it is good enough for smoke testing purposes.
    for benchmark_cls in all_benchmarks_classes:
        options = options_for_unittests.GetRunOptions(
            benchmark_cls=benchmark_cls)
        pt = benchmark_cls().CreatePageTest(options)
        if not isinstance(pt,
                          timeline_based_measurement.TimelineBasedMeasurement):
            page_test_instances.append(pt)

    return page_test_instances
Example #5
def GetRunOptions(*args, **kwargs):
    """Augment telemetry options for tests with results_processor defaults."""
    options = options_for_unittests.GetRunOptions(*args, **kwargs)
    parser = command_line.ArgumentParser()
    processor_options = parser.parse_args([])
    for arg in vars(processor_options):
        if not hasattr(options, arg):
            setattr(options, arg, getattr(processor_options, arg))
    return options
Example #6
def GenerateBenchmarkOptions(output_dir, benchmark_cls):
    options = options_for_unittests.GetRunOptions(output_dir=output_dir,
                                                  benchmark_cls=benchmark_cls)
    options.pageset_repeat = 1  # For smoke testing only run each page once.

    # Enable browser logging in the smoke test only. Hopefully, this will detect
    # all crashes and hence remove the need to enable logging in actual perf
    # benchmarks.
    options.browser_options.logging_verbosity = 'non-verbose'
    options.target_platforms = benchmark_cls.GetSupportedPlatformNames(
        benchmark_cls.SUPPORTED_PLATFORMS)
    return options
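For context, a hedged sketch of how this helper might be driven: the discovery call mirrors Example #4, while GenerateAllBenchmarkOptions is an illustrative name that is not part of the source.

def GenerateAllBenchmarkOptions(output_dir, benchmarks_dir, top_level_dir):
    # Discover every benchmark class (as in Example #4) and build smoke-test
    # options for each one via GenerateBenchmarkOptions above.
    all_benchmark_classes = discover.DiscoverClasses(
        benchmarks_dir, top_level_dir, benchmark_module.Benchmark).values()
    for benchmark_cls in all_benchmark_classes:
        yield benchmark_cls, GenerateBenchmarkOptions(output_dir, benchmark_cls)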
Example #7
    def BenchmarkSmokeTest(self):
        class SinglePageBenchmark(benchmark):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)

                # We want to prevent benchmarks from accidentally trying to upload too
                # much data to the chrome perf dashboard. So this test tries to
                # estimate the number of values that the benchmark _would_ create when
                # running on the waterfall, and fails if too many values are produced.
                # As we run a single story and not the whole benchmark, the maximum
                # number of values allowed is scaled proportionally.
                # TODO(crbug.com/981349): This logic is only really valid for legacy
                # values, and does not take histograms into account. An alternative
                # should be implemented when using the results processor.
                type(self).MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set)

                # Only smoke test the first story since smoke testing everything takes
                # too long.
                for s in story_set.stories[num_pages:]:
                    story_set.RemoveStory(s)

                return story_set

        # Some benchmarks run multiple iterations, which is not needed for a
        # smoke test.
        if hasattr(SinglePageBenchmark, 'enable_smoke_test_mode'):
            SinglePageBenchmark.enable_smoke_test_mode = True

        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            # Set the benchmark's default arguments.
            options = options_for_unittests.GetRunOptions(
                output_dir=temp_dir,
                benchmark_cls=SinglePageBenchmark,
                environment=chromium_config.GetDefaultChromiumConfig())
            options.pageset_repeat = 1  # For smoke testing only run the page once.

            single_page_benchmark = SinglePageBenchmark()
            # TODO(crbug.com/985103): Remove this code once
            # AugmentExpectationsWithFile is deleted and replaced with functionality
            # in story_filter.py.
            if hasattr(single_page_benchmark, 'AugmentExpectationsWithFile'):
                with open(path_util.GetExpectationsPath()) as fp:
                    single_page_benchmark.AugmentExpectationsWithFile(
                        fp.read())

            return_code = single_page_benchmark.Run(options)

        if return_code == -1:
            self.skipTest('The benchmark was not run.')
        self.assertEqual(0, return_code, msg='Failed: %s' % benchmark)
Example #8
  def testIntegrationCreateJsonTestResultsWithNoResults(self):
    options = options_for_unittests.GetRunOptions(output_dir=self._output_dir)
    options.output_formats = ['json-test-results']
    with results_options.CreateResults(options):
      pass

    output_file = os.path.join(self._output_dir, 'test-results.json')
    with open(output_file) as f:
      json_test_results = json.load(f)

    self.assertEquals(json_test_results['interrupted'], False)
    self.assertEquals(json_test_results['num_failures_by_type'], {})
    self.assertEquals(json_test_results['path_delimiter'], '/')
    self.assertAlmostEqual(json_test_results['seconds_since_epoch'],
                           time.time(), delta=1)
    self.assertEquals(json_test_results['tests'], {})
    self.assertEquals(json_test_results['version'], 3)
Example #9
 def setUp(self):
     self.options = options_for_unittests.GetRunOptions(
         output_dir=tempfile.mkdtemp())
Example #10
 def setUpClass(cls):
     cls._options = options_for_unittests.GetRunOptions()
Example #11
 def GetFakeBrowserOptions(self, overrides=None):
     return options_for_unittests.GetRunOptions(output_dir=self.output_dir,
                                                fake_browser=True,
                                                overrides=overrides)
Example #12
 def setUp(self):
   self.options = options_for_unittests.GetRunOptions(
       output_dir=tempfile.mkdtemp(), fake_browser=True)
Example #13
 def setUp(self):
     self._skp_outdir = tempfile.mkdtemp('_skp_test')
     self._options = options_for_unittests.GetRunOptions(
         output_dir=self._skp_outdir)
Example #14
 def setUp(self):
   self._options = options_for_unittests.GetRunOptions(
       output_dir=tempfile.mkdtemp())
   # pylint: disable=protected-access
   self._measurement = blink_perf._BlinkPerfMeasurement()
Example #15
 def testBenchmarkOptions(self):
     """Tests whether benchmark options can be constructed without errors."""
     try:
         options_for_unittests.GetRunOptions(benchmark_cls=benchmark)
     except benchmark_module.InvalidOptionsError as exc:
         self.fail(str(exc))
Example #16
 def setUp(self):
     self._options = options_for_unittests.GetRunOptions(
         output_dir=tempfile.mkdtemp())
     self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
Example #17
 def setUp(self):
   self.options = options_for_unittests.GetRunOptions(
       output_dir=tempfile.mkdtemp())
   self.options.output_formats = ['histograms']
Example #18
 def setUp(self):
     self._story_runner_logging_stub = None
     self._formatted_exception_buffer = StringIO.StringIO()
     self._original_formatter = exception_formatter.PrintFormattedException
     self.options = options_for_unittests.GetRunOptions(
         output_dir=tempfile.mkdtemp())
Example #19
 def setUpClass(cls):
     cls._options = options_for_unittests.GetRunOptions()
     cls._options.output_dir = tempfile.mkdtemp()