Example #1
    def testPageTestWithIncompatibleStory(self):
        options = options_for_unittests.GetCopy()

        b = TestBenchmark(
            story_module.Story(
                name='test story',
                shared_state_class=shared_page_state.SharedPageState))
        with self.assertRaisesRegexp(
                Exception, 'containing only telemetry.page.Page stories'):
            story_runner.RunBenchmark(b, b.CreateStorySet(options), None,
                                      options.browser_options, options)

        state_class = story_module.SharedState
        b = TestBenchmark(
            story_module.Story(name='test benchmark',
                               shared_state_class=state_class))
        with self.assertRaisesRegexp(
                Exception, 'containing only telemetry.page.Page stories'):
            story_runner.RunBenchmark(b, b.CreateStorySet(options), None,
                                      options.browser_options, options)

        b = TestBenchmark(
            android.AndroidStory(name='test benchmark', start_intent=None))
        with self.assertRaisesRegexp(
                Exception, 'containing only telemetry.page.Page stories'):
            story_runner.RunBenchmark(b, b.CreateStorySet(options), None,
                                      options.browser_options, options)
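
For context, the TestBenchmark helper used throughout these tests is defined elsewhere and not shown here. A minimal, hypothetical sketch of such a helper, assuming Telemetry's benchmark.Benchmark and story.StorySet APIs (the real class in the Telemetry unit tests may differ):

from telemetry import benchmark
from telemetry import story as story_module


class TestBenchmark(benchmark.Benchmark):
    """Hypothetical stand-in: wraps a single story in a one-story story set."""

    def __init__(self, story):
        super(TestBenchmark, self).__init__()
        self._story = story

    def CreateStorySet(self, options):
        del options  # Unused in this sketch.
        story_set = story_module.StorySet()
        story_set.AddStory(self._story)
        return story_set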
Example #2
    def Run(self, finder_options):
        """We shouldn't be overriding this according to
    telemetry.benchmark.Benchmark"""
        assert 'chartjson' in finder_options.output_formats, (
            'loading.desktop.network_service requires --output-format=chartjson. '
            'Please contact owner to rewrite the benchmark if chartjson is going '
            'away.')
        assert finder_options.output_dir
        output_dir = finder_options.output_dir
        temp_file_path = os.path.join(output_dir, 'results-chart.json')

        # Run test with feature disabled.
        self.enable_feature = False
        control_return_code = story_runner.RunBenchmark(self, finder_options)
        if control_return_code != 0:
            return control_return_code
        with open(temp_file_path) as f:
            control_chart_json = json.load(f)

        # Run test again with feature enabled.
        self.enable_feature = True
        enabled_return_code = story_runner.RunBenchmark(self, finder_options)
        if enabled_return_code != 0:
            return enabled_return_code
        with open(temp_file_path) as f:
            enabled_chart_json = json.load(f)

        logging.info(
            'Starting to merge control chartjson into enabled chartjson')
        try:
            # Merge the result and compute the difference.
            _MergeControlChartJsonIntoEnabled(enabled_chart_json,
                                              control_chart_json)
        except Exception as e:
            logging.error('exception merging two chart json: %s', repr(e))
            traceback.print_exc()
            with open(temp_file_path, 'w') as f:
                json.dump(
                    {
                        'control_chart_json': control_chart_json,
                        'enabled_chart_json': enabled_chart_json
                    },
                    f,
                    indent=2,
                    separators=(',', ': '))
                f.write('\n')
            return 1
        else:
            logging.info('Finished merging chartjsons, writing back to disk')
            with open(temp_file_path, 'w') as f:
                json.dump(enabled_chart_json,
                          f,
                          indent=2,
                          separators=(',', ': '))
                f.write('\n')
        return 0
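
The _MergeControlChartJsonIntoEnabled helper is defined elsewhere in the benchmark and is not shown above. Purely as a hypothetical illustration of the idea, and not the real implementation, a merge that copies the control run's charts into the enabled result under a suffix could be written as:

def _MergeControlChartJsonIntoEnabled(enabled_chart_json, control_chart_json):
    # Hypothetical sketch only: the actual helper used by
    # loading.desktop.network_service is not shown here and may also compute
    # per-chart differences between the two runs.
    enabled_charts = enabled_chart_json.setdefault('charts', {})
    for chart_name, traces in control_chart_json.get('charts', {}).items():
        enabled_charts[chart_name + '_control'] = traces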
Example #3
    def testPageTestWithCompatibleStory(self):
        original_run_fn = story_runner.RunStorySet
        was_run = [False]

        def RunStub(*arg, **kwargs):
            del arg, kwargs
            was_run[0] = True

        story_runner.RunStorySet = RunStub

        try:
            options = options_for_unittests.GetCopy()
            options.output_formats = ['none']
            options.output_dir = ''
            options.upload_results = False
            options.suppress_gtest_report = True
            options.results_label = ''

            b = TestBenchmark(page.Page(url='about:blank', name='about:blank'))
            story_runner.RunBenchmark(b, b.CreateStorySet(options), None,
                                      options.browser_options, options)
        finally:
            story_runner.RunStorySet = original_run_fn

        self.assertTrue(was_run[0])
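
The manual save-and-restore of story_runner.RunStorySet in the try/finally above can also be written with mock.patch.object, which restores the original attribute automatically when the with-block exits. A minimal sketch of the same test, assuming the mock library is imported alongside the other test dependencies:

    def testPageTestWithCompatibleStory(self):
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.output_dir = ''
        options.upload_results = False
        options.suppress_gtest_report = True
        options.results_label = ''

        b = TestBenchmark(page.Page(url='about:blank', name='about:blank'))
        # patch.object swaps RunStorySet for a MagicMock and puts the original
        # back on exit, replacing the explicit try/finally bookkeeping.
        with mock.patch.object(story_runner, 'RunStorySet') as run_stub:
            story_runner.RunBenchmark(b, b.CreateStorySet(options), None,
                                      options.browser_options, options)
        self.assertTrue(run_stub.called)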
Example #4
    def testRunBenchmarkTimeDuration(self):
        fake_benchmark = FakeBenchmark()
        options = fakes.CreateBrowserFinderOptions()
        options.upload_results = None
        options.suppress_gtest_report = False
        options.results_label = None
        options.use_live_sites = False
        options.max_failures = 100
        options.pageset_repeat = 1
        options.output_formats = ['chartjson']

        with mock.patch(
                'telemetry.internal.story_runner.time.time') as time_patch:
            # Three side_effect values, because telemetry code asks for the
            # time at some point.
            time_patch.side_effect = [1, 0, 61]
            tmp_path = tempfile.mkdtemp()

            try:
                options.output_dir = tmp_path
                story_runner.RunBenchmark(fake_benchmark, options)
                with open(os.path.join(tmp_path, 'results-chart.json')) as f:
                    data = json.load(f)

                self.assertEqual(len(data['charts']), 1)
                charts = data['charts']
                self.assertIn('benchmark_duration', charts)
                duration = charts['benchmark_duration']
                self.assertIn("summary", duration)
                summary = duration['summary']
                duration = summary['value']
                self.assertAlmostEqual(duration, 1)
            finally:
                shutil.rmtree(tmp_path)
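
On Python 3, the mkdtemp()/rmtree() pairing in the try/finally above could also be written with tempfile.TemporaryDirectory, which removes the directory automatically. A minimal sketch of just that part, keeping the patched clock and assertions unchanged:

            with tempfile.TemporaryDirectory() as tmp_path:
                options.output_dir = tmp_path
                story_runner.RunBenchmark(fake_benchmark, options)
                with open(os.path.join(tmp_path, 'results-chart.json')) as f:
                    data = json.load(f)
                # ... same assertions on data['charts'] as above ...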
Example #5
  def testRunBenchmarkTimeDuration(self):
    fake_benchmark = FakeBenchmark()
    options = self._GenerateBaseBrowserFinderOptions()

    with mock.patch('telemetry.internal.story_runner.time.time') as time_patch:
      # Three side_effect values, because telemetry code asks for the time
      # at some point.
      time_patch.side_effect = [1, 0, 61]
      tmp_path = tempfile.mkdtemp()

      try:
        options.output_dir = tmp_path
        story_runner.RunBenchmark(fake_benchmark, options)
        with open(os.path.join(tmp_path, 'results-chart.json')) as f:
          data = json.load(f)

        self.assertEqual(len(data['charts']), 1)
        charts = data['charts']
        self.assertIn('benchmark_duration', charts)
        duration = charts['benchmark_duration']
        self.assertIn("summary", duration)
        summary = duration['summary']
        duration = summary['value']
        self.assertAlmostEqual(duration, 1)
      finally:
        shutil.rmtree(tmp_path)
Example #6
 def testReturnCodeCaughtException(self):
   fake_benchmark = FakeBenchmark(stories=[
       test_stories.DummyStory(
           'story', run_side_effect=exceptions.AppCrashException())])
   options = self.GetFakeBrowserOptions()
   return_code = story_runner.RunBenchmark(fake_benchmark, options)
   self.assertEqual(return_code, exit_codes.TEST_FAILURE)
Example #7
 def testReturnCodeUnhandleableError(self):
   fake_benchmark = FakeBenchmark(stories=[
       test_stories.DummyStory(
           'story', run_side_effect=MemoryError('Unhandleable'))])
   options = self.GetFakeBrowserOptions()
   return_code = story_runner.RunBenchmark(fake_benchmark, options)
   self.assertEqual(return_code, exit_codes.FATAL_ERROR)
Example #8
  def testRunBenchmark_AddsOwners_NoComponent(self):
    @benchmark.Owner(emails=['*****@*****.**'])
    class FakeBenchmarkWithOwner(FakeBenchmark):
      def __init__(self):
        super(FakeBenchmark, self).__init__()
        self._disabled = False
        self._story_disabled = False

    fake_benchmark = FakeBenchmarkWithOwner()
    options = self._GenerateBaseBrowserFinderOptions()
    options.output_formats = ['histograms']
    temp_path = tempfile.mkdtemp()
    try:
      options.output_dir = temp_path
      story_runner.RunBenchmark(fake_benchmark, options)

      with open(os.path.join(temp_path, 'histograms.json')) as f:
        data = json.load(f)

      hs = histogram_set.HistogramSet()
      hs.ImportDicts(data)

      generic_diagnostics = hs.GetSharedDiagnosticsOfType(
          histogram_module.GenericSet)

      self.assertGreater(len(generic_diagnostics), 0)

      generic_diagnostics_values = [
          list(diagnostic) for diagnostic in generic_diagnostics]

      self.assertIn(['*****@*****.**'], generic_diagnostics_values)

    finally:
      shutil.rmtree(temp_path)
Example #9
 def testRunStoryWithMissingArchiveFile(self):
     fake_benchmark = FakeBenchmark(
         archive_data_file='data/does-not-exist.json')
     options = self.GetFakeBrowserOptions()
     return_code = story_runner.RunBenchmark(fake_benchmark, options)
     self.assertEqual(return_code, 2)  # Benchmark was interrupted.
     self.assertIn('ArchiveError', sys.stderr.getvalue())
Example #10
    def testUnexpectedSkipsWithFiltering(self):
        # We prepare side effects for 50 stories: the first 30 run fine, while
        # the remaining 20 fail with a fatal error.
        fatal_error = MemoryError('this is an unexpected exception')
        side_effects = [None] * 30 + [fatal_error] * 20

        fake_benchmark = FakeBenchmark(stories=(
            test_stories.DummyStory('story_%i' % i, run_side_effect=effect)
            for i, effect in enumerate(side_effects)))

        # Set the filtering to only run from story_10 --> story_40
        options = self.GetFakeBrowserOptions({
            'story_shard_begin_index': 10,
            'story_shard_end_index': 41
        })
        return_code = story_runner.RunBenchmark(fake_benchmark, options)
        self.assertEquals(exit_codes.FATAL_ERROR, return_code)

        # The results should contain entries for story_10 through story_40. Of
        # those, story_30's result is 'FAIL' and stories 31 through 40 are
        # reported as 'SKIP'.
        test_results = self.ReadTestResults()
        self.assertEqual(len(test_results), 31)

        expected = []
        expected.extend(('story_%i' % i, 'PASS') for i in xrange(10, 30))
        expected.append(('story_30', 'FAIL'))
        expected.extend(('story_%i' % i, 'SKIP') for i in xrange(31, 41))

        for (story, status), result in zip(expected, test_results):
            self.assertEqual(result['testPath'], 'fake_benchmark/%s' % story)
            self.assertEqual(result['status'], status)
Example #11
 def testDisabledBenchmarkViaCanRunOnPlatform(self):
     fake_benchmark = FakeBenchmark()
     fake_benchmark.SUPPORTED_PLATFORMS = []
     options = self.GetFakeBrowserOptions()
     story_runner.RunBenchmark(fake_benchmark, options)
     test_results = self.ReadTestResults()
     self.assertFalse(test_results)  # No tests ran at all.
Example #12
    def testArtifactLogsContainHandleableException(self):
        def failed_run():
            logging.warning('This will fail gracefully')
            raise exceptions.TimeoutException('karma!')

        fake_benchmark = FakeBenchmark(stories=[
            test_stories.DummyStory('story1', run_side_effect=failed_run),
            test_stories.DummyStory('story2')
        ])

        options = self.GetFakeBrowserOptions()
        return_code = story_runner.RunBenchmark(fake_benchmark, options)
        self.assertEqual(return_code, exit_codes.TEST_FAILURE)
        test_results = self.ReadTestResults()
        self.assertEqual(len(test_results), 2)

        # First story failed.
        self.assertEqual(test_results[0]['testPath'], 'fake_benchmark/story1')
        self.assertEqual(test_results[0]['status'], 'FAIL')
        self.assertIn('logs.txt', test_results[0]['outputArtifacts'])

        with open(test_results[0]['outputArtifacts']['logs.txt']
                  ['filePath']) as f:
            test_log = f.read()

        # Ensure that the log contains warning messages and python stack.
        self.assertIn('Handleable error', test_log)
        self.assertIn('This will fail gracefully', test_log)
        self.assertIn("raise exceptions.TimeoutException('karma!')", test_log)

        # Second story ran fine.
        self.assertEqual(test_results[1]['testPath'], 'fake_benchmark/story2')
        self.assertEqual(test_results[1]['status'], 'PASS')
Example #13
    def testArtifactLogsContainUnhandleableException(self):
        def failed_run():
            logging.warning('This will fail badly')
            raise MemoryError('this is a fatal exception')

        fake_benchmark = FakeBenchmark(stories=[
            test_stories.DummyStory('story1', run_side_effect=failed_run),
            test_stories.DummyStory('story2')
        ])

        options = self.GetFakeBrowserOptions()
        return_code = story_runner.RunBenchmark(fake_benchmark, options)
        self.assertEqual(return_code, exit_codes.FATAL_ERROR)
        test_results = self.ReadTestResults()
        self.assertEqual(len(test_results), 2)

        # First story failed.
        self.assertEqual(test_results[0]['testPath'], 'fake_benchmark/story1')
        self.assertEqual(test_results[0]['status'], 'FAIL')
        self.assertIn('logs.txt', test_results[0]['outputArtifacts'])

        with open(test_results[0]['outputArtifacts']['logs.txt']
                  ['filePath']) as f:
            test_log = f.read()

        # Ensure that the log contains warning messages and python stack.
        self.assertIn('Unhandleable error', test_log)
        self.assertIn('This will fail badly', test_log)
        self.assertIn("raise MemoryError('this is a fatal exception')",
                      test_log)

        # Second story was skipped.
        self.assertEqual(test_results[1]['testPath'], 'fake_benchmark/story2')
        self.assertEqual(test_results[1]['status'], 'SKIP')
Example #14
 def testReturnCodeDisabledStory(self):
     fake_benchmark = FakeBenchmark(stories=['fake_story'])
     fake_story_filter = FakeStoryFilter(stories_to_skip=['fake_story'])
     options = self.GetFakeBrowserOptions()
     with mock.patch(
             'telemetry.story.story_filter.StoryFilterFactory.BuildStoryFilter',
             return_value=fake_story_filter):
         return_code = story_runner.RunBenchmark(fake_benchmark, options)
     self.assertEqual(return_code, exit_codes.ALL_TESTS_SKIPPED)
Example #15
 def testRangeIndexRanges(self):
     fake_benchmark = FakeBenchmark(
         stories=(test_stories.DummyStory('story_%i' % i)
                  for i in range(100)))
     options = self.GetFakeBrowserOptions(
         {'story_shard_indexes': "-10, 20-30, 90-"})
     story_runner.RunBenchmark(fake_benchmark, options)
     test_results = self.ReadTestResults()
     self.assertEqual(len(test_results), 30)
Example #16
    def testValidateBenchmarkName(self):
        class FakeBenchmarkWithBadName(FakeBenchmark):
            NAME = 'bad/benchmark (name)'

        fake_benchmark = FakeBenchmarkWithBadName()
        options = self.GetFakeBrowserOptions()
        return_code = story_runner.RunBenchmark(fake_benchmark, options)
        self.assertEqual(return_code, 2)
        self.assertIn('Invalid benchmark name', sys.stderr.getvalue())
Example #17
 def testFullRun(self):
   options = self.GetFakeBrowserOptions()
   story_filter.StoryFilterFactory.ProcessCommandLineArgs(
       parser=None, args=options)
   fake_benchmark = FakeBenchmark(stories=[
       test_stories.DummyStory('story1', tags=['important']),
       test_stories.DummyStory('story2', tags=['other']),
   ], abridging_tag='important')
   story_runner.RunBenchmark(fake_benchmark, options)
   test_results = self.ReadTestResults()
   self.assertEqual(len(test_results), 2)
Example #18
 def testSkippedWithStoryFilter(self):
     fake_benchmark = FakeBenchmark(stories=['fake_story'])
     options = self.GetFakeBrowserOptions()
     fake_story_filter = FakeStoryFilter(stories_to_skip=['fake_story'])
     with mock.patch(
             'telemetry.story.story_filter.StoryFilterFactory.BuildStoryFilter',
             return_value=fake_story_filter):
         story_runner.RunBenchmark(fake_benchmark, options)
     test_results = self.ReadTestResults()
     self.assertTrue(test_results)  # Some tests ran, but all skipped.
     self.assertTrue(all(t['status'] == 'SKIP' for t in test_results))
Example #19
 def testDeviceInfo(self):
     fake_benchmark = FakeBenchmark(stories=['fake_story'])
     options = self.GetFakeBrowserOptions()
     options.fake_possible_browser = fakes.FakePossibleBrowser(
         arch_name='abc', os_name='win', os_version_name='win10')
     story_runner.RunBenchmark(fake_benchmark, options)
     test_results = self.ReadTestResults()
     diagnostics = ReadDiagnostics(test_results[0])
     self.assertEqual(diagnostics['architectures'], ['abc'])
     self.assertEqual(diagnostics['osNames'], ['win'])
     self.assertEqual(diagnostics['osVersions'], ['win10'])
Example #20
 def testStoryFlag(self):
     options = self.GetFakeBrowserOptions()
     args = fakes.FakeParsedArgsForStoryFilter(stories=['story1', 'story3'])
     story_filter.StoryFilterFactory.ProcessCommandLineArgs(parser=None,
                                                            args=args)
     fake_benchmark = FakeBenchmark(stories=['story1', 'story2', 'story3'])
     story_runner.RunBenchmark(fake_benchmark, options)
     test_results = self.ReadTestResults()
     self.assertEqual(len(test_results), 2)
     self.assertTrue(test_results[0]['testPath'].endswith('/story1'))
     self.assertTrue(test_results[1]['testPath'].endswith('/story3'))
Example #21
 def testRunBenchmarkDisabledStoryWithBadName(self):
   fake_benchmark = FakeBenchmark()
   fake_benchmark.story_disabled = True
   options = self._GenerateBaseBrowserFinderOptions()
   tmp_path = tempfile.mkdtemp()
   try:
     options.output_dir = tmp_path
     rc = story_runner.RunBenchmark(fake_benchmark, options)
     # Test should return 0 since only error messages are logged.
     self.assertEqual(rc, 0)
   finally:
     shutil.rmtree(tmp_path)
Example #22
    def testWithOwnerInfoButNoUrl(self):
        @benchmark.Owner(emails=['*****@*****.**'])
        class FakeBenchmarkWithOwner(FakeBenchmark):
            pass

        fake_benchmark = FakeBenchmarkWithOwner()
        options = self.GetFakeBrowserOptions()
        story_runner.RunBenchmark(fake_benchmark, options)
        test_results = self.ReadTestResults()
        diagnostics = ReadDiagnostics(test_results[0])
        self.assertEqual(diagnostics['owners'], ['*****@*****.**'])
        self.assertNotIn('documentationLinks', diagnostics)
Example #23
 def testOneStoryFilteredOneNot(self):
     fake_story_filter = FakeStoryFilter(stories_to_filter_out=['story1'])
     fake_benchmark = FakeBenchmark(stories=['story1', 'story2'])
     options = self.GetFakeBrowserOptions()
     with mock.patch(
             'telemetry.story.story_filter.StoryFilterFactory.BuildStoryFilter',
             return_value=fake_story_filter):
         story_runner.RunBenchmark(fake_benchmark, options)
     test_results = self.ReadTestResults()
     self.assertEqual(len(test_results), 1)
     self.assertEqual(test_results[0]['status'], 'PASS')
     self.assertTrue(test_results[0]['testPath'].endswith('/story2'))
Example #24
 def testAbridged(self):
   options = self.GetFakeBrowserOptions()
   options.run_abridged_story_set = True
   story_filter.StoryFilterFactory.ProcessCommandLineArgs(
       parser=None, args=options)
   fake_benchmark = FakeBenchmark(stories=[
       test_stories.DummyStory('story1', tags=['important']),
       test_stories.DummyStory('story2', tags=['other']),
   ], abridging_tag='important')
   story_runner.RunBenchmark(fake_benchmark, options)
   test_results = self.ReadTestResults()
   self.assertEqual(len(test_results), 1)
   self.assertTrue(test_results[0]['testPath'].endswith('/story1'))
Example #25
 def testRunBenchmarkDisabledBenchmark(self):
   fake_benchmark = FakeBenchmark()
   fake_benchmark.disabled = True
   options = self._GenerateBaseBrowserFinderOptions()
   tmp_path = tempfile.mkdtemp()
   try:
     options.output_dir = tmp_path
     story_runner.RunBenchmark(fake_benchmark, options)
     with open(os.path.join(tmp_path, 'results-chart.json')) as f:
       data = json.load(f)
     self.assertFalse(data['enabled'])
   finally:
     shutil.rmtree(tmp_path)
Example #26
 def testOneStorySkippedOneNot(self):
     fake_story_filter = FakeStoryFilter(stories_to_skip=['story1'])
     fake_benchmark = FakeBenchmark(stories=['story1', 'story2'])
     options = self.GetFakeBrowserOptions()
     with mock.patch(
             'telemetry.story.story_filter.StoryFilterFactory.BuildStoryFilter',
             return_value=fake_story_filter):
         story_runner.RunBenchmark(fake_benchmark, options)
     test_results = self.ReadTestResults()
     status = [t['status'] for t in test_results]
     self.assertEqual(len(status), 2)
     self.assertIn('SKIP', status)
     self.assertIn('PASS', status)
Example #27
  def testDownloadMinimalServingDirs(self):
    fake_benchmark = FakeBenchmark(stories=[
        test_stories.DummyStory(
            'story_foo', serving_dir='/files/foo', tags=['foo']),
        test_stories.DummyStory(
            'story_bar', serving_dir='/files/bar', tags=['bar']),
    ], cloud_bucket=cloud_storage.PUBLIC_BUCKET)
    options = self.GetFakeBrowserOptions(overrides={'story_tag_filter': 'foo'})
    with mock.patch(
        'py_utils.cloud_storage.GetFilesInDirectoryIfChanged') as get_files:
      story_runner.RunBenchmark(fake_benchmark, options)

    # Foo is the only included story serving dir.
    self.assertEqual(get_files.call_count, 1)
    get_files.assert_called_once_with('/files/foo', cloud_storage.PUBLIC_BUCKET)
Example #28
    def testWithOwnerInfo(self):
        @benchmark.Owner(emails=['*****@*****.**', '*****@*****.**'],
                         component='fooBar',
                         documentation_url='https://example.com/')
        class FakeBenchmarkWithOwner(FakeBenchmark):
            pass

        fake_benchmark = FakeBenchmarkWithOwner()
        options = self.GetFakeBrowserOptions()
        story_runner.RunBenchmark(fake_benchmark, options)
        test_results = self.ReadTestResults()
        diagnostics = ReadDiagnostics(test_results[0])
        self.assertEqual(diagnostics['owners'],
                         ['*****@*****.**', '*****@*****.**'])
        self.assertEqual(diagnostics['bugComponents'], ['fooBar'])
        self.assertEqual(
            diagnostics['documentationLinks'],
            [['Benchmark documentation link', 'https://example.com/']])
Example #29
 def Run(self, finder_options):
     """Do not override this method."""
     finder_options.target_platforms = self.GetSupportedPlatformNames(
         self.SUPPORTED_PLATFORMS)
     return story_runner.RunBenchmark(self, finder_options)
Example #30
 def Run(self, finder_options):
     """Do not override this method."""
     return story_runner.RunBenchmark(self, finder_options)