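# Excerpts from Telemetry's story_runner and results unit tests. They assume
# the module-level imports of the original test files (not shown here), e.g.:
# logging, sys, mock, exceptions, exit_codes, story_runner, story_filter,
# cloud_storage, test_stories, FakeBenchmark, page_test_results, trace_data,
# and telemetry_tracing_agent.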
    def testArtifactLogsContainHandleableException(self):
        def failed_run():
            logging.warning('This will fail gracefully')
            raise exceptions.TimeoutException('karma!')

        fake_benchmark = FakeBenchmark(stories=[
            test_stories.DummyStory('story1', run_side_effect=failed_run),
            test_stories.DummyStory('story2')
        ])

        options = self.GetFakeBrowserOptions()
        return_code = story_runner.RunBenchmark(fake_benchmark, options)
        self.assertEqual(return_code, exit_codes.TEST_FAILURE)
        test_results = self.ReadTestResults()
        self.assertEqual(len(test_results), 2)

        # First story failed.
        self.assertEqual(test_results[0]['testPath'], 'fake_benchmark/story1')
        self.assertEqual(test_results[0]['status'], 'FAIL')
        self.assertIn('logs.txt', test_results[0]['outputArtifacts'])

        with open(test_results[0]['outputArtifacts']['logs.txt']
                  ['filePath']) as f:
            test_log = f.read()

        # Ensure that the log contains warning messages and the Python
        # stack trace.
        self.assertIn('Handleable error', test_log)
        self.assertIn('This will fail gracefully', test_log)
        self.assertIn("raise exceptions.TimeoutException('karma!')", test_log)

        # Second story ran fine.
        self.assertEqual(test_results[1]['testPath'], 'fake_benchmark/story2')
        self.assertEqual(test_results[1]['status'], 'PASS')
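  # Note: in the original test class the two mock arguments below are
  # injected by a pair of @mock.patch decorators (not shown in this
  # excerpt); they stand in for the shared state's TearDownState and
  # DumpStateUponStoryRunFailure methods.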
  def testAppCrashThenRaiseInTearDown_Interrupted(
      self, tear_down_state, dump_state_upon_story_run_failure):
    class TearDownStateException(Exception):
      pass

    tear_down_state.side_effect = TearDownStateException()
    root_mock = mock.Mock()
    root_mock.attach_mock(tear_down_state, 'state.TearDownState')
    root_mock.attach_mock(dump_state_upon_story_run_failure,
                          'state.DumpStateUponStoryRunFailure')
    self.RunStories([
        test_stories.DummyStory(
            'foo', run_side_effect=exceptions.AppCrashException(msg='crash!')),
        test_stories.DummyStory('bar')])

    self.assertEqual([call[0] for call in root_mock.mock_calls], [
        'state.DumpStateUponStoryRunFailure',
        # This tear down happens because of the app crash.
        'state.TearDownState',
        # This one happens since state must be re-created to check whether
        # later stories should be skipped or unexpectedly skipped. Then
        # state is torn down normally at the end of the runs.
        'state.TearDownState'
    ])

    test_results = self.ReadTestResults()
    self.assertEqual(len(test_results), 2)
    # First story unexpectedly failed with AppCrashException.
    self.assertEqual(test_results[0]['status'], 'FAIL')
    self.assertFalse(test_results[0]['expected'])
    # Second story unexpectedly skipped due to exception during tear down.
    self.assertEqual(test_results[1]['status'], 'SKIP')
    self.assertFalse(test_results[1]['expected'])
    def testArtifactLogsContainUnhandleableException(self):
        def failed_run():
            logging.warning('This will fail badly')
            raise MemoryError('this is a fatal exception')

        fake_benchmark = FakeBenchmark(stories=[
            test_stories.DummyStory('story1', run_side_effect=failed_run),
            test_stories.DummyStory('story2')
        ])

        options = self.GetFakeBrowserOptions()
        return_code = story_runner.RunBenchmark(fake_benchmark, options)
        self.assertEqual(return_code, exit_codes.FATAL_ERROR)
        test_results = self.ReadTestResults()
        self.assertEqual(len(test_results), 2)

        # First story failed.
        self.assertEqual(test_results[0]['testPath'], 'fake_benchmark/story1')
        self.assertEqual(test_results[0]['status'], 'FAIL')
        self.assertIn('logs.txt', test_results[0]['outputArtifacts'])

        with open(test_results[0]['outputArtifacts']['logs.txt']
                  ['filePath']) as f:
            test_log = f.read()

        # Ensure that the log contains warning messages and the Python
        # stack trace.
        self.assertIn('Unhandleable error', test_log)
        self.assertIn('This will fail badly', test_log)
        self.assertIn("raise MemoryError('this is a fatal exception')",
                      test_log)

        # Second story was skipped.
        self.assertEqual(test_results[1]['testPath'], 'fake_benchmark/story2')
        self.assertEqual(test_results[1]['status'], 'SKIP')
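  # A browser crash in one story is handled: that story is marked FAIL, its
  # crash message goes to stderr, and the remaining story still passes.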
  def testRaiseBrowserGoneExceptionFromRunPage(self):
    self.RunStories([
        test_stories.DummyStory(
            'foo', run_side_effect=exceptions.BrowserGoneException(
                None, 'i am a browser crash message')),
        test_stories.DummyStory('bar')])
    test_results = self.ReadTestResults()
    self.assertEqual(['FAIL', 'PASS'],
                     [test['status'] for test in test_results])
    self.assertIn('i am a browser crash message', sys.stderr.getvalue())
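  # Without run_abridged_story_set, the abridging tag is ignored and both
  # stories run.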
  def testFullRun(self):
    options = self.GetFakeBrowserOptions()
    story_filter.StoryFilterFactory.ProcessCommandLineArgs(
        parser=None, args=options)
    fake_benchmark = FakeBenchmark(stories=[
        test_stories.DummyStory('story1', tags=['important']),
        test_stories.DummyStory('story2', tags=['other']),
    ], abridging_tag='important')
    story_runner.RunBenchmark(fake_benchmark, options)
    test_results = self.ReadTestResults()
    self.assertEqual(len(test_results), 2)
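  # An exception type unknown to Telemetry fails the current story but is
  # not fatal: the following story still runs and passes.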
  def testUnknownExceptionIsNotFatal(self):
    class UnknownException(Exception):
      pass

    self.RunStories([
        test_stories.DummyStory(
            'foo', run_side_effect=UnknownException('FooException')),
        test_stories.DummyStory('bar')])
    test_results = self.ReadTestResults()
    self.assertEqual(['FAIL', 'PASS'],
                     [test['status'] for test in test_results])
    self.assertIn('FooException', sys.stderr.getvalue())
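  # With run_abridged_story_set enabled, only stories carrying the
  # abridging tag ('important') are run.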
  def testAbridged(self):
    options = self.GetFakeBrowserOptions()
    options.run_abridged_story_set = True
    story_filter.StoryFilterFactory.ProcessCommandLineArgs(
        parser=None, args=options)
    fake_benchmark = FakeBenchmark(stories=[
        test_stories.DummyStory('story1', tags=['important']),
        test_stories.DummyStory('story2', tags=['other']),
    ], abridging_tag='important')
    story_runner.RunBenchmark(fake_benchmark, options)
    test_results = self.ReadTestResults()
    self.assertEqual(len(test_results), 1)
    self.assertTrue(test_results[0]['testPath'].endswith('/story1'))
  def testDownloadMinimalServingDirs(self):
    fake_benchmark = FakeBenchmark(stories=[
        test_stories.DummyStory(
            'story_foo', serving_dir='/files/foo', tags=['foo']),
        test_stories.DummyStory(
            'story_bar', serving_dir='/files/bar', tags=['bar']),
    ], cloud_bucket=cloud_storage.PUBLIC_BUCKET)
    options = self.GetFakeBrowserOptions(overrides={'story_tag_filter': 'foo'})
    with mock.patch(
        'py_utils.cloud_storage.GetFilesInDirectoryIfChanged') as get_files:
      story_runner.RunBenchmark(fake_benchmark, options)

    # Foo is the only included story serving dir.
    self.assertEqual(get_files.call_count, 1)
    get_files.assert_called_once_with('/files/foo', cloud_storage.PUBLIC_BUCKET)
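  # A caught (handleable) exception during a story run yields the
  # TEST_FAILURE return code.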
  def testReturnCodeCaughtException(self):
    fake_benchmark = FakeBenchmark(stories=[
        test_stories.DummyStory(
            'story', run_side_effect=exceptions.AppCrashException())])
    options = self.GetFakeBrowserOptions()
    return_code = story_runner.RunBenchmark(fake_benchmark, options)
    self.assertEqual(return_code, exit_codes.TEST_FAILURE)
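  # An unhandleable error such as MemoryError yields the FATAL_ERROR
  # return code.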
  def testReturnCodeUnhandleableError(self):
    fake_benchmark = FakeBenchmark(stories=[
        test_stories.DummyStory(
            'story', run_side_effect=MemoryError('Unhandleable'))])
    options = self.GetFakeBrowserOptions()
    return_code = story_runner.RunBenchmark(fake_benchmark, options)
    self.assertEqual(return_code, exit_codes.FATAL_ERROR)
    def testUnexpectedSkipsWithFiltering(self):
        # We prepare side effects for 50 stories: the first 30 run fine,
        # while the remaining 20 fail with a fatal error.
        fatal_error = MemoryError('this is an unexpected exception')
        side_effects = [None] * 30 + [fatal_error] * 20

        fake_benchmark = FakeBenchmark(stories=(
            test_stories.DummyStory('story_%i' % i, run_side_effect=effect)
            for i, effect in enumerate(side_effects)))

        # Set the filtering to only run story_10 --> story_40 (the end
        # index is exclusive).
        options = self.GetFakeBrowserOptions({
            'story_shard_begin_index': 10,
            'story_shard_end_index': 41
        })
        return_code = story_runner.RunBenchmark(fake_benchmark, options)
        self.assertEqual(exit_codes.FATAL_ERROR, return_code)

        # The results should contain entries for story_10 --> story_40. Of
        # those entries, story_30's actual result is 'FAIL' and stories
        # story_31 through story_40 show 'SKIP'.
        test_results = self.ReadTestResults()
        self.assertEqual(len(test_results), 31)

        expected = []
        expected.extend(('story_%i' % i, 'PASS') for i in range(10, 30))
        expected.append(('story_30', 'FAIL'))
        expected.extend(('story_%i' % i, 'SKIP') for i in range(31, 41))

        for (story, status), result in zip(expected, test_results):
            self.assertEqual(result['testPath'], 'fake_benchmark/%s' % story)
            self.assertEqual(result['status'], status)
  def testAppCrashExceptionCausesFailure(self):
    self.RunStories([test_stories.DummyStory(
        'story',
        run_side_effect=exceptions.AppCrashException(msg='App Foo crashes'))])
    test_results = self.ReadTestResults()
    self.assertEqual(['FAIL'],
                     [test['status'] for test in test_results])
    self.assertIn('App Foo crashes', sys.stderr.getvalue())
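  # '-10, 20-30, 90-' selects stories 0-9, 20-29 and 90-99 (begin indexes
  # are inclusive, end indexes exclusive), i.e. 30 stories in total.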
  def testRangeIndexRanges(self):
    fake_benchmark = FakeBenchmark(
        stories=(test_stories.DummyStory('story_%i' % i)
                 for i in range(100)))
    options = self.GetFakeBrowserOptions(
        {'story_shard_indexes': '-10, 20-30, 90-'})
    story_runner.RunBenchmark(fake_benchmark, options)
    test_results = self.ReadTestResults()
    self.assertEqual(len(test_results), 30)
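  # The tests below check the gtest-style progress output ([ RUN      ],
  # [       OK ], ...); story timings come from the mocked clock.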
  def testSingleSuccessfulStory(self):
    story1 = test_stories.DummyStory('story1')
    with self.CreateResults() as results:
      with results.CreateStoryRun(story1):
        self._mock_time.return_value = 0.007

    expected = ('[ RUN      ] benchmark/story1\n'
                '[       OK ] benchmark/story1 (7 ms)\n'
                '[  PASSED  ] 1 test.\n\n')
    self.assertOutputEquals(expected)
  def testSingleSkippedStory(self):
    story1 = test_stories.DummyStory('story1')
    with self.CreateResults() as results:
      with results.CreateStoryRun(story1):
        self._mock_time.return_value = 0.007
        results.Skip('Story skipped for testing reason')

    expected = ('[ RUN      ] benchmark/story1\n'
                '== Skipping story: Story skipped for testing reason ==\n'
                '[  SKIPPED ] benchmark/story1 (7 ms)\n'
                '[  PASSED  ] 0 tests.\n'
                '[  SKIPPED ] 1 test.\n\n')
    self.assertOutputEquals(expected)
  def testSingleFailedStory(self):
    story1 = test_stories.DummyStory('story1')
    with self.CreateResults() as results:
      with results.CreateStoryRun(story1):
        results.Fail('test fails')

    expected = ('[ RUN      ] benchmark/story1\n'
                '[  FAILED  ] benchmark/story1 (0 ms)\n'
                '[  PASSED  ] 0 tests.\n'
                '[  FAILED  ] 1 test, listed below:\n'
                '[  FAILED  ]  benchmark/story1\n\n'
                '1 FAILED TEST\n\n')
    self.assertOutputEquals(expected)
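  # RecordBenchmarkMetadata should store the benchmark name in the trace
  # metadata under metadata['telemetry']['benchmarks'].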
  def testWriteBenchmarkMetadata(self):
    story = test_stories.DummyStory('story')
    with page_test_results.PageTestResults(
        benchmark_name='benchmark_name') as results:
      with results.CreateStoryRun(story):
        self.agent.StartAgentTracing(self.config, timeout=10)
        telemetry_tracing_agent.RecordBenchmarkMetadata(results)
        self.agent.StopAgentTracing()
        with trace_data.TraceDataBuilder() as builder:
          self.agent.CollectAgentTraceData(builder)
          trace = builder.AsData().GetTraceFor(trace_data.TELEMETRY_PART)
    benchmarks = trace['metadata']['telemetry']['benchmarks']
    self.assertEqual(len(benchmarks), 1)
    self.assertEqual(benchmarks[0], 'benchmark_name')
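  # Helper shared by the max_failures tests: stories fail until the
  # max_failures limit is hit, after which the remaining stories are
  # skipped; options.max_failures, when set, overrides the runner default.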
  def _testMaxFailuresOptionIsRespectedAndOverridable(
      self, num_failing_stories, runner_max_failures, options_max_failures,
      expected_num_failures, expected_num_skips):
    if options_max_failures:
      self.options.max_failures = options_max_failures
    self.RunStories([
        test_stories.DummyStory(
            'failing_%d' % i, run_side_effect=Exception('boom!'))
        for i in range(num_failing_stories)
    ], max_failures=runner_max_failures)
    test_results = self.ReadTestResults()
    self.assertEqual(len(test_results),
                     expected_num_failures + expected_num_skips)
    for i, test in enumerate(test_results):
      expected_status = 'FAIL' if i < expected_num_failures else 'SKIP'
      self.assertEqual(test['status'], expected_status)