Example #1
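# Runs a single story and translates errors into result values: an unsupported
# page action or an unsupported story is recorded as a SkipValue, recoverable
# test failures are recorded as FailureValues, and Telemetry errors and other
# unexpected exceptions are recorded and then re-raised.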
def _RunStoryAndProcessErrorIfNeeded(story, results, state, test):
    def ProcessError():
        results.AddValue(failure.FailureValue(story, sys.exc_info()))

    try:
        if isinstance(test, story_test.StoryTest):
            test.WillRunStory(state.platform)
        state.WillRunStory(story)
        if not state.CanRunStory(story):
            results.AddValue(
                skip.SkipValue(
                    story, 'Skipped because story is not supported '
                    '(SharedState.CanRunStory() returns False).'))
            return
        state.RunStory(results)
        if isinstance(test, story_test.StoryTest):
            test.Measure(state.platform, results)
    except (page_test.Failure, exceptions.TimeoutException,
            exceptions.LoginException, exceptions.ProfilingException):
        ProcessError()
    except exceptions.Error:
        ProcessError()
        raise
    except page_action.PageActionNotSupported as e:
        results.AddValue(
            skip.SkipValue(story, 'Unsupported page action: %s' % e))
    except Exception:
        results.AddValue(
            failure.FailureValue(story, sys.exc_info(),
                                 'Unhandlable exception raised.'))
        raise
    finally:
        has_existing_exception = (sys.exc_info() != (None, None, None))
        try:
            state.DidRunStory(results)
            # If state.DidRunStory raises an exception, things are messed up badly
            # and we do not need to run test.DidRunStory at that point.
            if isinstance(test, story_test.StoryTest):
                test.DidRunStory(state.platform)
            else:
                test.DidRunPage(state.platform)
        except Exception:
            if not has_existing_exception:
                raise
            # Print current exception and propagate existing exception.
            exception_formatter.PrintFormattedException(
                msg='Exception raised when cleaning story run: ')
Example #2
 def testBuildbotAndRepresentativeValue(self):
   v = skip.SkipValue(self.pages[0], 'page skipped for testing reason')
   self.assertIsNone(v.GetBuildbotValue())
   self.assertIsNone(v.GetBuildbotDataType(
       value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
   self.assertIsNone(v.GetChartAndTraceNameForPerPageResult())
   self.assertIsNone(v.GetRepresentativeNumber())
   self.assertIsNone(v.GetRepresentativeString())
Example #3
    def testAsDictWithRepeatedTests(self):
        results = page_test_results.PageTestResults()
        results.telemetry_info.benchmark_start_epoch = 1501773200
        results.telemetry_info.benchmark_name = 'benchmark_name'

        results.WillRunPage(self._story_set[0])
        v0 = scalar.ScalarValue(
            results.current_page,
            'foo',
            'seconds',
            3,
            improvement_direction=improvement_direction.DOWN)
        results.AddValue(v0)
        results.DidRunPage(self._story_set[0])

        results.WillRunPage(self._story_set[1])
        v1 = skip.SkipValue(results.current_page, 'fake_skip')
        results.AddValue(v1)
        results.DidRunPage(self._story_set[1])

        results.WillRunPage(self._story_set[0])
        v0 = scalar.ScalarValue(
            results.current_page,
            'foo',
            'seconds',
            3,
            improvement_direction=improvement_direction.DOWN)
        results.AddValue(v0)
        results.DidRunPage(self._story_set[0])

        results.WillRunPage(self._story_set[1])
        v1 = skip.SkipValue(results.current_page, 'fake_skip')
        results.AddValue(v1)
        results.DidRunPage(self._story_set[1])

        d = json_3_output_formatter.ResultsAsDict(results)
        foo_story_result = d['tests']['benchmark_name']['Foo']
        self.assertEquals(foo_story_result['actual'], 'PASS')
        self.assertEquals(foo_story_result['expected'], 'PASS')

        bar_story_result = d['tests']['benchmark_name']['Bar']
        self.assertEquals(bar_story_result['actual'], 'SKIP')
        self.assertEquals(bar_story_result['expected'], 'SKIP')

        self.assertEquals(d['num_failures_by_type'], {'SKIP': 2, 'PASS': 2})
Example #4
  def testRepr(self):
    v = skip.SkipValue(self.pages[0], 'page skipped for testing reason',
                       description='desc')

    expected = ('SkipValue(http://www.bar.com/, '
                'page skipped for testing reason, '
                'description=desc)')

    self.assertEquals(expected, str(v))
Example #5
    def testStoryRunSkipped(self):
        run = story_run.StoryRun(self.stories[0])
        run.AddValue(failure.FailureValue.FromMessage(self.stories[0], 'test'))
        run.AddValue(skip.SkipValue(self.stories[0], 'test'))
        self.assertFalse(run.ok)
        self.assertFalse(run.failed)
        self.assertTrue(run.skipped)

        run = story_run.StoryRun(self.stories[0])
        run.AddValue(
            scalar.ScalarValue(self.stories[0],
                               'a',
                               's',
                               1,
                               improvement_direction=improvement_direction.UP))
        run.AddValue(skip.SkipValue(self.stories[0], 'test'))
        self.assertFalse(run.ok)
        self.assertFalse(run.failed)
        self.assertTrue(run.skipped)
Example #6
def _RunUserStoryAndProcessErrorIfNeeded(expectations, user_story, results,
                                         state):
    def ProcessError():
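        # An expected failure is only logged; anything else is recorded as a
        # FailureValue so that it shows up in the results.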
        if expectation == 'fail':
            msg = 'Expected exception while running %s' % user_story.display_name
            exception_formatter.PrintFormattedException(msg=msg)
        else:
            msg = 'Exception while running %s' % user_story.display_name
            results.AddValue(failure.FailureValue(user_story, sys.exc_info()))

    try:
        expectation = None
        state.WillRunUserStory(user_story)
        expectation, skip_value = state.GetTestExpectationAndSkipValue(
            expectations)
        if expectation == 'skip':
            assert skip_value
            results.AddValue(skip_value)
            return
        state.RunUserStory(results)
    except (page_test.Failure, exceptions.TimeoutException,
            exceptions.LoginException, exceptions.ProfilingException):
        ProcessError()
    except exceptions.Error:
        ProcessError()
        raise
    except page_action.PageActionNotSupported as e:
        results.AddValue(
            skip.SkipValue(user_story, 'Unsupported page action: %s' % e))
    except Exception:
        results.AddValue(
            failure.FailureValue(user_story, sys.exc_info(),
                                 'Unhandlable exception raised.'))
        raise
    else:
        if expectation == 'fail':
            logging.warning('%s was expected to fail, but passed.\n',
                            user_story.display_name)
    finally:
        has_existing_exception = sys.exc_info() != (None, None, None)
        try:
            state.DidRunUserStory(results)
        except Exception:
            if not has_existing_exception:
                raise
            # Print current exception and propagate existing exception.
            exception_formatter.PrintFormattedException(
                msg='Exception from DidRunUserStory: ')
Example #7
 def ValidateAndMeasurePage(self, page, tab, results):
   del page  # unused
   # Trigger GC to get histogram data.
   # Seven GCs should be enough to collect any detached context.
   # If a detached context survives more GCs then there is a leak.
   MAX_AGE = 8
   for _ in xrange(MAX_AGE):
     tab.CollectGarbage()
   value = _GetMaxDetachedContextAge(tab, self._data_start)
   if value is None:
     results.AddValue(skip.SkipValue(
         results.current_page, 'No detached contexts'))
   else:
     results.AddValue(scalar.ScalarValue(
         results.current_page, _DISPLAY_NAME, _UNITS, value,
         description=_DESCRIPTION))
Example #8
  def testSingleSkippedPage(self):
    test_story_set = _MakeStorySet()
    results = page_test_results.PageTestResults(
        progress_reporter=self._reporter)
    results.WillRunPage(test_story_set.stories[0])
    self._fake_timer.SetTime(0.007)
    results.AddValue(skip.SkipValue(test_story_set.stories[0],
        'Page skipped for testing reason'))
    results.DidRunPage(test_story_set.stories[0])

    results.PrintSummary()
    expected = ('[ RUN      ] http://www.foo.com/\n'
                '===== SKIPPING TEST http://www.foo.com/:'
                ' Page skipped for testing reason =====\n'
                '[       OK ] http://www.foo.com/ (7 ms)\n'
                '[  PASSED  ] 1 test.\n\n')
    self.assertEquals(expected, ''.join(self._output_stream.output_data))
Example #9
    def testSkips(self):
        results = page_test_results.PageTestResults()
        results.WillRunPage(self.pages[0])
        results.AddValue(skip.SkipValue(self.pages[0], 'testing reason'))
        results.DidRunPage(self.pages[0])

        results.WillRunPage(self.pages[1])
        results.DidRunPage(self.pages[1])

        self.assertTrue(results.all_page_runs[0].skipped)
        self.assertEqual(self.pages[0], results.all_page_runs[0].story)
        self.assertEqual(set([self.pages[0], self.pages[1]]),
                         results.pages_that_succeeded)

        self.assertEqual(2, len(results.all_page_runs))
        self.assertTrue(results.all_page_runs[0].skipped)
        self.assertTrue(results.all_page_runs[1].ok)
Example #10
    def testSkipValueCannotBeFiltered(self):
        def AcceptValueNamed_a(value, _):
            return value.name == 'a'

        results = page_test_results.PageTestResults(
            value_can_be_added_predicate=AcceptValueNamed_a)
        results.WillRunPage(self.pages[0])
        skip_value = skip.SkipValue(self.pages[0], 'skip for testing')
        results.AddValue(scalar.ScalarValue(self.pages[0], 'b', 'seconds', 8))
        results.AddValue(skip_value)
        results.DidRunPage(self.pages[0])
        results.PrintSummary()

        # Although the predicate only accepts values named 'a', the skip value is
        # added anyway.
        self.assertEquals(len(results.all_page_specific_values), 1)
        self.assertIn(skip_value, results.all_page_specific_values)
Example #11
def _RunUserStoryAndProcessErrorIfNeeded(test, expectations, user_story,
                                         results, state):
    expectation = None

    def ProcessError():
        if expectation == 'fail':
            msg = 'Expected exception while running %s' % user_story.display_name
            exception_formatter.PrintFormattedException(msg=msg)
        else:
            msg = 'Exception while running %s' % user_story.display_name
            results.AddValue(failure.FailureValue(user_story, sys.exc_info()))

    try:
        state.WillRunUserStory(user_story)
        expectation, skip_value = state.GetTestExpectationAndSkipValue(
            expectations)
        if expectation == 'skip':
            assert skip_value
            results.AddValue(skip_value)
            return
        state.RunUserStory(results)
    except page_test.TestNotSupportedOnPlatformFailure:
        raise
    except (page_test.Failure, util.TimeoutException,
            exceptions.LoginException, exceptions.ProfilingException):
        ProcessError()
    except exceptions.AppCrashException:
        ProcessError()
        state.TearDownState(results)
        if test.is_multi_tab_test:
            logging.error(
                'Aborting multi-tab test after browser or tab crashed at '
                'user story %s' % user_story.display_name)
            test.RequestExit()
            return
    except page_action.PageActionNotSupported as e:
        results.AddValue(
            skip.SkipValue(user_story, 'Unsupported page action: %s' % e))
    else:
        if expectation == 'fail':
            logging.warning('%s was expected to fail, but passed.\n',
                            user_story.display_name)
    finally:
        state.DidRunUserStory(results)
Example #12
    def testOutputSkipInformation(self):
        test_page_set = _MakePageSet()
        self._reporter = gtest_progress_reporter.GTestProgressReporter(
            self._output_stream, output_skipped_tests_summary=True)
        results = page_test_results.PageTestResults(
            progress_reporter=self._reporter)
        results.WillRunPage(test_page_set.pages[0])
        self._mock_timer.SetTime(0.007)
        results.AddValue(
            skip.SkipValue(test_page_set.pages[0],
                           'Page skipped for testing reason'))
        results.DidRunPage(test_page_set.pages[0])

        results.PrintSummary()
        expected = ('[ RUN      ] http://www.foo.com/\n'
                    '===== SKIPPING TEST http://www.foo.com/:'
                    ' Page skipped for testing reason =====\n'
                    '[       OK ] http://www.foo.com/ (7 ms)\n'
                    '[  PASSED  ] 1 test.\n'
                    '\n'
                    'Skipped pages:\n'
                    'http://www.foo.com/\n'
                    '\n')
        self.assertEquals(expected, ''.join(self._output_stream.output_data))
Example #13
    def testPassesNoSkips(self):
        results = page_test_results.PageTestResults()
        results.WillRunPage(self.pages[0])
        results.AddValue(
            failure.FailureValue(self.pages[0], self.CreateException()))
        results.DidRunPage(self.pages[0])

        results.WillRunPage(self.pages[1])
        results.DidRunPage(self.pages[1])

        results.WillRunPage(self.pages[2])
        results.AddValue(skip.SkipValue(self.pages[2], 'testing reason'))
        results.DidRunPage(self.pages[2])

        self.assertEqual(set([self.pages[0]]), results.pages_that_failed)
        self.assertEqual(set([self.pages[1], self.pages[2]]),
                         results.pages_that_succeeded)
        self.assertEqual(set([self.pages[1]]),
                         results.pages_that_succeeded_and_not_skipped)

        self.assertEqual(3, len(results.all_page_runs))
        self.assertTrue(results.all_page_runs[0].failed)
        self.assertTrue(results.all_page_runs[1].ok)
        self.assertTrue(results.all_page_runs[2].skipped)
Example #14
 def Skip(self, reason):
     assert self._current_page_run, 'Not currently running test.'
     self.AddValue(skip.SkipValue(self.current_page, reason))
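
A minimal usage sketch of this helper, pieced together only from calls that appear elsewhere in these examples (PageTestResults, WillRunPage, DidRunPage, all_page_runs); the page object and the reason string are hypothetical:

results = page_test_results.PageTestResults()
results.WillRunPage(page)  # 'page' stands in for any story/page object
results.Skip('story not supported on this configuration')  # records a SkipValue
results.DidRunPage(page)
# The run is then reported as skipped, e.g. results.all_page_runs[0].skipped.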
Example #15
def Run(test, user_story_set, expectations, finder_options, results):
    """Runs a given test against a given page_set with the given options."""
    test.ValidatePageSet(user_story_set)

    # Reorder page set based on options.
    user_stories = _ShuffleAndFilterUserStorySet(user_story_set,
                                                 finder_options)

    if (not finder_options.use_live_sites
            and finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD
            and
            # TODO(nednguyen): also handle this logic for user_story_set in next
            # patch.
            isinstance(user_story_set, page_set_module.PageSet)):
        _UpdateUserStoryArchivesIfChanged(user_story_set)
        if not _CheckArchives(user_story_set.archive_data_file,
                              user_story_set.wpr_archive_info,
                              user_story_set.pages):
            return

    for user_story in list(user_stories):
        if not test.CanRunForPage(user_story):
            results.WillRunPage(user_story)
            logging.debug('Skipping test: it cannot run for %s',
                          user_story.display_name)
            results.AddValue(skip.SkipValue(user_story, 'Test cannot run'))
            results.DidRunPage(user_story)
            user_stories.remove(user_story)

    if not user_stories:
        return

    user_story_with_discarded_first_results = set()
    max_failures = finder_options.max_failures  # command-line gets priority
    if max_failures is None:
        max_failures = test.max_failures  # may be None
    user_story_groups = GetUserStoryGroupsWithSameSharedUserStoryClass(
        user_stories)

    test.WillRunTest(finder_options)
    for group in user_story_groups:
        state = None
        try:
            state = group.shared_user_story_state_class(
                test, finder_options, user_story_set)
            for _ in xrange(finder_options.pageset_repeat):
                for user_story in group.user_stories:
                    if test.IsExiting():
                        break
                    for _ in xrange(finder_options.page_repeat):
                        results.WillRunPage(user_story)
                        try:
                            _WaitForThermalThrottlingIfNeeded(state.platform)
                            _RunUserStoryAndProcessErrorIfNeeded(
                                test, expectations, user_story, results, state)
                        except Exception:
                            # Tear down & restart the state for unhandled exceptions thrown by
                            # _RunUserStoryAndProcessErrorIfNeeded.
                            results.AddValue(
                                failure.FailureValue(user_story,
                                                     sys.exc_info()))
                            state.TearDownState(results)
                            state = group.shared_user_story_state_class(
                                test, finder_options, user_story_set)
                        finally:
                            _CheckThermalThrottling(state.platform)
                            discard_run = (
                                test.discard_first_result and user_story
                                not in user_story_with_discarded_first_results)
                            if discard_run:
                                user_story_with_discarded_first_results.add(
                                    user_story)
                            results.DidRunPage(user_story,
                                               discard_run=discard_run)
                    if max_failures is not None and len(
                            results.failures) > max_failures:
                        logging.error('Too many failures. Aborting.')
                        test.RequestExit()
        finally:
            if state:
                state.TearDownState(results)
Example #16
    if not os.path.exists(credentials_path):
      credentials_path = None

  # Set up user agent.
  browser_options.browser_user_agent_type = page_set.user_agent_type or None

  if finder_options.profiler:
    profiler_class = profiler_finder.FindProfiler(finder_options.profiler)
    profiler_class.CustomizeBrowserOptions(browser_options.browser_type,
                                           finder_options)

  for page in list(pages):
    if not test.CanRunForPage(page):
      results.WillRunPage(page)
      logging.debug('Skipping test: it cannot run for %s', page.url)
      results.AddValue(skip.SkipValue(page, 'Test cannot run'))
      results.DidRunPage(page)
      pages.remove(page)

  if not pages:
    return

  state = _RunState()
  pages_with_discarded_first_result = set()
  max_failures = finder_options.max_failures  # command-line gets priority
  if max_failures is None:
    max_failures = test.max_failures  # may be None

  try:
    test.WillRunTest(finder_options)
    for _ in xrange(finder_options.pageset_repeat):
Example #17
 def testAsDict(self):
   v = skip.SkipValue(self.pages[0], 'page skipped for testing reason')
   d = v.AsDictWithoutBaseClassEntries()
   self.assertEquals(d['reason'], 'page skipped for testing reason')
Example #18
def Run(test, user_story_set, expectations, finder_options, results,
        max_failures=None):
  """Runs a given test against a given page_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  We "white list" certain exceptions for which the user story runner
  can continue running the remaining user stories.
  """
  # TODO(slamm): Remove special-case for PageTest. https://crbug.com/440101
  if isinstance(test, page_test.PageTest):
    test.ValidatePageSet(user_story_set)

  # Reorder page set based on options.
  user_stories = _ShuffleAndFilterUserStorySet(user_story_set, finder_options)

  if (not finder_options.use_live_sites and
      finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
    _UpdateUserStoryArchivesIfChanged(user_story_set)
    if not _UpdateAndCheckArchives(
        user_story_set.archive_data_file, user_story_set.wpr_archive_info,
        user_stories):
      return

  # TODO(slamm): Remove special-case for PageTest. https://crbug.com/440101
  if isinstance(test, page_test.PageTest):
    for user_story in list(user_stories):
      if not test.CanRunForPage(user_story):
        results.WillRunPage(user_story)
        logging.debug('Skipping test: it cannot run for %s',
                      user_story.display_name)
        results.AddValue(skip.SkipValue(user_story, 'Test cannot run'))
        results.DidRunPage(user_story)
        user_stories.remove(user_story)

  if not user_stories:
    return

  # Effective max failures gives priority to command-line flag value.
  effective_max_failures = finder_options.max_failures
  if effective_max_failures is None:
    effective_max_failures = max_failures

  user_story_groups = GetUserStoryGroupsWithSameSharedUserStoryClass(
      user_stories)
  user_story_with_discarded_first_results = set()

  for group in user_story_groups:
    state = None
    try:
      for _ in xrange(finder_options.pageset_repeat):
        for user_story in group.user_stories:
          for _ in xrange(finder_options.page_repeat):
            if not state:
              state = group.shared_user_story_state_class(
                  test, finder_options, user_story_set)
            results.WillRunPage(user_story)
            try:
              _WaitForThermalThrottlingIfNeeded(state.platform)
              _RunUserStoryAndProcessErrorIfNeeded(
                  expectations, user_story, results, state)
            except exceptions.AppCrashException:
              # Catch AppCrashException to give the story a chance to retry.
              # The retry is enabled by tearing down the state and creating
              # a new state instance in the next iteration.
              try:
                # If TearDownState raises, do not catch the exception.
                # (The AppCrashException was saved as a failure value.)
                state.TearDownState(results)
              finally:
                # Later finally-blocks use state, so ensure it is cleared.
                state = None
            finally:
              has_existing_exception = sys.exc_info() != (None, None, None)
              try:
                if state:
                  _CheckThermalThrottling(state.platform)
                # TODO(slamm): Make discard_first_result part of user_story API.
                # https://crbug.com/440101
                discard_current_run = (
                    getattr(test, 'discard_first_result', False) and
                    user_story not in user_story_with_discarded_first_results)
                if discard_current_run:
                  user_story_with_discarded_first_results.add(user_story)
                results.DidRunPage(user_story, discard_run=discard_current_run)
              except Exception:
                if not has_existing_exception:
                  raise
                # Print current exception and propagate existing exception.
                exception_formatter.PrintFormattedException(
                    msg='Exception from result processing:')
          if (effective_max_failures is not None and
              len(results.failures) > effective_max_failures):
            logging.error('Too many failures. Aborting.')
            return
    finally:
      if state:
        has_existing_exception = sys.exc_info() != (None, None, None)
        try:
          state.TearDownState(results)
        except Exception:
          if not has_existing_exception:
            raise
          # Print current exception and propagate existing exception.
          exception_formatter.PrintFormattedException(
              msg='Exception from TearDownState:')
Example #19
def Run(test,
        story_set,
        finder_options,
        results,
        max_failures=None,
        expectations=None,
        metadata=None,
        max_num_values=sys.maxint):
    """Runs a given test against a given page_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  We "white list" certain exceptions for which the story runner
  can continue running the remaining stories.
  """
    for s in story_set:
        ValidateStory(s)

    # Filter page set based on options.
    stories = story_module.StoryFilter.FilterStorySet(story_set)

    if (not finder_options.use_live_sites and
            finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
        serving_dirs = story_set.serving_dirs
        if story_set.bucket:
            for directory in serving_dirs:
                cloud_storage.GetFilesInDirectoryIfChanged(
                    directory, story_set.bucket)
        if story_set.archive_data_file and not _UpdateAndCheckArchives(
                story_set.archive_data_file, story_set.wpr_archive_info,
                stories):
            return

    if not stories:
        return

    # Effective max failures gives priority to command-line flag value.
    effective_max_failures = finder_options.max_failures
    if effective_max_failures is None:
        effective_max_failures = max_failures

    state = None
    device_info_diags = {}
    try:
        for storyset_repeat_counter in xrange(finder_options.pageset_repeat):
            for story in stories:
                if not state:
                    # Construct shared state by using a copy of finder_options. Shared
                    # state may update the finder_options. If we tear down the shared
                    # state after this story run, we want to construct the shared
                    # state for the next story from the original finder_options.
                    state = story_set.shared_state_class(
                        test, finder_options.Copy(), story_set)

                results.WillRunPage(story, storyset_repeat_counter)

                if expectations:
                    disabled = expectations.IsStoryDisabled(
                        story, state.platform, finder_options)
                    if disabled and not finder_options.run_disabled_tests:
                        results.AddValue(skip.SkipValue(story, disabled))
                        results.DidRunPage(story)
                        continue

                try:
                    state.platform.WaitForBatteryTemperature(35)
                    _WaitForThermalThrottlingIfNeeded(state.platform)
                    _RunStoryAndProcessErrorIfNeeded(story, results, state,
                                                     test)

                    num_values = len(results.all_page_specific_values)
                    if num_values > max_num_values:
                        msg = 'Too many values: %d > %d' % (num_values,
                                                            max_num_values)
                        results.AddValue(
                            failure.FailureValue.FromMessage(None, msg))

                    device_info_diags = _MakeDeviceInfoDiagnostics(state)
                except exceptions.Error:
                    # Catch all Telemetry errors to give the story a chance to retry.
                    # The retry is enabled by tearing down the state and creating
                    # a new state instance in the next iteration.
                    try:
                        # If TearDownState raises, do not catch the exception.
                        # (The Error was saved as a failure value.)
                        state.TearDownState()
                    finally:
                        # Later finally-blocks use state, so ensure it is cleared.
                        state = None
                finally:
                    has_existing_exception = sys.exc_info() != (None, None,
                                                                None)
                    try:
                        if state:
                            _CheckThermalThrottling(state.platform)
                        results.DidRunPage(story)
                    except Exception:  # pylint: disable=broad-except
                        if not has_existing_exception:
                            raise
                        # Print current exception and propagate existing exception.
                        exception_formatter.PrintFormattedException(
                            msg='Exception from result processing:')
                if (effective_max_failures is not None
                        and len(results.failures) > effective_max_failures):
                    logging.error('Too many failures. Aborting.')
                    return
    finally:
        results.PopulateHistogramSet(metadata)

        for name, diag in device_info_diags.iteritems():
            results.histograms.AddSharedDiagnostic(name, diag)

        tagmap = _GenerateTagMapFromStorySet(stories)
        if tagmap.tags_to_story_names:
            results.histograms.AddSharedDiagnostic(reserved_infos.TAG_MAP.name,
                                                   tagmap)

        if state:
            has_existing_exception = sys.exc_info() != (None, None, None)
            try:
                state.TearDownState()
            except Exception:  # pylint: disable=broad-except
                if not has_existing_exception:
                    raise
                # Print current exception and propagate existing exception.
                exception_formatter.PrintFormattedException(
                    msg='Exception from TearDownState:')
Example #20
def Run(test,
        story_set,
        finder_options,
        results,
        max_failures=None,
        tear_down_after_story=False,
        tear_down_after_story_set=False,
        expectations=None):
    """Runs a given test against a given page_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  We "white list" certain exceptions for which the story runner
  can continue running the remaining stories.
  """
    for s in story_set:
        ValidateStory(s)

    # Filter page set based on options.
    stories = filter(story_module.StoryFilter.IsSelected, story_set)

    if (not finder_options.use_live_sites and
            finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
        serving_dirs = story_set.serving_dirs
        if story_set.bucket:
            for directory in serving_dirs:
                cloud_storage.GetFilesInDirectoryIfChanged(
                    directory, story_set.bucket)
        if story_set.archive_data_file and not _UpdateAndCheckArchives(
                story_set.archive_data_file, story_set.wpr_archive_info,
                stories):
            return

    if not stories:
        return

    # Effective max failures gives priority to command-line flag value.
    effective_max_failures = finder_options.max_failures
    if effective_max_failures is None:
        effective_max_failures = max_failures

    story_groups = StoriesGroupedByStateClass(
        stories, story_set.allow_mixed_story_states)

    for group in story_groups:
        state = None
        try:
            for storyset_repeat_counter in xrange(
                    finder_options.pageset_repeat):
                for story in group.stories:
                    if not state:
                        # Construct shared state by using a copy of finder_options. Shared
                        # state may update the finder_options. If we tear down the shared
                        # state after this story run, we want to construct the shared
                        # state for the next story from the original finder_options.
                        state = group.shared_state_class(
                            test, finder_options.Copy(), story_set)

                    results.WillRunPage(story, storyset_repeat_counter)

                    if expectations:
                        disabled = expectations.IsStoryDisabled(
                            story, state.platform)
                        if disabled and not finder_options.run_disabled_tests:
                            results.AddValue(skip.SkipValue(story, disabled))
                            results.DidRunPage(story)
                            continue

                    try:
                        state.platform.WaitForBatteryTemperature(35)
                        _WaitForThermalThrottlingIfNeeded(state.platform)
                        _RunStoryAndProcessErrorIfNeeded(
                            story, results, state, test)
                    except exceptions.Error:
                        # Catch all Telemetry errors to give the story a chance to retry.
                        # The retry is enabled by tearing down the state and creating
                        # a new state instance in the next iteration.
                        try:
                            # If TearDownState raises, do not catch the exception.
                            # (The Error was saved as a failure value.)
                            state.TearDownState()
                        finally:
                            # Later finally-blocks use state, so ensure it is cleared.
                            state = None
                    finally:
                        has_existing_exception = sys.exc_info() != (None, None,
                                                                    None)
                        try:
                            if state:
                                _CheckThermalThrottling(state.platform)
                            results.DidRunPage(story)
                        except Exception:
                            if not has_existing_exception:
                                raise
                            # Print current exception and propagate existing exception.
                            exception_formatter.PrintFormattedException(
                                msg='Exception from result processing:')
                        if state and tear_down_after_story:
                            state.TearDownState()
                            state = None
                    if (effective_max_failures is not None and
                            len(results.failures) > effective_max_failures):
                        logging.error('Too many failures. Aborting.')
                        return
                if state and tear_down_after_story_set:
                    state.TearDownState()
                    state = None
        finally:
            if state:
                has_existing_exception = sys.exc_info() != (None, None, None)
                try:
                    state.TearDownState()
                except Exception:
                    if not has_existing_exception:
                        raise
                    # Print current exception and propagate existing exception.
                    exception_formatter.PrintFormattedException(
                        msg='Exception from TearDownState:')
Example #21
def _RunStoryAndProcessErrorIfNeeded(story, results, state, test):
    def ProcessError(description=None):
        state.DumpStateUponFailure(story, results)
        # Note: adding the FailureValue to the results object also normally
        # causes the progress_reporter to log it in the output.
        results.AddValue(
            failure.FailureValue(story, sys.exc_info(), description))

    try:
        # TODO(mikecase): Remove this logging once Android perf bots are swarmed.
        # crbug.com/678282
        if state.platform.GetOSName() == 'android':
            state.platform._platform_backend.Log(
                'START %s' % (story.name if story.name else str(story)))
        if isinstance(test, story_test.StoryTest):
            test.WillRunStory(state.platform)
        state.WillRunStory(story)
        if not state.CanRunStory(story):
            results.AddValue(
                skip.SkipValue(
                    story, 'Skipped because story is not supported '
                    '(SharedState.CanRunStory() returns False).'))
            return
        state.RunStory(results)
        if isinstance(test, story_test.StoryTest):
            test.Measure(state.platform, results)
    except (legacy_page_test.Failure, exceptions.TimeoutException,
            exceptions.LoginException, exceptions.ProfilingException,
            py_utils.TimeoutException):
        ProcessError()
    except exceptions.Error:
        ProcessError()
        raise
    except page_action.PageActionNotSupported as e:
        results.AddValue(
            skip.SkipValue(story, 'Unsupported page action: %s' % e))
    except Exception:
        ProcessError(description='Unhandlable exception raised.')
        raise
    finally:
        has_existing_exception = (sys.exc_info() != (None, None, None))
        try:
            # We attempt to stop tracing and/or metric collecting before possibly
            # closing the browser. Closing the browser first and stopping tracing
            # later appeared to cause issues where subsequent browser instances would
            # not launch correctly on some devices (see: crbug.com/720317).
            # The following normally cause tracing and/or metric collecting to stop.
            if isinstance(test, story_test.StoryTest):
                test.DidRunStory(state.platform, results)
            else:
                test.DidRunPage(state.platform)
            # And the following normally causes the browser to be closed.
            state.DidRunStory(results)
            # TODO(mikecase): Remove this logging once Android perf bots are swarmed.
            # crbug.com/678282
            if state.platform.GetOSName() == 'android':
                state.platform._platform_backend.Log(
                    'END %s' % (story.name if story.name else str(story)))
        except Exception:
            if not has_existing_exception:
                state.DumpStateUponFailure(story, results)
                raise
            # Print current exception and propagate existing exception.
            exception_formatter.PrintFormattedException(
                msg='Exception raised when cleaning story run: ')
Example #22
 def Skip(self, reason, is_expected=True):
     self.AddValue(skip.SkipValue(self.story, reason, is_expected))
Example #23
def _RunStoryAndProcessErrorIfNeeded(story, results, state, test):
    def ProcessError(exc=None, description=None):
        state.DumpStateUponFailure(story, results)

        # Dump app crash, if present
        if exc:
            if isinstance(exc, exceptions.AppCrashException):
                minidump_path = exc.minidump_path
                if minidump_path:
                    results.AddArtifact(story.name, 'minidump', minidump_path)

        # Note: adding the FailureValue to the results object also normally
        # causes the progress_reporter to log it in the output.
        results.AddValue(
            failure.FailureValue(story, sys.exc_info(), description))

    with CaptureLogsAsArtifacts(results, story.name):
        try:
            if isinstance(test, story_test.StoryTest):
                test.WillRunStory(state.platform)
            state.WillRunStory(story)

            if not state.CanRunStory(story):
                results.AddValue(
                    skip.SkipValue(
                        story, 'Skipped because story is not supported '
                        '(SharedState.CanRunStory() returns False).'))
                return
            state.RunStory(results)
            if isinstance(test, story_test.StoryTest):
                test.Measure(state.platform, results)
        except (legacy_page_test.Failure, exceptions.TimeoutException,
                exceptions.LoginException, exceptions.ProfilingException,
                py_utils.TimeoutException) as exc:
            ProcessError(exc)
        except exceptions.Error as exc:
            ProcessError(exc)
            raise
        except page_action.PageActionNotSupported as exc:
            results.AddValue(
                skip.SkipValue(story, 'Unsupported page action: %s' % exc))
        except Exception:
            ProcessError(description='Unhandlable exception raised.')
            raise
        finally:
            has_existing_exception = (sys.exc_info() != (None, None, None))
            try:
                # We attempt to stop tracing and/or metric collecting before possibly
                # closing the browser. Closing the browser first and stopping tracing
                # later appeared to cause issues where subsequent browser instances
                # would not launch correctly on some devices (see: crbug.com/720317).
                # The following normally cause tracing and/or metric collecting to stop.
                if isinstance(test, story_test.StoryTest):
                    test.DidRunStory(state.platform, results)
                else:
                    test.DidRunPage(state.platform)
                # And the following normally causes the browser to be closed.
                state.DidRunStory(results)
            except Exception as exc:  # pylint: disable=broad-except
                if not has_existing_exception:
                    state.DumpStateUponFailure(story, results, exc)
                    raise
                # Print current exception and propagate existing exception.
                exception_formatter.PrintFormattedException(
                    msg='Exception raised when cleaning story run: ')
Example #24
 def testAsDict(self):
     v = skip.SkipValue(self.pages[0], 'page skipped for testing reason',
                        False)
     d = v.AsDict()
     self.assertEquals(d['reason'], 'page skipped for testing reason')
     self.assertEquals(d['is_expected'], False)
Example #25
 def Skip(self, reason, is_expected=True):
     assert self._current_page_run, 'Not currently running test.'
     self.AddValue(skip.SkipValue(self.current_page, reason, is_expected))
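
A small, hypothetical sketch of the is_expected flag in use, assuming the same PageTestResults flow as the earlier examples; Example #24 above shows the flag surfacing in the value's AsDict() output:

results.WillRunPage(page)  # 'page' stands in for any story/page object
results.Skip('crashes on this device', is_expected=False)  # unexpected skip
results.DidRunPage(page)
# The resulting SkipValue's AsDict() carries 'is_expected': False (see Example #24).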
Example #26
 def Skip(self, reason):
     self.AddValue(skip.SkipValue(self.story, reason))
Example #27
def _RunStoryAndProcessErrorIfNeeded(story, results, state, test):
    def ProcessError(description=None):
        state.DumpStateUponFailure(story, results)
        # Note: adding the FailureValue to the results object also normally
        # causes the progress_reporter to log it in the output.
        results.AddValue(
            failure.FailureValue(story, sys.exc_info(), description))

    try:
        # TODO(mikecase): Remove this logging once Android perf bots are swarmed.
        # crbug.com/678282
        if state.platform.GetOSName() == 'android':
            state.platform._platform_backend.Log(
                'START %s' % (story.name if story.name else str(story)))
        if isinstance(test, story_test.StoryTest):
            test.WillRunStory(state.platform)
        state.WillRunStory(story)
        if not state.CanRunStory(story):
            results.AddValue(
                skip.SkipValue(
                    story, 'Skipped because story is not supported '
                    '(SharedState.CanRunStory() returns False).'))
            return
        state.RunStory(results)
        if isinstance(test, story_test.StoryTest):
            test.Measure(state.platform, results)
    except (legacy_page_test.Failure, exceptions.TimeoutException,
            exceptions.LoginException, exceptions.ProfilingException,
            py_utils.TimeoutException):
        ProcessError()
    except exceptions.Error:
        ProcessError()
        raise
    except page_action.PageActionNotSupported as e:
        results.AddValue(
            skip.SkipValue(story, 'Unsupported page action: %s' % e))
    except Exception:
        ProcessError(description='Unhandlable exception raised.')
        raise
    finally:
        has_existing_exception = (sys.exc_info() != (None, None, None))
        try:
            state.DidRunStory(results)
            # If state.DidRunStory raises an exception, things are messed up badly
            # and we do not need to run test.DidRunStory at that point.
            if isinstance(test, story_test.StoryTest):
                test.DidRunStory(state.platform, results)
            else:
                test.DidRunPage(state.platform)
            # TODO(mikecase): Remove this logging once Android perf bots are swarmed.
            # crbug.com/678282
            if state.platform.GetOSName() == 'android':
                state.platform._platform_backend.Log(
                    'END %s' % (story.name if story.name else str(story)))
        except Exception:
            if not has_existing_exception:
                state.DumpStateUponFailure(story, results)
                raise
            # Print current exception and propagate existing exception.
            exception_formatter.PrintFormattedException(
                msg='Exception raised when cleaning story run: ')