Example #1
def _RunStoryAndProcessErrorIfNeeded(story, results, state, test):
    def ProcessError(description=None):
        state.DumpStateUponFailure(story, results)
        # Note: adding the FailureValue to the results object also normally
        # causes the progress_reporter to log it in the output.
        results.AddValue(
            failure.FailureValue(story, sys.exc_info(), description))

    try:
        if isinstance(test, story_test.StoryTest):
            test.WillRunStory(state.platform)
        state.WillRunStory(story)
        if not state.CanRunStory(story):
            results.AddValue(
                skip.SkipValue(
                    story, 'Skipped because story is not supported '
                    '(SharedState.CanRunStory() returns False).'))
            return
        state.RunStory(results)
        if isinstance(test, story_test.StoryTest):
            test.Measure(state.platform, results)
    except (legacy_page_test.Failure, exceptions.TimeoutException,
            exceptions.LoginException, exceptions.ProfilingException,
            py_utils.TimeoutException):
        ProcessError()
    except exceptions.Error:
        ProcessError()
        raise
    except page_action.PageActionNotSupported as e:
        results.AddValue(
            skip.SkipValue(story, 'Unsupported page action: %s' % e))
    except Exception:
        ProcessError(description='Unhandlable exception raised.')
        raise
    finally:
        has_existing_exception = (sys.exc_info() != (None, None, None))
        try:
            # We attempt to stop tracing and/or metric collecting before possibly
            # closing the browser. Closing the browser first and stopping tracing
            # later appeared to cause issues where subsequent browser instances would
            # not launch correctly on some devices (see: crbug.com/720317).
            # The following normally cause tracing and/or metric collecting to stop.
            if isinstance(test, story_test.StoryTest):
                test.DidRunStory(state.platform, results)
            else:
                test.DidRunPage(state.platform)
            # And the following normally causes the browser to be closed.
            state.DidRunStory(results)
        except Exception:
            if not has_existing_exception:
                state.DumpStateUponFailure(story, results)
                raise
            # Print current exception and propagate existing exception.
            exception_formatter.PrintFormattedException(
                msg='Exception raised when cleaning story run: ')
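The finally block above illustrates a pattern that recurs throughout these examples: before running cleanup, remember whether an exception is already propagating, and only let a cleanup failure escape when it would not mask that earlier error. A minimal, framework-free sketch of the same idea (the function names are illustrative, not Telemetry APIs):

import logging
import sys

def run_with_cleanup(action, cleanup):
    # Sketch of the cleanup pattern used above: a failure in `cleanup` is only
    # raised when no other exception is already propagating; otherwise it is
    # logged so the original error is not masked.
    try:
        action()
    finally:
        has_existing_exception = sys.exc_info() != (None, None, None)
        try:
            cleanup()
        except Exception:
            if not has_existing_exception:
                raise
            logging.exception('Exception raised when cleaning up:')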
 def RunStory(self, results):
   try:
     self._PreparePage()
     self._current_page.Run(self)
     self._test.ValidateAndMeasurePage(
         self._current_page, self._current_tab, results)
   except exceptions.Error:
     if self._test.is_multi_tab_test:
       # Avoid trying to recover from an unknown multi-tab state.
       exception_formatter.PrintFormattedException(
           msg='Telemetry Error during multi tab test:')
       raise legacy_page_test.MultiTabTestAppCrashError
     raise
Example #3
    def Run(self, finder_options):
        """Run this test with the given options.

    Returns:
      The number of failure values (up to 254) or 255 if there is an uncaught
      exception.
    """
        self.CustomizeBrowserOptions(finder_options.browser_options)

        pt = self.CreatePageTest(finder_options)
        pt.__name__ = self.__class__.__name__

        if hasattr(self, '_disabled_strings'):
            # pylint: disable=protected-access
            pt._disabled_strings = self._disabled_strings
        if hasattr(self, '_enabled_strings'):
            # pylint: disable=protected-access
            pt._enabled_strings = self._enabled_strings

        expectations = self.CreateExpectations()
        us = self.CreateStorySet(finder_options)
        if isinstance(pt, page_test.PageTest):
            if any(not isinstance(p, page.Page) for p in us.user_stories):
                raise Exception(
                    'PageTest must be used with StorySet containing only '
                    'telemetry.page.Page user stories.')

        benchmark_metadata = self.GetMetadata()
        with results_options.CreateResults(
                benchmark_metadata, finder_options,
                self.ValueCanBeAddedPredicate) as results:
            try:
                story_runner.Run(pt,
                                 us,
                                 expectations,
                                 finder_options,
                                 results,
                                 max_failures=self._max_failures)
                return_code = min(254, len(results.failures))
            except Exception:
                exception_formatter.PrintFormattedException()
                return_code = 255

            bucket = cloud_storage.BUCKET_ALIASES[finder_options.upload_bucket]
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud(bucket)
                results.UploadProfilingFilesToCloud(bucket)

            results.PrintSummary()
        return return_code
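As the docstring in this example notes, the exit code packs the failure count into the shell's 0-255 range: up to 254 failures are reported directly and 255 is reserved for an uncaught exception. A small sketch of that convention (the helper name is illustrative):

def ComputeReturnCode(num_failures, had_uncaught_exception):
    # Shell exit codes only carry 0-255, so 255 is reserved for an uncaught
    # exception and the failure count is clamped to 254.
    if had_uncaught_exception:
        return 255
    return min(254, num_failures)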
Example #4
def RunBenchmark(benchmark, finder_options):
  """Run this test with the given options.

  Returns:
    The number of failure values (up to 254) or 255 if there is an uncaught
    exception.
  """
  benchmark.CustomizeBrowserOptions(finder_options.browser_options)

  possible_browser = browser_finder.FindBrowser(finder_options)
  if possible_browser and benchmark.ShouldDisable(possible_browser):
    return 1

  pt = benchmark.CreatePageTest(finder_options)
  pt.__name__ = benchmark.__class__.__name__

  if hasattr(benchmark, '_disabled_strings'):
    # pylint: disable=protected-access
    pt._disabled_strings = benchmark._disabled_strings
  if hasattr(benchmark, '_enabled_strings'):
    # pylint: disable=protected-access
    pt._enabled_strings = benchmark._enabled_strings

  stories = benchmark.CreateStorySet(finder_options)
  if isinstance(pt, page_test.PageTest):
    if any(not isinstance(p, page.Page) for p in stories.stories):
      raise Exception(
          'PageTest must be used with StorySet containing only '
          'telemetry.page.Page stories.')

  benchmark_metadata = benchmark.GetMetadata()
  with results_options.CreateResults(
      benchmark_metadata, finder_options,
      benchmark.ValueCanBeAddedPredicate) as results:
    try:
      Run(pt, stories, finder_options, results, benchmark.max_failures)
      return_code = min(254, len(results.failures))
    except Exception:
      exception_formatter.PrintFormattedException()
      return_code = 255

    try:
      bucket = cloud_storage.BUCKET_ALIASES[finder_options.upload_bucket]
      if finder_options.upload_results:
        results.UploadTraceFilesToCloud(bucket)
        results.UploadProfilingFilesToCloud(bucket)
    finally:
      results.PrintSummary()
  return return_code
Example #5
def _RunStoryAndProcessErrorIfNeeded(story, results, state, test):
    def ProcessError(description=None):
        state.DumpStateUponFailure(story, results)
        results.AddValue(
            failure.FailureValue(story, sys.exc_info(), description))

    try:
        if isinstance(test, story_test.StoryTest):
            test.WillRunStory(state.platform)
        state.WillRunStory(story)
        if not state.CanRunStory(story):
            results.AddValue(
                skip.SkipValue(
                    story, 'Skipped because story is not supported '
                    '(SharedState.CanRunStory() returns False).'))
            return
        state.RunStory(results)
        if isinstance(test, story_test.StoryTest):
            test.Measure(state.platform, results)
    except (legacy_page_test.Failure, exceptions.TimeoutException,
            exceptions.LoginException, exceptions.ProfilingException,
            py_utils.TimeoutException):
        ProcessError()
    except exceptions.Error:
        ProcessError()
        raise
    except page_action.PageActionNotSupported as e:
        results.AddValue(
            skip.SkipValue(story, 'Unsupported page action: %s' % e))
    except Exception:
        ProcessError(description='Unhandlable exception raised.')
        raise
    finally:
        has_existing_exception = (sys.exc_info() != (None, None, None))
        try:
            state.DidRunStory(results)
            # If state.DidRunStory raises an exception, things are messed up badly and we
            # do not need to run test.DidRunStory at that point.
            if isinstance(test, story_test.StoryTest):
                test.DidRunStory(state.platform)
            else:
                test.DidRunPage(state.platform)
        except Exception:
            if not has_existing_exception:
                state.DumpStateUponFailure(story, results)
                raise
            # Print current exception and propagate existing exception.
            exception_formatter.PrintFormattedException(
                msg='Exception raised when cleaning story run: ')
Example #6
def _RunUserStoryAndProcessErrorIfNeeded(expectations, user_story, results,
                                         state):
    def ProcessError():
        if expectation == 'fail':
            msg = 'Expected exception while running %s' % user_story.display_name
            exception_formatter.PrintFormattedException(msg=msg)
        else:
            msg = 'Exception while running %s' % user_story.display_name
            results.AddValue(failure.FailureValue(user_story, sys.exc_info()))

    try:
        expectation = None
        state.WillRunUserStory(user_story)
        expectation, skip_value = state.GetTestExpectationAndSkipValue(
            expectations)
        if expectation == 'skip':
            assert skip_value
            results.AddValue(skip_value)
            return
        state.RunUserStory(results)
    except (page_test.Failure, exceptions.TimeoutException,
            exceptions.LoginException, exceptions.ProfilingException):
        ProcessError()
    except exceptions.Error:
        ProcessError()
        raise
    except page_action.PageActionNotSupported as e:
        results.AddValue(
            skip.SkipValue(user_story, 'Unsupported page action: %s' % e))
    except Exception:
        results.AddValue(
            failure.FailureValue(user_story, sys.exc_info(),
                                 'Unhandlable exception raised.'))
        raise
    else:
        if expectation == 'fail':
            logging.warning('%s was expected to fail, but passed.\n',
                            user_story.display_name)
    finally:
        has_existing_exception = (sys.exc_info() != (None, None, None))
        try:
            state.DidRunUserStory(results)
        except Exception:
            if not has_existing_exception:
                raise
            # Print current exception and propagate existing exception.
            exception_formatter.PrintFormattedException(
                msg='Exception from DidRunUserStory: ')
Example #7
 def RunStory(self, results):
     try:
         self._PreparePage()
         self._ImplicitPageNavigation()
         action_runner = action_runner_module.ActionRunner(
             self._current_tab, skip_waits=self._current_page.skip_waits)
         self._current_page.RunPageInteractions(action_runner)
         self._test.ValidateAndMeasurePage(self._current_page,
                                           self._current_tab, results)
     except exceptions.Error:
         if self._test.is_multi_tab_test:
             # Avoid trying to recover from an unknown multi-tab state.
             exception_formatter.PrintFormattedException(
                 msg='Telemetry Error during multi tab test:')
             raise page_test.MultiTabTestAppCrashError
         raise
Example #8
    def __init__(self, backend, platform_backend, credentials_path):
        super(Browser, self).__init__(app_backend=backend,
                                      platform_backend=platform_backend)
        try:
            self._browser_backend = backend
            self._platform_backend = platform_backend
            self._tabs = tab_list.TabList(backend.tab_list_backend)
            self.credentials = browser_credentials.BrowserCredentials()
            self.credentials.credentials_path = credentials_path
            self._platform_backend.DidCreateBrowser(self,
                                                    self._browser_backend)
            browser_options = self._browser_backend.browser_options
            self.platform.FlushDnsCache()
            if browser_options.clear_sytem_cache_for_browser_and_profile_on_start:
                if self.platform.CanFlushIndividualFilesFromSystemCache():
                    self.platform.FlushSystemCacheForDirectory(
                        self._browser_backend.profile_directory)
                    self.platform.FlushSystemCacheForDirectory(
                        self._browser_backend.browser_directory)
                elif self.platform.SupportFlushEntireSystemCache():
                    self.platform.FlushEntireSystemCache()
                else:
                    logging.warning('Flush system cache is not supported. ' +
                                    'Did not flush system cache.')

            self._browser_backend.SetBrowser(self)
            self._browser_backend.Start()
            self._LogBrowserInfo()
            self._platform_backend.DidStartBrowser(self, self._browser_backend)
            self._profiling_controller = profiling_controller.ProfilingController(
                self._browser_backend.profiling_controller_backend)
        except Exception:
            exc_info = sys.exc_info()
            logging.error(
                'Failed with %s while starting the browser backend.',
                exc_info[0].__name__)  # Show the exception name only.
            try:
                self._platform_backend.WillCloseBrowser(
                    self, self._browser_backend)
            except Exception:
                exception_formatter.PrintFormattedException(
                    msg='Exception raised while closing platform backend')
            raise exc_info[0], exc_info[1], exc_info[2]
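The closing raise exc_info[0], exc_info[1], exc_info[2] is Python 2 syntax for re-raising the saved exception with its original traceback after the best-effort cleanup. A version-agnostic sketch of the same idiom, assuming the six compatibility library is acceptable (the helper name is illustrative):

import logging
import sys
import six  # assumption: six is available for Python 2/3 compatibility

def _CleanUpAndReraise(cleanup):
    # Call this from inside an except block: save the in-flight exception, run
    # best-effort cleanup, then re-raise the original exception with its
    # traceback (six.reraise is the portable spelling of Python 2's
    # three-expression raise).
    exc_info = sys.exc_info()
    try:
        cleanup()
    except Exception:
        logging.exception('Exception raised during cleanup')
    six.reraise(*exc_info)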
Example #9
def FetchBinaryDepdencies(platform, client_configs,
                          fetch_reference_chrome_binary):
    """ Fetch all binary dependenencies for the given |platform|.

  Note: we don't fetch browser binaries by default because the binary is
  about 2 GB in size, and fetching it requires cloud storage permission for
  the chrome-telemetry bucket.

  Args:
    platform: an instance of telemetry.core.platform
    client_configs: A list of paths (string) to dependencies json files.
    fetch_reference_chrome_binary: whether to fetch reference chrome binary for
      the given platform.
  """
    configs = [dependency_manager.BaseConfig(TELEMETRY_PROJECT_CONFIG)]
    dep_manager = dependency_manager.DependencyManager(configs)
    target_platform = '%s_%s' % (platform.GetOSName(), platform.GetArchName())
    dep_manager.PrefetchPaths(target_platform)

    if platform.GetOSName() == 'android':
        host_platform = '%s_%s' % (
            platform_module.GetHostPlatform().GetOSName(),
            platform_module.GetHostPlatform().GetArchName())
        dep_manager.PrefetchPaths(host_platform)

    if fetch_reference_chrome_binary:
        _FetchReferenceBrowserBinary(platform)

    # For now, handle client config separately because the BUILD.gn & .isolate of
    # telemetry tests in chromium src failed to include the files specified in its
    # client config.
    # (https://github.com/catapult-project/catapult/issues/2192)
    # For now this is OK because the client configs usually don't include
    # cloud storage info.
    # TODO(nednguyen): remove the logic of swallowing exception once the issue is
    # fixed on Chromium side.
    if client_configs:
        manager = dependency_manager.DependencyManager(
            list(dependency_manager.BaseConfig(c) for c in client_configs))
        try:
            manager.PrefetchPaths(target_platform)
        except Exception:
            exception_formatter.PrintFormattedException()
  def RemoveTestCa(self):
    """Remove root CA generated by previous call to InstallTestCa().

    Removes the test root certificate from both the device and host machine.
    """
    if not self._wpr_ca_cert_path:
      return

    if self._is_test_ca_installed:
      try:
        self._device_cert_util.remove_cert()
      except Exception:
        # Best effort cleanup - show the error and continue.
        exception_formatter.PrintFormattedException(
          msg=('Error while trying to remove certificate authority: %s. '
               % str(self._device)))
      self._is_test_ca_installed = False

    shutil.rmtree(os.path.dirname(self._wpr_ca_cert_path), ignore_errors=True)
    self._wpr_ca_cert_path = None
    self._device_cert_util = None
 def RunStory(self, results):
   try:
     self._PreparePage()
     self._current_page.Run(self)
     self._test.ValidateAndMeasurePage(
         self._current_page, self._current_tab, results)
   except exceptions.Error:
     if self._finder_options.browser_options.take_screenshot_for_failed_page:
       self._TryCaptureScreenShot(self._current_page, self._current_tab,
                                  results)
     if self._test.is_multi_tab_test:
       # Avoid trying to recover from an unknown multi-tab state.
       exception_formatter.PrintFormattedException(
           msg='Telemetry Error during multi tab test:')
       raise page_test.MultiTabTestAppCrashError
     raise
   except Exception:
     if self._finder_options.browser_options.take_screenshot_for_failed_page:
       self._TryCaptureScreenShot(self._current_page, self._current_tab,
                                  results)
     raise
Example #12
def Run(test,
        story_set,
        finder_options,
        results,
        max_failures=None,
        tear_down_after_story=False,
        tear_down_after_story_set=False):
    """Runs a given test against a given page_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  We "white list" certain exceptions for which the story runner
  can continue running the remaining stories.
  """
    # Filter page set based on options.
    stories = filter(story_module.StoryFilter.IsSelected, story_set)

    if (not finder_options.use_live_sites and
            finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
        serving_dirs = story_set.serving_dirs
        if story_set.bucket:
            for directory in serving_dirs:
                cloud_storage.GetFilesInDirectoryIfChanged(
                    directory, story_set.bucket)
        if story_set.archive_data_file and not _UpdateAndCheckArchives(
                story_set.archive_data_file, story_set.wpr_archive_info,
                stories):
            return

    if not stories:
        return

    # Effective max failures gives priority to command-line flag value.
    effective_max_failures = finder_options.max_failures
    if effective_max_failures is None:
        effective_max_failures = max_failures

    story_groups = StoriesGroupedByStateClass(
        stories, story_set.allow_mixed_story_states)

    for group in story_groups:
        state = None
        try:
            for storyset_repeat_counter in xrange(
                    finder_options.pageset_repeat):
                for story in group.stories:
                    for story_repeat_counter in xrange(
                            finder_options.page_repeat):
                        if not state:
                            # Construct shared state by using a copy of finder_options. Shared
                            # state may update the finder_options. If we tear down the shared
                            # state after this story run, we want to construct the shared
                            # state for the next story from the original finder_options.
                            state = group.shared_state_class(
                                test, finder_options.Copy(), story_set)
                        results.WillRunPage(story, storyset_repeat_counter,
                                            story_repeat_counter)
                        try:
                            _WaitForThermalThrottlingIfNeeded(state.platform)
                            _RunStoryAndProcessErrorIfNeeded(
                                story, results, state, test)
                        except exceptions.Error:
                            # Catch all Telemetry errors to give the story a chance to retry.
                            # The retry is enabled by tearing down the state and creating
                            # a new state instance in the next iteration.
                            try:
                                # If TearDownState raises, do not catch the exception.
                                # (The Error was saved as a failure value.)
                                state.TearDownState()
                            finally:
                                # Later finally-blocks use state, so ensure it is cleared.
                                state = None
                        finally:
                            has_existing_exception = sys.exc_info() != (
                                None, None, None)
                            try:
                                if state:
                                    _CheckThermalThrottling(state.platform)
                                results.DidRunPage(story)
                            except Exception:
                                if not has_existing_exception:
                                    raise
                                # Print current exception and propagate existing exception.
                                exception_formatter.PrintFormattedException(
                                    msg='Exception from result processing:')
                            if state and tear_down_after_story:
                                state.TearDownState()
                                state = None
                    if (effective_max_failures is not None and
                            len(results.failures) > effective_max_failures):
                        logging.error('Too many failures. Aborting.')
                        return
                if state and tear_down_after_story_set:
                    state.TearDownState()
                    state = None
        finally:
            if state:
                has_existing_exception = sys.exc_info() != (None, None, None)
                try:
                    state.TearDownState()
                except Exception:
                    if not has_existing_exception:
                        raise
                    # Print current exception and propagate existing exception.
                    exception_formatter.PrintFormattedException(
                        msg='Exception from TearDownState:')
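The except exceptions.Error handler in the story loop above implements the retry described in the docstring: a recoverable Telemetry error tears down the shared state and clears the local variable, so the next iteration constructs a fresh state instead of reusing a possibly broken one. A stripped-down sketch of that control flow (class and function names are illustrative, not Telemetry APIs):

class RecoverableError(Exception):
    pass

def RunStoriesWithRetry(stories, create_state, run_one_story):
    # Sketch of the retry-by-teardown pattern: a recoverable error discards the
    # shared state so the next story rebuilds it from scratch, while any other
    # exception propagates and aborts the run.
    state = None
    try:
        for story in stories:
            if state is None:
                state = create_state()
            try:
                run_one_story(state, story)
            except RecoverableError:
                # The error is assumed to have been recorded by run_one_story;
                # if TearDownState raises, do not swallow that exception.
                try:
                    state.TearDownState()
                finally:
                    state = None
    finally:
        if state is not None:
            state.TearDownState()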
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    1 if there is failure or 2 if there is an uncaught exception.
  """
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    expectations = benchmark.expectations

    target_platform = None
    if possible_browser:
        target_platform = possible_browser.platform
    else:
        target_platform = platform_module.GetHostPlatform()

    can_run_on_platform = benchmark._CanRunOnPlatform(target_platform,
                                                      finder_options)

    expectations_disabled = False
    # For now, test expectations are only applicable in the cases where the
    # testing target involves a browser.
    if possible_browser:
        expectations_disabled = expectations.IsBenchmarkDisabled(
            possible_browser.platform, finder_options)

    if expectations_disabled or not can_run_on_platform:
        print '%s is disabled on the selected browser' % benchmark.Name()
        if finder_options.run_disabled_tests and can_run_on_platform:
            print 'Running benchmark anyway due to: --also-run-disabled-tests'
        else:
            if can_run_on_platform:
                print 'Try --also-run-disabled-tests to force the benchmark to run.'
            else:
                print(
                    "This platform is not supported for this benchmark. If this is "
                    "in error please add it to the benchmark's supported platforms."
                )
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    should_add_value=benchmark.ShouldAddValue,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    with results_options.CreateResults(
            benchmark_metadata,
            finder_options,
            should_add_value=benchmark.ShouldAddValue,
            benchmark_enabled=True) as results:
        try:
            Run(pt,
                stories,
                finder_options,
                results,
                benchmark.max_failures,
                expectations=expectations,
                max_num_values=benchmark.MAX_NUM_VALUES)
            return_code = 1 if results.had_failures else 0
            # We want to make sure that all expectations are linked to real stories;
            # this will log error messages if names do not match what is in the set.
            benchmark.GetBrokenExpectations(stories)
        except Exception:  # pylint: disable=broad-except

            logging.fatal(
                'Benchmark execution interrupted by a fatal exception.')

            filtered_stories = story_module.StoryFilter.FilterStorySet(stories)
            results.InterruptBenchmark(filtered_stories,
                                       _GetPageSetRepeat(finder_options))
            exception_formatter.PrintFormattedException()
            return_code = 2

        benchmark_owners = benchmark.GetOwners()
        benchmark_component = benchmark.GetBugComponents()
        benchmark_documentation_url = benchmark.GetDocumentationLink()

        if benchmark_owners:
            results.AddSharedDiagnostic(reserved_infos.OWNERS.name,
                                        benchmark_owners)

        if benchmark_component:
            results.AddSharedDiagnostic(reserved_infos.BUG_COMPONENTS.name,
                                        benchmark_component)

        if benchmark_documentation_url:
            results.AddSharedDiagnostic(reserved_infos.DOCUMENTATION_URLS.name,
                                        benchmark_documentation_url)

        try:
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud()
                results.UploadArtifactsToCloud()
        finally:
            memory_debug.LogHostMemoryUsage()
            results.PrintSummary()
    return return_code
def Run(test,
        story_set,
        finder_options,
        results,
        max_failures=None,
        expectations=None,
        max_num_values=sys.maxint):
    """Runs a given test against a given page_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  We "white list" certain exceptions for which the story runner
  can continue running the remaining stories.
  """
    for s in story_set:
        ValidateStory(s)

    # Filter page set based on options.
    stories = story_module.StoryFilter.FilterStorySet(story_set)

    if (not finder_options.use_live_sites and
            finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
        serving_dirs = story_set.serving_dirs
        if story_set.bucket:
            for directory in serving_dirs:
                cloud_storage.GetFilesInDirectoryIfChanged(
                    directory, story_set.bucket)
        if story_set.archive_data_file and not _UpdateAndCheckArchives(
                story_set.archive_data_file, story_set.wpr_archive_info,
                stories):
            return

    if not stories:
        return

    # Effective max failures gives priority to command-line flag value.
    effective_max_failures = finder_options.max_failures
    if effective_max_failures is None:
        effective_max_failures = max_failures

    state = None
    device_info_diags = {}
    try:
        pageset_repeat = _GetPageSetRepeat(finder_options)
        if finder_options.smoke_test_mode:
            pageset_repeat = 1
        for storyset_repeat_counter in xrange(pageset_repeat):
            for story in stories:
                start_timestamp = time.time()
                if not state:
                    # Construct shared state by using a copy of finder_options. Shared
                    # state may update the finder_options. If we tear down the shared
                    # state after this story run, we want to construct the shared
                    # state for the next story from the original finder_options.
                    state = story_set.shared_state_class(
                        test, finder_options.Copy(), story_set)

                results.WillRunPage(story, storyset_repeat_counter)
                story_run = results.current_page_run

                if expectations:
                    disabled = expectations.IsStoryDisabled(
                        story, state.platform, finder_options)
                    if disabled and not finder_options.run_disabled_tests:
                        results.Skip(disabled)
                        results.DidRunPage(story)
                        continue

                try:
                    if state.platform:
                        state.platform.WaitForBatteryTemperature(35)
                        _WaitForThermalThrottlingIfNeeded(state.platform)
                    _RunStoryAndProcessErrorIfNeeded(story, results, state,
                                                     test)

                    num_values = len(results.all_page_specific_values)
                    # TODO(#4259): Convert this to an exception-based failure
                    if num_values > max_num_values:
                        msg = 'Too many values: %d > %d' % (num_values,
                                                            max_num_values)
                        logging.error(msg)
                        results.Fail(msg)

                    device_info_diags = _MakeDeviceInfoDiagnostics(state)
                except exceptions.Error:
                    # Catch all Telemetry errors to give the story a chance to retry.
                    # The retry is enabled by tearing down the state and creating
                    # a new state instance in the next iteration.
                    try:
                        # If TearDownState raises, do not catch the exception.
                        # (The Error was saved as a failure value.)
                        state.TearDownState()
                    finally:
                        # Later finally-blocks use state, so ensure it is cleared.
                        state = None
                finally:
                    has_existing_exception = sys.exc_info() != (None, None,
                                                                None)
                    try:
                        if state and state.platform:
                            _CheckThermalThrottling(state.platform)
                        results.DidRunPage(story)
                        story_run.SetDuration(time.time() - start_timestamp)
                    except Exception:  # pylint: disable=broad-except
                        if not has_existing_exception:
                            raise
                        # Print current exception and propagate existing exception.
                        exception_formatter.PrintFormattedException(
                            msg='Exception from result processing:')
                if (effective_max_failures is not None
                        and results.num_failed > effective_max_failures):
                    logging.error('Too many failures. Aborting.')
                    return
    finally:
        results.PopulateHistogramSet()

        for name, diag in device_info_diags.iteritems():
            results.AddSharedDiagnostic(name, diag)

        tagmap = _GenerateTagMapFromStorySet(stories)
        if tagmap.tags_to_story_names:
            results.AddSharedDiagnostic(reserved_infos.TAG_MAP.name, tagmap)

        if state:
            has_existing_exception = sys.exc_info() != (None, None, None)
            try:
                state.TearDownState()
            except Exception:  # pylint: disable=broad-except
                if not has_existing_exception:
                    raise
                # Print current exception and propagate existing exception.
                exception_formatter.PrintFormattedException(
                    msg='Exception from TearDownState:')
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    The number of failure values (up to 254) or 255 if there is an uncaught
    exception.
  """
    start = time.time()
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
        print(
            'Cannot find browser of type %s. To list out all '
            'available browsers, rerun your command with '
            '--browser=list' % finder_options.browser_options.browser_type)
        return 1
    if (possible_browser and
            not decorators.IsBenchmarkEnabled(benchmark, possible_browser)):
        print '%s is disabled on the selected browser' % benchmark.Name()
        if finder_options.run_disabled_tests:
            print 'Running benchmark anyway due to: --also-run-disabled-tests'
        else:
            print 'Try --also-run-disabled-tests to force the benchmark to run.'
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    benchmark.ValueCanBeAddedPredicate,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    should_tear_down_state_after_each_story_run = (
        benchmark.ShouldTearDownStateAfterEachStoryRun())
    # HACK: restarting shared state has huge overhead on cros (crbug.com/645329),
    # hence we default this to False when the test is run against CrOS.
    # TODO(cros-team): figure out ways to remove this hack.
    if (possible_browser.platform.GetOSName() == 'chromeos' and
            not benchmark.IsShouldTearDownStateAfterEachStoryRunOverriden()):
        should_tear_down_state_after_each_story_run = False

    with results_options.CreateResults(benchmark_metadata,
                                       finder_options,
                                       benchmark.ValueCanBeAddedPredicate,
                                       benchmark_enabled=True) as results:
        try:
            Run(pt, stories, finder_options, results, benchmark.max_failures,
                should_tear_down_state_after_each_story_run,
                benchmark.ShouldTearDownStateAfterEachStorySetRun())
            return_code = min(254, len(results.failures))
        except Exception:
            exception_formatter.PrintFormattedException()
            return_code = 255

        try:
            if finder_options.upload_results:
                bucket = finder_options.upload_bucket
                if bucket in cloud_storage.BUCKET_ALIASES:
                    bucket = cloud_storage.BUCKET_ALIASES[bucket]
                results.UploadTraceFilesToCloud(bucket)
                results.UploadProfilingFilesToCloud(bucket)
        finally:
            duration = time.time() - start
            results.AddSummaryValue(
                scalar.ScalarValue(None, 'BenchmarkDuration', 'minutes',
                                   duration / 60.0))
            results.PrintSummary()
    return return_code
Example #16
def RunBenchmark(benchmark, story_set, possible_browser,
                 browser_options, finder_options):
  """Run the benchmark on the given browser with given options.

  Args:
    benchmark: an instance of Benchmark class, a benchmark to be run.
    story_set: an instance of StorySet, a collection of stories.
    possible_browser: an instance of PossibleBrowser.
    browser_options: options to be passed to browser.
    finder_options: options controlling the execution of benchmark. This
      can be an instance of BrowserFinderOptions class, but only options
      relevant to benchmark execution will be read.

  Returns:
    1 if there is failure or 2 if there is an uncaught exception.
  """
  benchmark_metadata = benchmark.GetMetadata()
  page_test = benchmark.CreatePageTest(finder_options)
  page_test.__name__ = benchmark.__class__.__name__

  if isinstance(page_test, legacy_page_test.LegacyPageTest):
    if any(not isinstance(p, page.Page) for p in story_set):
      raise Exception(
          'PageTest must be used with StorySet containing only '
          'telemetry.page.Page stories.')

  with results_options.CreateResults(
      benchmark_metadata, finder_options,
      should_add_value=benchmark.ShouldAddValue,
      benchmark_enabled=True) as results:
    try:
      RunStorySet(
          page_test, story_set, possible_browser,
          benchmark.expectations, browser_options, finder_options, results,
          benchmark.max_failures, benchmark.MAX_NUM_VALUES)
      return_code = 1 if results.had_failures else 0
    except Exception as e: # pylint: disable=broad-except
      logging.fatal(
          'Benchmark execution interrupted by a fatal exception: %s(%s)' %
          (type(e), e))
      results.InterruptBenchmark(story_set, finder_options.pageset_repeat)
      exception_formatter.PrintFormattedException()
      return_code = 2

    benchmark_owners = benchmark.GetOwners()
    benchmark_component = benchmark.GetBugComponents()
    benchmark_documentation_url = benchmark.GetDocumentationLink()

    if benchmark_owners:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.OWNERS.name, benchmark_owners)

    if benchmark_component:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.BUG_COMPONENTS.name, benchmark_component)

    if benchmark_documentation_url:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.DOCUMENTATION_URLS.name, benchmark_documentation_url)

    try:
      if finder_options.upload_results:
        results.UploadTraceFilesToCloud()
        results.UploadArtifactsToCloud()
    finally:
      memory_debug.LogHostMemoryUsage()
      results.PrintSummary()
  return return_code
Example #17
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    1 if there is failure or 2 if there is an uncaught exception.
  """
    start = time.time()
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    expectations = benchmark.expectations
    if not possible_browser:
        print(
            'Cannot find browser of type %s. To list out all '
            'available browsers, rerun your command with '
            '--browser=list' % finder_options.browser_options.browser_type)
        return 1

    can_run_on_platform = benchmark._CanRunOnPlatform(
        possible_browser.platform, finder_options)

    expectations_disabled = expectations.IsBenchmarkDisabled(
        possible_browser.platform, finder_options)

    if expectations_disabled or not can_run_on_platform:
        print '%s is disabled on the selected browser' % benchmark.Name()
        if finder_options.run_disabled_tests and can_run_on_platform:
            print 'Running benchmark anyway due to: --also-run-disabled-tests'
        else:
            if can_run_on_platform:
                print 'Try --also-run-disabled-tests to force the benchmark to run.'
            else:
                print(
                    "This platform is not supported for this benchmark. If this is "
                    "in error please add it to the benchmark's supported platforms."
                )
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    should_add_value=benchmark.ShouldAddValue,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    with results_options.CreateResults(
            benchmark_metadata,
            finder_options,
            should_add_value=benchmark.ShouldAddValue,
            benchmark_enabled=True) as results:
        try:
            Run(pt,
                stories,
                finder_options,
                results,
                benchmark.max_failures,
                expectations=expectations,
                metadata=benchmark.GetMetadata(),
                max_num_values=benchmark.MAX_NUM_VALUES)
            return_code = 1 if results.had_failures else 0
            # We want to make sure that all expectations are linked to real stories;
            # this will log error messages if names do not match what is in the set.
            benchmark.GetBrokenExpectations(stories)
        except Exception:  # pylint: disable=broad-except
            logging.fatal(
                'Benchmark execution interrupted by a fatal exception.')
            results.telemetry_info.InterruptBenchmark()
            exception_formatter.PrintFormattedException()
            return_code = 2

        benchmark_owners = benchmark.GetOwners()
        benchmark_component = benchmark.GetBugComponents()

        if benchmark_owners:
            results.AddSharedDiagnostic(reserved_infos.OWNERS.name,
                                        benchmark_owners)

        if benchmark_component:
            results.AddSharedDiagnostic(reserved_infos.BUG_COMPONENTS.name,
                                        benchmark_component)

        try:
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud()
                results.UploadProfilingFilesToCloud()
                results.UploadArtifactsToCloud()
        finally:
            duration = time.time() - start
            results.AddSummaryValue(
                scalar.ScalarValue(None, 'benchmark_duration', 'minutes',
                                   duration / 60.0))
            results.AddDurationHistogram(duration * 1000.0)
            memory_debug.LogHostMemoryUsage()
            results.PrintSummary()
    return return_code
Example #18
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success
    1 if there was a failure
    2 if there was an uncaught exception.
  """
    benchmark.CustomizeOptions(finder_options)
    with results_options.CreateResults(
            finder_options,
            benchmark_name=benchmark.Name(),
            benchmark_description=benchmark.Description(),
            report_progress=not finder_options.suppress_gtest_report
    ) as results:

        possible_browser = browser_finder.FindBrowser(finder_options)
        if not possible_browser:
            print('No browser of type "%s" found for running benchmark "%s".' %
                  (finder_options.browser_options.browser_type,
                   benchmark.Name()))
            return -1
        if not _ShouldRunBenchmark(benchmark, possible_browser,
                                   finder_options):
            return -1

        test = benchmark.CreatePageTest(finder_options)
        test.__name__ = benchmark.__class__.__name__

        story_set = benchmark.CreateStorySet(finder_options)

        if isinstance(test, legacy_page_test.LegacyPageTest):
            if any(not isinstance(p, page.Page) for p in story_set.stories):
                raise Exception(
                    'PageTest must be used with StorySet containing only '
                    'telemetry.page.Page stories.')

        results.AddSharedDiagnostics(
            architecture=possible_browser.platform.GetArchName(),
            device_id=possible_browser.platform.GetDeviceId(),
            os_name=possible_browser.platform.GetOSName(),
            os_version=possible_browser.platform.GetOSVersionName(),
            owners=benchmark.GetOwners(),
            bug_components=benchmark.GetBugComponents(),
            documentation_urls=benchmark.GetDocumentationLinks(),
        )

        try:
            RunStorySet(test, story_set, finder_options, results,
                        benchmark.max_failures)
            if results.benchmark_interrupted:
                return_code = 2
            elif results.had_failures:
                return_code = 1
            elif results.had_successes:
                return_code = 0
            else:
                return_code = -1  # All stories were skipped.
        except Exception as exc:  # pylint: disable=broad-except
            interruption = 'Benchmark execution interrupted: %r' % exc
            results.InterruptBenchmark(interruption)
            exception_formatter.PrintFormattedException()
            return_code = 2
    return return_code
Example #19
def RunBenchmark(benchmark, finder_options):
  """Run this test with the given options.

  Returns:
    The number of failure values (up to 254) or 255 if there is an uncaught
    exception.
  """
  start = time.time()
  benchmark.CustomizeBrowserOptions(finder_options.browser_options)

  benchmark_metadata = benchmark.GetMetadata()
  possible_browser = browser_finder.FindBrowser(finder_options)
  expectations = benchmark.InitializeExpectations()

  if not possible_browser:
    print ('Cannot find browser of type %s. To list out all '
           'available browsers, rerun your command with '
           '--browser=list' %  finder_options.browser_options.browser_type)
    return 1

  can_run_on_platform = benchmark._CanRunOnPlatform(possible_browser.platform,
                                                    finder_options)

  # TODO(rnephew): Remove decorators.IsBenchmarkEnabled and IsBenchmarkDisabled
  # when we have fully moved to _CanRunOnPlatform().
  permanently_disabled = expectations.IsBenchmarkDisabled(
      possible_browser.platform, finder_options)
  temporarily_disabled = not decorators.IsBenchmarkEnabled(
      benchmark, possible_browser)

  if permanently_disabled or temporarily_disabled or not can_run_on_platform:
    print '%s is disabled on the selected browser' % benchmark.Name()
    if finder_options.run_disabled_tests and not permanently_disabled:
      print 'Running benchmark anyway due to: --also-run-disabled-tests'
    else:
      print 'Try --also-run-disabled-tests to force the benchmark to run.'
      # If chartjson is specified, this will print a dict indicating the
      # benchmark name and disabled state.
      with results_options.CreateResults(
          benchmark_metadata, finder_options,
          benchmark.ValueCanBeAddedPredicate, benchmark_enabled=False
          ) as results:
        results.PrintSummary()
      # When a disabled benchmark is run we now want to return success since
      # we are no longer filtering these out in the buildbot recipes.
      return 0

  pt = benchmark.CreatePageTest(finder_options)
  pt.__name__ = benchmark.__class__.__name__

  disabled_attr_name = decorators.DisabledAttributeName(benchmark)
  # pylint: disable=protected-access
  pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
  if hasattr(benchmark, '_enabled_strings'):
    # pylint: disable=protected-access
    pt._enabled_strings = benchmark._enabled_strings

  stories = benchmark.CreateStorySet(finder_options)

  if isinstance(pt, legacy_page_test.LegacyPageTest):
    if any(not isinstance(p, page.Page) for p in stories.stories):
      raise Exception(
          'PageTest must be used with StorySet containing only '
          'telemetry.page.Page stories.')

  with results_options.CreateResults(
      benchmark_metadata, finder_options,
      benchmark.ValueCanBeAddedPredicate, benchmark_enabled=True) as results:
    try:
      Run(pt, stories, finder_options, results, benchmark.max_failures,
          expectations=expectations, metadata=benchmark.GetMetadata())
      return_code = min(254, len(results.failures))
      # We want to make sure that all expectations are linked to real stories;
      # this will log error messages if names do not match what is in the set.
      benchmark.GetBrokenExpectations(stories)
    except Exception: # pylint: disable=broad-except
      results.telemetry_info.InterruptBenchmark()
      exception_formatter.PrintFormattedException()
      return_code = 255

    benchmark_owners = benchmark.GetOwners()
    benchmark_component = benchmark.GetBugComponents()

    if benchmark_owners:
      results.histograms.AddSharedDiagnostic(
          reserved_infos.OWNERS.name, benchmark_owners)

    if benchmark_component:
      results.histograms.AddSharedDiagnostic(
          reserved_infos.BUG_COMPONENTS.name, benchmark_component)

    try:
      if finder_options.upload_results:
        results.UploadTraceFilesToCloud()
        results.UploadProfilingFilesToCloud()
    finally:
      duration = time.time() - start
      results.AddSummaryValue(scalar.ScalarValue(
          None, 'benchmark_duration', 'minutes', duration / 60.0))
      results.PrintSummary()
  return return_code
Example #20
def RunBenchmark(benchmark, finder_options):
  """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success
    1 if there was a failure
    2 if there was an uncaught exception.
  """
  benchmark.CustomizeOptions(finder_options)
  with results_options.CreateResults(
      finder_options,
      benchmark_name=benchmark.Name(),
      benchmark_description=benchmark.Description(),
      report_progress=not finder_options.suppress_gtest_report,
      should_add_value=benchmark.ShouldAddValue) as results:

    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
      print ('No browser of type "%s" found for running benchmark "%s".' % (
          finder_options.browser_options.browser_type, benchmark.Name()))
      return -1
    benchmark.expectations.SetTags(
        possible_browser.GetTypExpectationsTags())
    if not _ShouldRunBenchmark(benchmark, possible_browser, finder_options):
      return -1

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    story_set = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
      if any(not isinstance(p, page.Page) for p in story_set.stories):
        raise Exception(
            'PageTest must be used with StorySet containing only '
            'telemetry.page.Page stories.')
    try:
      Run(pt, story_set, finder_options, results, benchmark.max_failures,
          expectations=benchmark.expectations,
          max_num_values=benchmark.MAX_NUM_VALUES)
      if results.benchmark_interrupted:
        return_code = 2
      elif results.had_failures:
        return_code = 1
      elif results.had_successes:
        return_code = 0
      else:
        return_code = -1  # All stories were skipped.
      # We want to make sure that all expectations are linked to real stories;
      # this will log error messages if names do not match what is in the set.
      benchmark.GetBrokenExpectations(story_set)
    except Exception as exc: # pylint: disable=broad-except
      interruption = 'Benchmark execution interrupted: %r' % exc
      results.InterruptBenchmark(interruption)
      exception_formatter.PrintFormattedException()
      return_code = 2

    benchmark_owners = benchmark.GetOwners()
    benchmark_component = benchmark.GetBugComponents()
    benchmark_documentation_url = benchmark.GetDocumentationLink()

    if benchmark_owners:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.OWNERS.name, benchmark_owners)

    if benchmark_component:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.BUG_COMPONENTS.name, benchmark_component)

    if benchmark_documentation_url:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.DOCUMENTATION_URLS.name, benchmark_documentation_url)

    if finder_options.upload_results:
      results_processor.UploadArtifactsToCloud(results)
  return return_code
Example #21
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    An exit code from exit_codes module describing what happened.
  """
    benchmark_name = benchmark.Name()
    if not re.match(_RE_VALID_TEST_SUITE_NAME, benchmark_name):
        logging.fatal('Invalid benchmark name: %s', benchmark_name)
        return 2  # exit_codes.FATAL_ERROR

    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
        print('No browser of type "%s" found for running benchmark "%s".' %
              (finder_options.browser_options.browser_type, benchmark.Name()))
        return exit_codes.ALL_TESTS_SKIPPED

    benchmark.CustomizeOptions(finder_options, possible_browser)

    with results_options.CreateResults(
            finder_options,
            benchmark_name=benchmark_name,
            benchmark_description=benchmark.Description(),
            report_progress=not finder_options.suppress_gtest_report
    ) as results:

        if not _ShouldRunBenchmark(benchmark, possible_browser,
                                   finder_options):
            return exit_codes.ALL_TESTS_SKIPPED

        test = benchmark.CreatePageTest(finder_options)
        test.__name__ = benchmark.__class__.__name__

        story_set = benchmark.CreateStorySet(finder_options)

        if isinstance(test, legacy_page_test.LegacyPageTest):
            if any(not isinstance(p, page.Page) for p in story_set.stories):
                raise Exception(
                    'PageTest must be used with StorySet containing only '
                    'telemetry.page.Page stories.')

        results.AddSharedDiagnostics(
            architecture=possible_browser.platform.GetArchName(),
            device_id=possible_browser.platform.GetDeviceId(),
            os_name=possible_browser.platform.GetOSName(),
            os_version=possible_browser.platform.GetOSVersionName(),
            owners=benchmark.GetOwners(),
            bug_components=benchmark.GetBugComponents(),
            documentation_urls=benchmark.GetDocumentationLinks(),
            info_blurb=benchmark.GetInfoBlurb(),
        )

        try:
            RunStorySet(test, story_set, finder_options, results,
                        benchmark.max_failures, possible_browser)
            if results.benchmark_interrupted:
                return_code = exit_codes.FATAL_ERROR
            elif results.had_failures:
                return_code = exit_codes.TEST_FAILURE
            elif results.had_successes:
                return_code = exit_codes.SUCCESS
            else:
                return_code = exit_codes.ALL_TESTS_SKIPPED
        except Exception as exc:  # pylint: disable=broad-except
            interruption = 'Benchmark execution interrupted: %r' % exc
            results.InterruptBenchmark(interruption)
            exception_formatter.PrintFormattedException()
            return_code = exit_codes.FATAL_ERROR
    return return_code
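Example #21 rejects benchmark names that do not match _RE_VALID_TEST_SUITE_NAME, a pattern that is not shown here. The sketch below uses a hypothetical pattern (word characters, dots and dashes) purely to illustrate the guard; the real regular expression may differ.

import logging
import re

# Hypothetical pattern; the real _RE_VALID_TEST_SUITE_NAME is not shown above.
_RE_VALID_TEST_SUITE_NAME = r'^[\w.-]+$'

FATAL_ERROR = 2  # stand-in for exit_codes.FATAL_ERROR


def check_benchmark_name(benchmark_name):
    """Returns None if the name looks valid, or a fatal exit code."""
    if not re.match(_RE_VALID_TEST_SUITE_NAME, benchmark_name):
        logging.fatal('Invalid benchmark name: %s', benchmark_name)
        return FATAL_ERROR
    return None


if __name__ == '__main__':
    assert check_benchmark_name('system_health.common_desktop') is None
    assert check_benchmark_name('bad name with spaces') == FATAL_ERROR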
Example #22
def RunStorySet(test, story_set, possible_browser, expectations,
                browser_options, finder_options, results,
                max_failures=None, max_num_values=sys.maxint):
  """Runs a given test against a given page_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  We "white list" certain exceptions for which the story runner
  can continue running the remaining stories.

  Args:
    test: a test to be run: either a StoryTest subclass for newer timeline
      based benchmarks, or a LegacyPageTest to support older benchmarks.
    story_set: an instance of StorySet, a collection of stories.
    possible_browser: an instance of PossibleBrowser.
    expectations: an instance of Expectations.
    browser_options: options to be passed to browser.
    finder_options: options controlling the execution of benchmark. This
      can be an instance of BrowserFinderOptions class, but only options
      relevant to benchmark execution will be read.
    max_failures: maximum allowed number of failures.
    max_num_values: maximum allowed number of values.
  """

  if (not finder_options.use_live_sites and
      browser_options.wpr_mode != wpr_modes.WPR_RECORD):
    # Get the serving dirs of the filtered stories.
    # TODO(crbug.com/883798): removing story_set._serving_dirs
    serving_dirs = story_set._serving_dirs.copy()
    for story in story_set:
      if story.serving_dir:
        serving_dirs.add(story.serving_dir)

    if story_set.bucket:
      for directory in serving_dirs:
        cloud_storage.GetFilesInDirectoryIfChanged(directory,
                                                   story_set.bucket)
    if story_set.archive_data_file and not _UpdateAndCheckArchives(
        story_set.archive_data_file, story_set.wpr_archive_info, story_set):
      return

  # Effective max failures gives priority to command-line flag value.
  effective_max_failures = finder_options.max_failures
  if effective_max_failures is None:
    effective_max_failures = max_failures

  state = None
  device_info_diags = {}
  # TODO(crbug.com/866458): unwind the nested blocks
  # pylint: disable=too-many-nested-blocks
  try:
    pageset_repeat = finder_options.pageset_repeat
    for storyset_repeat_counter in xrange(pageset_repeat):
      for story in story_set:
        start_timestamp = time.time()
        if not state:
          # Construct shared state by using a copy of finder_options. Shared
          # state may update the finder_options. If we tear down the shared
          # state after this story run, we want to construct the shared
          # state for the next story from the original finder_options.
          state = story_set.shared_state_class(
              test, finder_options.Copy(), story_set, possible_browser)

        results.WillRunPage(story, storyset_repeat_counter)
        story_run = results.current_page_run

        if expectations:
          disabled = expectations.IsStoryDisabled(
              story, state.platform, finder_options)
          if disabled:
            if finder_options.run_disabled_tests:
              logging.warning('Force running a disabled story: %s' %
                              story.name)
            else:
              results.Skip(disabled)
              results.DidRunPage(story)
              continue

        try:
          if state.platform:
            state.platform.WaitForBatteryTemperature(35)
            _WaitForThermalThrottlingIfNeeded(state.platform)
          _RunStoryAndProcessErrorIfNeeded(story, results, state, test)

          num_values = len(results.all_page_specific_values)
          # TODO(#4259): Convert this to an exception-based failure
          if num_values > max_num_values:
            msg = 'Too many values: %d > %d' % (num_values, max_num_values)
            logging.error(msg)
            results.Fail(msg)

          device_info_diags = _MakeDeviceInfoDiagnostics(state)
        except _UNHANDLEABLE_ERRORS:
          # Nothing else we should do for these. Re-raise the error.
          raise
        except Exception:  # pylint: disable=broad-except
          # For all other errors, try to give the rest of stories a chance
          # to run by tearing down the state and creating a new state instance
          # in the next iteration.
          try:
            # If TearDownState raises, do not catch the exception.
            # (The Error was saved as a failure value.)
            state.TearDownState()
          finally:
            # Later finally-blocks use state, so ensure it is cleared.
            state = None
        finally:
          has_existing_exception = sys.exc_info() != (None, None, None)
          try:
            if state and state.platform:
              _CheckThermalThrottling(state.platform)
            results.DidRunPage(story)
            story_run.SetDuration(time.time() - start_timestamp)
          except Exception:  # pylint: disable=broad-except
            if not has_existing_exception:
              raise
            # Print current exception and propagate existing exception.
            exception_formatter.PrintFormattedException(
                msg='Exception from result processing:')
        if (effective_max_failures is not None and
            results.num_failed > effective_max_failures):
          logging.error('Too many failures. Aborting.')
          return
  finally:
    results.ComputeTimelineBasedMetrics()
    results.PopulateHistogramSet()

    for name, diag in device_info_diags.iteritems():
      results.AddSharedDiagnosticToAllHistograms(name, diag)

    if state:
      has_existing_exception = sys.exc_info() != (None, None, None)
      try:
        state.TearDownState()
      except Exception: # pylint: disable=broad-except
        if not has_existing_exception:
          raise
        # Print current exception and propagate existing exception.
        exception_formatter.PrintFormattedException(
            msg='Exception from TearDownState:')
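Both the per-story finally-block and the final TearDownState call in Example #22 use the same idiom: check sys.exc_info() before cleanup so that a failure during cleanup never masks an exception that is already propagating. A minimal standalone sketch of that idiom:

import sys
import traceback

def run_with_cleanup(body, cleanup):
    # A failure in the cleanup step must not mask an exception already in flight.
    try:
        body()
    finally:
        has_existing_exception = sys.exc_info() != (None, None, None)
        try:
            cleanup()
        except Exception:
            if not has_existing_exception:
                raise
            # Log the cleanup failure, then let the original error propagate.
            traceback.print_exc()


if __name__ == '__main__':
    def failing_body():
        raise ValueError('story failed')

    def failing_cleanup():
        raise RuntimeError('teardown also failed')

    try:
        run_with_cleanup(failing_body, failing_cleanup)
    except ValueError:
        print('original exception survived the cleanup failure')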
Example #23
def RunStorySet(test,
                story_set,
                finder_options,
                results,
                max_failures=None,
                expectations=None,
                max_num_values=sys.maxint):
    """Runs a test against a story_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt. Some
  other exceptions are handled and recorded before allowing the remaining
  stories to run.

  Args:
    test: Either a StoryTest or a LegacyPageTest instance.
    story_set: A StorySet instance with the set of stories to run.
    finder_options: The parsed command line options to customize the run.
    results: A PageTestResults object used to collect results and artifacts.
    max_failures: Max number of story run failures allowed before aborting
      the entire story run. It's overridden by finder_options.max_failures
      if given.
    expectations: Benchmark expectations used to determine disabled stories.
    max_num_values: Max number of legacy values allowed before aborting the
      story run.
  """
    stories = story_set.stories
    for s in stories:
        ValidateStory(s)

    # Filter page set based on options.
    stories = story_module.StoryFilter.FilterStories(stories)
    wpr_archive_info = story_set.wpr_archive_info
    # Sort the stories based on the archive name, to minimize how often the
    # network replay-server needs to be restarted.
    if wpr_archive_info:
        stories = sorted(stories, key=wpr_archive_info.WprFilePathForStory)

    if finder_options.print_only:
        if finder_options.print_only == 'tags':
            tags = set(itertools.chain.from_iterable(s.tags for s in stories))
            print 'List of tags:\n%s' % '\n'.join(tags)
            return
        include_tags = finder_options.print_only == 'both'
        if include_tags:
            format_string = '  %%-%ds %%s' % max(len(s.name) for s in stories)
        else:
            format_string = '%s%s'
        for s in stories:
            print format_string % (s.name,
                                   ','.join(s.tags) if include_tags else '')
        return

    if (not finder_options.use_live_sites and
            finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
        # Get the serving dirs of the filtered stories.
        # TODO(crbug.com/883798): removing story_set._serving_dirs
        serving_dirs = story_set._serving_dirs.copy()
        for story in stories:
            if story.serving_dir:
                serving_dirs.add(story.serving_dir)

        if story_set.bucket:
            for directory in serving_dirs:
                cloud_storage.GetFilesInDirectoryIfChanged(
                    directory, story_set.bucket)
        if story_set.archive_data_file and not _UpdateAndCheckArchives(
                story_set.archive_data_file, wpr_archive_info, stories):
            return

    if not stories:
        return

    # Effective max failures gives priority to command-line flag value.
    effective_max_failures = finder_options.max_failures
    if effective_max_failures is None:
        effective_max_failures = max_failures

    possible_browser = _GetPossibleBrowser(finder_options)

    if not finder_options.run_full_story_set:
        tag_filter = story_set.GetAbridgedStorySetTagFilter()
        if tag_filter:
            logging.warn(
                'Running an abridged set of stories (tagged {%s}), '
                'use --run-full-story-set if you need to run all stories' %
                tag_filter)
            stories = [story for story in stories if tag_filter in story.tags]

    state = None
    device_info_diags = {}
    # TODO(crbug.com/866458): unwind the nested blocks
    # pylint: disable=too-many-nested-blocks
    try:
        pageset_repeat = finder_options.pageset_repeat
        for storyset_repeat_counter in xrange(pageset_repeat):
            for story in stories:
                if not state:
                    # Construct shared state by using a copy of finder_options. Shared
                    # state may update the finder_options. If we tear down the shared
                    # state after this story run, we want to construct the shared
                    # state for the next story from the original finder_options.
                    state = story_set.shared_state_class(
                        test, finder_options.Copy(), story_set,
                        possible_browser)

                results.WillRunPage(story, storyset_repeat_counter)

                if expectations:
                    disabled = expectations.IsStoryDisabled(story)
                    if disabled:
                        if finder_options.run_disabled_tests:
                            logging.warning(
                                'Force running a disabled story: %s' %
                                story.name)
                        else:
                            results.Skip(disabled)
                            results.DidRunPage(story)
                            continue

                if results.benchmark_interrupted:
                    results.Skip(results.benchmark_interruption,
                                 is_expected=False)
                    results.DidRunPage(story)
                    continue

                try:
                    if state.platform:
                        state.platform.WaitForBatteryTemperature(35)
                        if finder_options.wait_for_cpu_temp:
                            state.platform.WaitForCpuTemperature(38.0)
                        _WaitForThermalThrottlingIfNeeded(state.platform)
                    _RunStoryAndProcessErrorIfNeeded(story, results, state,
                                                     test)

                    num_values = sum(1 for _ in results.IterAllLegacyValues())
                    # TODO(#4259): Convert this to an exception-based failure
                    if num_values > max_num_values:
                        msg = 'Too many values: %d > %d' % (num_values,
                                                            max_num_values)
                        logging.error(msg)
                        results.Fail(msg)

                    device_info_diags = _MakeDeviceInfoDiagnostics(state)
                except _UNHANDLEABLE_ERRORS as exc:
                    interruption = (
                        'Benchmark execution interrupted by a fatal exception: %r'
                        % exc)
                    results.InterruptBenchmark(interruption)
                    exception_formatter.PrintFormattedException()
                except Exception:  # pylint: disable=broad-except
                    logging.exception('Exception raised during story run.')
                    results.Fail(sys.exc_info())
                    # For all other errors, try to give the rest of stories a chance
                    # to run by tearing down the state and creating a new state instance
                    # in the next iteration.
                    try:
                        # If TearDownState raises, do not catch the exception.
                        # (The Error was saved as a failure value.)
                        state.TearDownState()
                    except Exception as exc:  # pylint: disable=broad-except
                        interruption = (
                            'Benchmark execution interrupted by a fatal exception: %r'
                            % exc)
                        results.InterruptBenchmark(interruption)
                        exception_formatter.PrintFormattedException()
                    finally:
                        # Later finally-blocks use state, so ensure it is cleared.
                        state = None
                finally:
                    if state and state.platform:
                        _CheckThermalThrottling(state.platform)
                    results.DidRunPage(story)
                if (effective_max_failures is not None
                        and results.num_failed > effective_max_failures):
                    interruption = (
                        'Too many stories failed. Aborting the rest of the stories.'
                    )
                    results.InterruptBenchmark(interruption)
    finally:
        results_processor.ComputeTimelineBasedMetrics(results)
        results.PopulateHistogramSet()
        results.AddSharedDiagnostics(**device_info_diags)

        if state:
            has_existing_exception = sys.exc_info() != (None, None, None)
            try:
                state.TearDownState()
            except Exception:  # pylint: disable=broad-except
                if not has_existing_exception:
                    raise
                # Print current exception and propagate existing exception.
                exception_formatter.PrintFormattedException(
                    msg='Exception from TearDownState:')
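Example #23 sorts the filtered stories by their WPR archive path so that the record/replay server is restarted as rarely as possible. The sketch below shows the same grouping idea with a plain dict standing in for WprArchiveInfo.WprFilePathForStory; the story names and archive files are made up.

import collections

Story = collections.namedtuple('Story', ['name'])

# Hypothetical story -> archive mapping, standing in for WprArchiveInfo.
_ARCHIVE_FOR_STORY = {
    'load:news': 'news.wprgo',
    'load:search': 'search.wprgo',
    'browse:news': 'news.wprgo',
}


def sort_by_archive(stories):
    """Groups stories sharing a WPR archive so replay restarts are minimized."""
    return sorted(stories, key=lambda s: _ARCHIVE_FOR_STORY[s.name])


if __name__ == '__main__':
    stories = [Story('load:news'), Story('load:search'), Story('browse:news')]
    print([s.name for s in sort_by_archive(stories)])
    # -> ['load:news', 'browse:news', 'load:search'] (both news stories adjacent)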
Example #24
def Run(test,
        story_set,
        finder_options,
        results,
        max_failures=None,
        expectations=None,
        max_num_values=sys.maxint):
    """Runs a given test against a given page_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  We "white list" certain exceptions for which the story runner
  can continue running the remaining stories.
  """
    for s in story_set:
        ValidateStory(s)

    # Filter page set based on options.
    stories = story_module.StoryFilter.FilterStorySet(story_set)

    if finder_options.print_only:
        if finder_options.print_only == 'tags':
            tags = set(itertools.chain.from_iterable(s.tags for s in stories))
            print 'List of tags:\n%s' % '\n'.join(tags)
            return
        include_tags = finder_options.print_only == 'both'
        if include_tags:
            format_string = '  %%-%ds %%s' % max(len(s.name) for s in stories)
        else:
            format_string = '%s%s'
        for s in stories:
            print format_string % (s.name,
                                   ','.join(s.tags) if include_tags else '')
        return

    if (not finder_options.use_live_sites and
            finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
        # Get the serving dirs of the filtered stories.
        # TODO(crbug.com/883798): removing story_set._serving_dirs
        serving_dirs = story_set._serving_dirs.copy()
        for story in stories:
            if story.serving_dir:
                serving_dirs.add(story.serving_dir)

        if story_set.bucket:
            for directory in serving_dirs:
                cloud_storage.GetFilesInDirectoryIfChanged(
                    directory, story_set.bucket)
        if story_set.archive_data_file and not _UpdateAndCheckArchives(
                story_set.archive_data_file, story_set.wpr_archive_info,
                stories):
            return

    if not stories:
        return

    # Effective max failures gives priority to command-line flag value.
    effective_max_failures = finder_options.max_failures
    if effective_max_failures is None:
        effective_max_failures = max_failures

    state = None
    device_info_diags = {}
    # TODO(crbug.com/866458): unwind the nested blocks
    # pylint: disable=too-many-nested-blocks
    try:
        pageset_repeat = finder_options.pageset_repeat
        for storyset_repeat_counter in xrange(pageset_repeat):
            for story in stories:
                start_timestamp = time.time()
                if not state:
                    # Construct shared state by using a copy of finder_options. Shared
                    # state may update the finder_options. If we tear down the shared
                    # state after this story run, we want to construct the shared
                    # state for the next story from the original finder_options.
                    state = story_set.shared_state_class(
                        test, finder_options.Copy(), story_set)

                results.WillRunPage(story, storyset_repeat_counter)
                story_run = results.current_page_run

                if expectations:
                    disabled = expectations.IsStoryDisabled(
                        story, state.platform, finder_options)
                    if disabled and not finder_options.run_disabled_tests:
                        results.Skip(disabled)
                        results.DidRunPage(story)
                        continue

                try:
                    if state.platform:
                        state.platform.WaitForBatteryTemperature(35)
                        _WaitForThermalThrottlingIfNeeded(state.platform)
                    _RunStoryAndProcessErrorIfNeeded(story, results, state,
                                                     test)

                    num_values = len(results.all_page_specific_values)
                    # TODO(#4259): Convert this to an exception-based failure
                    if num_values > max_num_values:
                        msg = 'Too many values: %d > %d' % (num_values,
                                                            max_num_values)
                        logging.error(msg)
                        results.Fail(msg)

                    device_info_diags = _MakeDeviceInfoDiagnostics(state)
                except _UNHANDLEABLE_ERRORS:
                    # Nothing else we should do for these. Re-raise the error.
                    raise
                except Exception:  # pylint: disable=broad-except
                    # For all other errors, try to give the rest of stories a chance
                    # to run by tearing down the state and creating a new state instance
                    # in the next iteration.
                    try:
                        # If TearDownState raises, do not catch the exception.
                        # (The Error was saved as a failure value.)
                        state.TearDownState()
                    finally:
                        # Later finally-blocks use state, so ensure it is cleared.
                        state = None
                finally:
                    has_existing_exception = sys.exc_info() != (None, None,
                                                                None)
                    try:
                        if state and state.platform:
                            _CheckThermalThrottling(state.platform)
                        results.DidRunPage(story)
                        story_run.SetDuration(time.time() - start_timestamp)
                    except Exception:  # pylint: disable=broad-except
                        if not has_existing_exception:
                            raise
                        # Print current exception and propagate existing exception.
                        exception_formatter.PrintFormattedException(
                            msg='Exception from result processing:')
                if (effective_max_failures is not None
                        and results.num_failed > effective_max_failures):
                    logging.error('Too many failures. Aborting.')
                    return
    finally:
        results.PopulateHistogramSet()

        for name, diag in device_info_diags.iteritems():
            results.AddSharedDiagnosticToAllHistograms(name, diag)

        if state:
            has_existing_exception = sys.exc_info() != (None, None, None)
            try:
                state.TearDownState()
            except Exception:  # pylint: disable=broad-except
                if not has_existing_exception:
                    raise
                # Print current exception and propagate existing exception.
                exception_formatter.PrintFormattedException(
                    msg='Exception from TearDownState:')
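The core recovery strategy in Example #24 (and the other runners) is: build the shared state lazily, tear it down when a story fails with an unexpected error, and rebuild it for the next story so the remaining stories still get a chance to run. The sketch below reproduces just that lifecycle with a stand-in SharedState class; it is not Telemetry's shared state.

class SharedState(object):
    def __init__(self):
        print('building shared state')

    def run_story(self, story):
        if story == 'bad':
            raise RuntimeError('unexpected failure in %s' % story)
        print('ran %s' % story)

    def tear_down(self):
        print('tearing down shared state')


def run_stories(stories):
    state = None
    for story in stories:
        if not state:
            state = SharedState()  # rebuilt after any teardown
        try:
            state.run_story(story)
        except Exception:  # give the remaining stories a chance to run
            try:
                state.tear_down()
            finally:
                state = None
    if state:
        state.tear_down()


if __name__ == '__main__':
    run_stories(['a', 'bad', 'b'])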
Example #25
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    The number of failure values (up to 254) or 255 if there is an uncaught
    exception.
  """
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    possible_browser = browser_finder.FindBrowser(finder_options)
    if possible_browser and benchmark.ShouldDisable(possible_browser):
        logging.warning('%s is disabled on the selected browser',
                        benchmark.Name())
        if finder_options.run_disabled_tests:
            logging.warning(
                'Running benchmark anyway due to: --also-run-disabled-tests')
        else:
            logging.warning(
                'Try --also-run-disabled-tests to force the benchmark to run.')
            return 1

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)
    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    should_tear_down_state_after_each_story_run = (
        benchmark.ShouldTearDownStateAfterEachStoryRun())
    # HACK: restarting shared state has huge overhead on cros (crbug.com/645329),
    # hence we default this to False when test is run against CrOS.
    # TODO(cros-team): figure out ways to remove this hack.
    if (possible_browser.platform.GetOSName() == 'chromeos' and
            not benchmark.IsShouldTearDownStateAfterEachStoryRunOverriden()):
        should_tear_down_state_after_each_story_run = False

    benchmark_metadata = benchmark.GetMetadata()
    with results_options.CreateResults(
            benchmark_metadata, finder_options,
            benchmark.ValueCanBeAddedPredicate) as results:
        try:
            Run(pt, stories, finder_options, results, benchmark.max_failures,
                should_tear_down_state_after_each_story_run,
                benchmark.ShouldTearDownStateAfterEachStorySetRun())
            return_code = min(254, len(results.failures))
        except Exception:
            exception_formatter.PrintFormattedException()
            return_code = 255

        try:
            if finder_options.upload_results:
                bucket = finder_options.upload_bucket
                if bucket in cloud_storage.BUCKET_ALIASES:
                    bucket = cloud_storage.BUCKET_ALIASES[bucket]
                results.UploadTraceFilesToCloud(bucket)
                results.UploadProfilingFilesToCloud(bucket)
        finally:
            results.PrintSummary()
    return return_code
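Example #25 follows the older exit-code convention described in its docstring: the exit status carries the number of failures, capped at 254, with 255 reserved for an uncaught exception. A tiny standalone version of that mapping:

def exit_code_from_failures(num_failures, uncaught_exception=False):
    # 255 is reserved for an uncaught exception; failure counts cap at 254.
    if uncaught_exception:
        return 255
    return min(254, num_failures)


if __name__ == '__main__':
    assert exit_code_from_failures(0) == 0
    assert exit_code_from_failures(3) == 3
    assert exit_code_from_failures(1000) == 254   # capped
    assert exit_code_from_failures(0, uncaught_exception=True) == 255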
Example #26
def _RunStoryAndProcessErrorIfNeeded(story, results, state, test):
    def ProcessError(exc=None):
        state.DumpStateUponFailure(story, results)

        # Dump app crash, if present
        if exc:
            if isinstance(exc, exceptions.AppCrashException):
                minidump_path = exc.minidump_path
                if minidump_path:
                    results.AddArtifact(story.name, 'minidump', minidump_path)

        # Note: calling Fail on the results object also normally causes the
        # progress_reporter to log it in the output.
        results.Fail(sys.exc_info())

    with CaptureLogsAsArtifacts(results, story.name):
        try:
            if isinstance(test, story_test.StoryTest):
                test.WillRunStory(state.platform)
            state.WillRunStory(story)

            if not state.CanRunStory(story):
                results.Skip('Skipped because story is not supported '
                             '(SharedState.CanRunStory() returns False).')
                return
            state.RunStory(results)
            if isinstance(test, story_test.StoryTest):
                test.Measure(state.platform, results)
        except (legacy_page_test.Failure, exceptions.TimeoutException,
                exceptions.LoginException, exceptions.ProfilingException,
                py_utils.TimeoutException) as exc:
            ProcessError(exc)
        except exceptions.Error as exc:
            ProcessError(exc)
            raise
        except page_action.PageActionNotSupported as exc:
            results.Skip('Unsupported page action: %s' % exc)
        except Exception:
            ProcessError()
            raise
        finally:
            has_existing_exception = (sys.exc_info() != (None, None, None))
            try:
                # We attempt to stop tracing and/or metric collecting before possibly
                # closing the browser. Closing the browser first and stopping tracing
                # later appeared to cause issues where subsequent browser instances
                # would not launch correctly on some devices (see: crbug.com/720317).
                # The following normally cause tracing and/or metric collecting to stop.
                if isinstance(test, story_test.StoryTest):
                    test.DidRunStory(state.platform, results)
                else:
                    test.DidRunPage(state.platform)
                # And the following normally causes the browser to be closed.
                state.DidRunStory(results)
            except Exception:  # pylint: disable=broad-except
                if not has_existing_exception:
                    state.DumpStateUponFailure(story, results)
                    raise
                # Print current exception and propagate existing exception.
                exception_formatter.PrintFormattedException(
                    msg='Exception raised when cleaning story run: ')
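Example #26 wraps the story run in CaptureLogsAsArtifacts, whose implementation is not shown here. The sketch below is only a guess at the general shape of such a helper (Python 3): attach a temporary logging handler for the duration of the block, then hand the log file to a caller-supplied callback, which stands in for whatever the real helper does with the results object.

import contextlib
import logging
import os
import tempfile

@contextlib.contextmanager
def capture_logs_as_artifact(add_artifact):
    # Create an empty temp file that the logging handler will write to.
    fd, log_path = tempfile.mkstemp(suffix='.log')
    os.close(fd)
    handler = logging.FileHandler(log_path)
    logging.getLogger().addHandler(handler)
    try:
        yield
    finally:
        logging.getLogger().removeHandler(handler)
        handler.close()
        add_artifact('logs', log_path)


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    with capture_logs_as_artifact(lambda name, path: print(name, path)):
        logging.info('inside the story run')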
Example #27
def _RunStoryAndProcessErrorIfNeeded(story, results, state, test):
    def ProcessError(exc, log_message):
        logging.exception(log_message)
        state.DumpStateUponStoryRunFailure(results)

        # Dump app crash, if present
        if exc:
            if isinstance(exc, exceptions.AppCrashException):
                minidump_path = exc.minidump_path
                if minidump_path:
                    with results.CaptureArtifact('minidump.dmp') as path:
                        shutil.move(minidump_path, path)

        # Note: calling Fail on the results object also normally causes the
        # progress_reporter to log it in the output.
        results.Fail('Exception raised running %s' % story.name)

    with CaptureLogsAsArtifacts(results):
        try:
            if isinstance(test, story_test.StoryTest):
                test.WillRunStory(state.platform)
            state.WillRunStory(story)

            if not state.CanRunStory(story):
                results.Skip('Skipped because story is not supported '
                             '(SharedState.CanRunStory() returns False).')
                return
            story.wpr_mode = state.wpr_mode
            state.RunStory(results)
            if isinstance(test, story_test.StoryTest):
                test.Measure(state.platform, results)
        except page_action.PageActionNotSupported as exc:
            results.Skip('Unsupported page action: %s' % exc)
        except (legacy_page_test.Failure, exceptions.TimeoutException,
                exceptions.LoginException, py_utils.TimeoutException) as exc:
            ProcessError(exc, log_message='Handleable error')
        except _UNHANDLEABLE_ERRORS as exc:
            ProcessError(exc,
                         log_message=('Unhandleable error. '
                                      'Benchmark run will be interrupted'))
            raise
        except Exception as exc:  # pylint: disable=broad-except
            ProcessError(exc,
                         log_message=('Possibly handleable error. '
                                      'Will try to restart shared state'))
            # The caller, RunStorySet, will catch this exception, destroy and
            # create a new shared state.
            raise
        finally:
            has_existing_exception = (sys.exc_info() != (None, None, None))
            try:
                # We attempt to stop tracing and/or metric collecting before possibly
                # closing the browser. Closing the browser first and stopping tracing
                # later appeared to cause issues where subsequent browser instances
                # would not launch correctly on some devices (see: crbug.com/720317).
                # The following normally cause tracing and/or metric collecting to stop.
                if isinstance(test, story_test.StoryTest):
                    test.DidRunStory(state.platform, results)
                else:
                    test.DidRunPage(state.platform)
                # And the following normally causes the browser to be closed.
                state.DidRunStory(results)
            except Exception:  # pylint: disable=broad-except
                if not has_existing_exception:
                    state.DumpStateUponStoryRunFailure(results)
                    raise
                # Print current exception and propagate existing exception.
                exception_formatter.PrintFormattedException(
                    msg='Exception raised when cleaning story run: ')
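Example #27 saves a crash minidump through results.CaptureArtifact('minidump.dmp'), which yields a destination path and records the file once the with-block exits. That method's implementation is not shown; the sketch below mimics the apparent contract with a plain dict as the artifact registry.

import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def capture_artifact(artifacts, name):
    # Yield a destination path; record it only if the caller produced a file.
    out_dir = tempfile.mkdtemp()
    dest = os.path.join(out_dir, name)
    try:
        yield dest
    finally:
        if os.path.exists(dest):
            artifacts[name] = dest


if __name__ == '__main__':
    artifacts = {}
    # Pretend this file is a crash minidump left behind by the browser.
    fd, fake_minidump = tempfile.mkstemp(suffix='.dmp')
    os.close(fd)
    with capture_artifact(artifacts, 'minidump.dmp') as path:
        shutil.move(fake_minidump, path)
    print(artifacts)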
Example #28
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success
    1 if there was a failure
    2 if there was an uncaught exception.
  """
    benchmark.CustomizeOptions(finder_options)
    possible_browser = browser_finder.FindBrowser(finder_options)
    if not _ShouldRunBenchmark(benchmark, possible_browser, finder_options):
        return -1

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    with results_options.CreateResults(
            finder_options,
            benchmark_name=benchmark.Name(),
            benchmark_description=benchmark.Description(),
            benchmark_enabled=True,
            should_add_value=benchmark.ShouldAddValue) as results:
        try:
            Run(pt,
                stories,
                finder_options,
                results,
                benchmark.max_failures,
                expectations=benchmark.expectations,
                max_num_values=benchmark.MAX_NUM_VALUES)
            if results.had_failures:
                return_code = 1
            elif results.had_successes_not_skipped:
                return_code = 0
            else:
                return_code = -1  # All stories were skipped.
            # We want to make sure that all expectations are linked to real stories;
            # this will log error messages if names do not match what is in the set.
            benchmark.GetBrokenExpectations(stories)
        except Exception as e:  # pylint: disable=broad-except

            logging.fatal(
                'Benchmark execution interrupted by a fatal exception: %s(%s)'
                % (type(e), e))

            filtered_stories = story_module.StoryFilter.FilterStorySet(stories)
            results.InterruptBenchmark(filtered_stories,
                                       finder_options.pageset_repeat)
            exception_formatter.PrintFormattedException()
            return_code = 2

        benchmark_owners = benchmark.GetOwners()
        benchmark_component = benchmark.GetBugComponents()
        benchmark_documentation_url = benchmark.GetDocumentationLink()

        if benchmark_owners:
            results.AddSharedDiagnosticToAllHistograms(
                reserved_infos.OWNERS.name, benchmark_owners)

        if benchmark_component:
            results.AddSharedDiagnosticToAllHistograms(
                reserved_infos.BUG_COMPONENTS.name, benchmark_component)

        if benchmark_documentation_url:
            results.AddSharedDiagnosticToAllHistograms(
                reserved_infos.DOCUMENTATION_URLS.name,
                benchmark_documentation_url)

        try:
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud()
                results.UploadArtifactsToCloud()
        finally:
            memory_debug.LogHostMemoryUsage()
            results.PrintSummary()
    return return_code
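The tail of Example #28 is careful about ordering: uploading traces and artifacts is best-effort, but the host-memory log and the summary must run even if the upload raises. A minimal sketch of that try/finally arrangement, with throwaway callables in place of the real upload and reporting steps:

def finish_run(upload, log_memory, print_summary):
    # The upload may fail; memory logging and the summary still run.
    try:
        upload()
    finally:
        log_memory()
        print_summary()


def _failing_upload():
    raise IOError('upload failed')


if __name__ == '__main__':
    try:
        finish_run(_failing_upload,
                   log_memory=lambda: print('memory usage logged'),
                   print_summary=lambda: print('summary printed'))
    except IOError:
        print('upload error propagated after the summary')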
Example #29
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success
    1 if there was a failure
    2 if there was an uncaught exception.
  """
    benchmark.CustomizeOptions(finder_options)
    with results_options.CreateResults(
            finder_options,
            benchmark_name=benchmark.Name(),
            benchmark_description=benchmark.Description(),
            report_progress=not finder_options.suppress_gtest_report
    ) as results:

        possible_browser = browser_finder.FindBrowser(finder_options)
        if not possible_browser:
            print('No browser of type "%s" found for running benchmark "%s".' %
                  (finder_options.browser_options.browser_type,
                   benchmark.Name()))
            return -1
        typ_expectation_tags = possible_browser.GetTypExpectationsTags()
        logging.info(
            'The following expectations condition tags were generated %s',
            str(typ_expectation_tags))
        try:
            benchmark.expectations.SetTags(
                typ_expectation_tags,
                not finder_options.skip_typ_expectations_tags_validation)
        except ValueError as e:  # pylint: disable=broad-except
            traceback.print_exc(file=sys.stdout)
            logging.error(
                str(e) +
                '\nYou can use the --skip-typ-expectations-tags-validation '
                'argument to suppress this exception.')
            return -1

        if not _ShouldRunBenchmark(benchmark, possible_browser,
                                   finder_options):
            return -1

        test = benchmark.CreatePageTest(finder_options)
        test.__name__ = benchmark.__class__.__name__

        story_set = benchmark.CreateStorySet(finder_options)

        if isinstance(test, legacy_page_test.LegacyPageTest):
            if any(not isinstance(p, page.Page) for p in story_set.stories):
                raise Exception(
                    'PageTest must be used with StorySet containing only '
                    'telemetry.page.Page stories.')
        try:
            RunStorySet(test,
                        story_set,
                        finder_options,
                        results,
                        benchmark.max_failures,
                        expectations=benchmark.expectations,
                        max_num_values=benchmark.MAX_NUM_VALUES)
            if results.benchmark_interrupted:
                return_code = 2
            elif results.had_failures:
                return_code = 1
            elif results.had_successes:
                return_code = 0
            else:
                return_code = -1  # All stories were skipped.
        except Exception as exc:  # pylint: disable=broad-except
            interruption = 'Benchmark execution interrupted: %r' % exc
            results.InterruptBenchmark(interruption)
            exception_formatter.PrintFormattedException()
            return_code = 2

        # TODO(crbug.com/981349): merge two calls to AddSharedDiagnostics
        # (see RunStorySet() method for the second one).
        results.AddSharedDiagnostics(
            owners=benchmark.GetOwners(),
            bug_components=benchmark.GetBugComponents(),
            documentation_urls=benchmark.GetDocumentationLinks(),
        )

        if finder_options.upload_results:
            results_processor.UploadArtifactsToCloud(results)
    return return_code
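Example #29 validates the typ expectation tags generated for the chosen browser and converts a ValueError into an error message plus a -1 return, unless --skip-typ-expectations-tags-validation was passed. The checker below is hypothetical, with an assumed set of known tags, and only illustrates that validate-or-bail flow.

import logging

# Assumed set of known tags, for illustration only.
KNOWN_TAGS = {'android', 'linux', 'mac', 'win', 'chromeos', 'release', 'debug'}


def set_tags(tags, validate=True):
    if validate:
        unknown = sorted(set(tags) - KNOWN_TAGS)
        if unknown:
            raise ValueError('Unknown expectation tags: %s' % ', '.join(unknown))
    return list(tags)


def apply_tags_or_fail(tags, skip_validation=False):
    try:
        set_tags(tags, validate=not skip_validation)
        return 0
    except ValueError as e:
        logging.error('%s\nYou can use the '
                      '--skip-typ-expectations-tags-validation argument to '
                      'suppress this exception.', e)
        return -1


if __name__ == '__main__':
    assert apply_tags_or_fail(['linux', 'release']) == 0
    assert apply_tags_or_fail(['linux', 'lollipop']) == -1
    assert apply_tags_or_fail(['linux', 'lollipop'], skip_validation=True) == 0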
Example #30
def RunStorySet(test,
                story_set,
                finder_options,
                results,
                max_failures=None,
                found_possible_browser=None):
    """Runs a test against a story_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt. Some
  other exceptions are handled and recorded before allowing the remaining
  stories to run.

  Args:
    test: Either a StoryTest or a LegacyPageTest instance.
    story_set: A StorySet instance with the set of stories to run.
    finder_options: The parsed command line options to customize the run.
    results: A PageTestResults object used to collect results and artifacts.
    max_failures: Max number of story run failures allowed before aborting
      the entire story run. It's overridden by finder_options.max_failures
      if given.
    found_possible_browser: A PossibleBrowser instance to use. If given, the
      browser does not need to be found again.
  """
    stories = story_set.stories
    for s in stories:
        ValidateStory(s)

    if found_possible_browser:
        possible_browser = found_possible_browser
        finder_options.browser_options.browser_type = possible_browser.browser_type
    else:
        possible_browser = _GetPossibleBrowser(finder_options)
    platform_tags = possible_browser.GetTypExpectationsTags()
    logging.info('The following expectations condition tags were generated %s',
                 str(platform_tags))
    abridged_story_set_tag = story_set.GetAbridgedStorySetTagFilter()
    story_filter = story_filter_module.StoryFilterFactory.BuildStoryFilter(
        results.benchmark_name, platform_tags, abridged_story_set_tag)
    stories = story_filter.FilterStories(stories)
    wpr_archive_info = story_set.wpr_archive_info
    # Sort the stories based on the archive name, to minimize how often the
    # network replay-server needs to be restarted.
    if wpr_archive_info:
        stories = sorted(stories, key=wpr_archive_info.WprFilePathForStory)

    if finder_options.print_only:
        if finder_options.print_only == 'tags':
            tags = set(itertools.chain.from_iterable(s.tags for s in stories))
            print 'List of tags:\n%s' % '\n'.join(tags)
            return
        include_tags = finder_options.print_only == 'both'
        if include_tags:
            format_string = '  %%-%ds %%s' % max(len(s.name) for s in stories)
        else:
            format_string = '%s%s'
        for s in stories:
            print format_string % (s.name,
                                   ','.join(s.tags) if include_tags else '')
        return

    if (not finder_options.use_live_sites and
            finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
        # Get the serving dirs of the filtered stories.
        # TODO(crbug.com/883798): removing story_set._serving_dirs
        serving_dirs = story_set._serving_dirs.copy()
        for story in stories:
            if story.serving_dir:
                serving_dirs.add(story.serving_dir)

        if story_set.bucket:
            for directory in serving_dirs:
                cloud_storage.GetFilesInDirectoryIfChanged(
                    directory, story_set.bucket)
        if story_set.archive_data_file and not _UpdateAndCheckArchives(
                story_set.archive_data_file, wpr_archive_info, stories,
                story_filter):
            return

    if not stories:
        return

    # Effective max failures gives priority to command-line flag value.
    effective_max_failures = finder_options.max_failures
    if effective_max_failures is None:
        effective_max_failures = max_failures

    state = None
    # TODO(crbug.com/866458): unwind the nested blocks
    # pylint: disable=too-many-nested-blocks
    try:
        pageset_repeat = finder_options.pageset_repeat
        for storyset_repeat_counter in xrange(pageset_repeat):
            for story in stories:
                if not state:
                    # Construct shared state by using a copy of finder_options. Shared
                    # state may update the finder_options. If we tear down the shared
                    # state after this story run, we want to construct the shared
                    # state for the next story from the original finder_options.
                    state = story_set.shared_state_class(
                        test, finder_options.Copy(), story_set,
                        possible_browser)

                with results.CreateStoryRun(story, storyset_repeat_counter):
                    skip_reason = story_filter.ShouldSkip(story)
                    if skip_reason:
                        results.Skip(skip_reason)
                        continue

                    if results.benchmark_interrupted:
                        results.Skip(results.benchmark_interruption,
                                     expected=False)
                        continue

                    try:
                        if state.platform:
                            state.platform.WaitForBatteryTemperature(35)
                            if finder_options.wait_for_cpu_temp:
                                state.platform.WaitForCpuTemperature(38.0)
                            _WaitForThermalThrottlingIfNeeded(state.platform)
                        _RunStoryAndProcessErrorIfNeeded(
                            story, results, state, test, finder_options)
                    except _UNHANDLEABLE_ERRORS as exc:
                        interruption = (
                            'Benchmark execution interrupted by a fatal exception: %r'
                            % exc)
                        results.InterruptBenchmark(interruption)
                        exception_formatter.PrintFormattedException()
                    except Exception:  # pylint: disable=broad-except
                        logging.exception('Exception raised during story run.')
                        results.Fail(sys.exc_info())
                        # For all other errors, try to give the rest of stories a chance
                        # to run by tearing down the state and creating a new state
                        # instance in the next iteration.
                        try:
                            # If TearDownState raises, do not catch the exception.
                            # (The Error was saved as a failure value.)
                            state.TearDownState()
                        except Exception as exc:  # pylint: disable=broad-except
                            interruption = (
                                'Benchmark execution interrupted by a fatal exception: %r'
                                % exc)
                            results.InterruptBenchmark(interruption)
                            exception_formatter.PrintFormattedException()
                        finally:
                            # Later finally-blocks use state, so ensure it is cleared.
                            state = None
                    finally:
                        if state and state.platform:
                            _CheckThermalThrottling(state.platform)

                if (effective_max_failures is not None
                        and results.num_failed > effective_max_failures):
                    interruption = (
                        'Too many stories failed. Aborting the rest of the stories.'
                    )
                    results.InterruptBenchmark(interruption)
    finally:
        if state:
            has_existing_exception = sys.exc_info() != (None, None, None)
            try:
                state.TearDownState()
            except Exception:  # pylint: disable=broad-except
                if not has_existing_exception:
                    raise
                # Print current exception and propagate existing exception.
                exception_formatter.PrintFormattedException(
                    msg='Exception from TearDownState:')
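Several runners above wait for the device to cool down (platform.WaitForBatteryTemperature(35), and optionally WaitForCpuTemperature(38.0)) before starting a story. Those methods are Telemetry internals; the helper below is just a generic sketch of that kind of poll-until-cool loop, with a timeout so a stuck sensor cannot hang the run. The simulated readings are made up.

import time

def wait_for_temperature(read_temp_celsius, threshold, timeout_s=60,
                         poll_interval_s=2):
    # Poll the sensor until it reports at or below the threshold, or time out.
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        if read_temp_celsius() <= threshold:
            return True
        time.sleep(poll_interval_s)
    return False


if __name__ == '__main__':
    readings = iter([41.0, 38.5, 34.2])   # simulated cool-down
    ok = wait_for_temperature(lambda: next(readings), threshold=35,
                              poll_interval_s=0)
    print('cooled down:', ok)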