Example #1
    def testMultipleTabsOkayForMultiTabTest(self):
        story_set = story.StorySet()
        page = page_module.Page('file://blank.html',
                                story_set,
                                base_dir=util.GetUnittestDataDir())
        story_set.AddStory(page)

        class TestMultiTabs(legacy_page_test.LegacyPageTest):
            def TabForPage(self, page, browser):
                del page  # unused
                return browser.tabs.New()

            def ValidateAndMeasurePage(self, page, tab, results):
                del page, results  # unused
                assert len(tab.browser.tabs) == 2

        test = TestMultiTabs()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        story_runner.Run(test, story_set, options, results)
Example #2
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    0 for success, 1 if there is a failure, or 2 if there is an uncaught
    exception.
  """
    start = time.time()
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    expectations = benchmark.expectations
    if not possible_browser:
        print(
            'Cannot find browser of type %s. To list out all '
            'available browsers, rerun your command with '
            '--browser=list' % finder_options.browser_options.browser_type)
        return 1

    can_run_on_platform = benchmark._CanRunOnPlatform(
        possible_browser.platform, finder_options)

    expectations_disabled = expectations.IsBenchmarkDisabled(
        possible_browser.platform, finder_options)

    if expectations_disabled or not can_run_on_platform:
        print('%s is disabled on the selected browser' % benchmark.Name())
        if finder_options.run_disabled_tests and can_run_on_platform:
            print('Running benchmark anyway due to: --also-run-disabled-tests')
        else:
            if can_run_on_platform:
                print('Try --also-run-disabled-tests to force the benchmark to run.')
            else:
                print(
                    "This platform is not supported for this benchmark. If this is "
                    "in error please add it to the benchmark's supported platforms."
                )
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    should_add_value=benchmark.ShouldAddValue,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    with results_options.CreateResults(
            benchmark_metadata,
            finder_options,
            should_add_value=benchmark.ShouldAddValue,
            benchmark_enabled=True) as results:
        try:
            Run(pt,
                stories,
                finder_options,
                results,
                benchmark.max_failures,
                expectations=expectations,
                metadata=benchmark.GetMetadata(),
                max_num_values=benchmark.MAX_NUM_VALUES)
            return_code = 1 if results.had_failures else 0
            # We want to make sure that all expectations are linked to real stories,
            # this will log error messages if names do not match what is in the set.
            benchmark.GetBrokenExpectations(stories)
        except Exception:  # pylint: disable=broad-except
            logging.fatal(
                'Benchmark execution interrupted by a fatal exception.')
            results.telemetry_info.InterruptBenchmark()
            exception_formatter.PrintFormattedException()
            return_code = 2

        benchmark_owners = benchmark.GetOwners()
        benchmark_component = benchmark.GetBugComponents()

        if benchmark_owners:
            results.AddSharedDiagnostic(reserved_infos.OWNERS.name,
                                        benchmark_owners)

        if benchmark_component:
            results.AddSharedDiagnostic(reserved_infos.BUG_COMPONENTS.name,
                                        benchmark_component)

        try:
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud()
                results.UploadProfilingFilesToCloud()
                results.UploadArtifactsToCloud()
        finally:
            duration = time.time() - start
            results.AddSummaryValue(
                scalar.ScalarValue(None, 'benchmark_duration', 'minutes',
                                   duration / 60.0))
            results.AddDurationHistogram(duration * 1000.0)
            memory_debug.LogHostMemoryUsage()
            results.PrintSummary()
    return return_code
Example #3
def RunBenchmark(benchmark, finder_options):
  """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success
    1 if there was a failure
    2 if there was an uncaught exception.
  """
  benchmark.CustomizeOptions(finder_options)
  with results_options.CreateResults(
      finder_options,
      benchmark_name=benchmark.Name(),
      benchmark_description=benchmark.Description(),
      report_progress=not finder_options.suppress_gtest_report,
      should_add_value=benchmark.ShouldAddValue) as results:

    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
      print('No browser of type "%s" found for running benchmark "%s".' % (
          finder_options.browser_options.browser_type, benchmark.Name()))
      return -1
    benchmark.expectations.SetTags(
        possible_browser.GetTypExpectationsTags())
    if not _ShouldRunBenchmark(benchmark, possible_browser, finder_options):
      return -1

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    story_set = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
      if any(not isinstance(p, page.Page) for p in story_set.stories):
        raise Exception(
            'PageTest must be used with StorySet containing only '
            'telemetry.page.Page stories.')
    try:
      Run(pt, story_set, finder_options, results, benchmark.max_failures,
          expectations=benchmark.expectations,
          max_num_values=benchmark.MAX_NUM_VALUES)
      if results.benchmark_interrupted:
        return_code = 2
      elif results.had_failures:
        return_code = 1
      elif results.had_successes:
        return_code = 0
      else:
        return_code = -1  # All stories were skipped.
      # We want to make sure that all expectations are linked to real stories,
      # this will log error messages if names do not match what is in the set.
      benchmark.GetBrokenExpectations(story_set)
    except Exception as exc: # pylint: disable=broad-except
      interruption = 'Benchmark execution interrupted: %r' % exc
      results.InterruptBenchmark(interruption)
      exception_formatter.PrintFormattedException()
      return_code = 2

    benchmark_owners = benchmark.GetOwners()
    benchmark_component = benchmark.GetBugComponents()
    benchmark_documentation_url = benchmark.GetDocumentationLink()

    if benchmark_owners:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.OWNERS.name, benchmark_owners)

    if benchmark_component:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.BUG_COMPONENTS.name, benchmark_component)

    if benchmark_documentation_url:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.DOCUMENTATION_URLS.name, benchmark_documentation_url)

    if finder_options.upload_results:
      results_processor.UploadArtifactsToCloud(results)
  return return_code
Example #4
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    An exit code from exit_codes module describing what happened.
  """
    benchmark_name = benchmark.Name()
    if not re.match(_RE_VALID_TEST_SUITE_NAME, benchmark_name):
        logging.fatal('Invalid benchmark name: %s', benchmark_name)
        return 2  # exit_codes.FATAL_ERROR

    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
        print('No browser of type "%s" found for running benchmark "%s".' %
              (finder_options.browser_options.browser_type, benchmark.Name()))
        return exit_codes.ALL_TESTS_SKIPPED

    benchmark.CustomizeOptions(finder_options, possible_browser)

    with results_options.CreateResults(
            finder_options,
            benchmark_name=benchmark_name,
            benchmark_description=benchmark.Description(),
            report_progress=not finder_options.suppress_gtest_report
    ) as results:

        if not _ShouldRunBenchmark(benchmark, possible_browser,
                                   finder_options):
            return exit_codes.ALL_TESTS_SKIPPED

        test = benchmark.CreatePageTest(finder_options)
        test.__name__ = benchmark.__class__.__name__

        story_set = benchmark.CreateStorySet(finder_options)

        if isinstance(test, legacy_page_test.LegacyPageTest):
            if any(not isinstance(p, page.Page) for p in story_set.stories):
                raise Exception(
                    'PageTest must be used with StorySet containing only '
                    'telemetry.page.Page stories.')

        results.AddSharedDiagnostics(
            architecture=possible_browser.platform.GetArchName(),
            device_id=possible_browser.platform.GetDeviceId(),
            os_name=possible_browser.platform.GetOSName(),
            os_version=possible_browser.platform.GetOSVersionName(),
            owners=benchmark.GetOwners(),
            bug_components=benchmark.GetBugComponents(),
            documentation_urls=benchmark.GetDocumentationLinks(),
            info_blurb=benchmark.GetInfoBlurb(),
        )

        try:
            RunStorySet(test, story_set, finder_options, results,
                        benchmark.max_failures, possible_browser)
            if results.benchmark_interrupted:
                return_code = exit_codes.FATAL_ERROR
            elif results.had_failures:
                return_code = exit_codes.TEST_FAILURE
            elif results.had_successes:
                return_code = exit_codes.SUCCESS
            else:
                return_code = exit_codes.ALL_TESTS_SKIPPED
        except Exception as exc:  # pylint: disable=broad-except
            interruption = 'Benchmark execution interrupted: %r' % exc
            results.InterruptBenchmark(interruption)
            exception_formatter.PrintFormattedException()
            return_code = exit_codes.FATAL_ERROR
    return return_code
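A minimal sketch of how the exit-code style return value from Example #4 could be forwarded to the process exit status. The wrapper below is hypothetical (not part of Telemetry) and relies only on the RunBenchmark signature and the exit_codes constants already shown above.

import sys

def _RunBenchmarkAndExit(benchmark, finder_options):
    # Hypothetical helper: RunBenchmark in Example #4 returns one of
    # exit_codes.SUCCESS, exit_codes.TEST_FAILURE, exit_codes.ALL_TESTS_SKIPPED
    # or exit_codes.FATAL_ERROR, so the value can be used directly as the
    # process exit status.
    return_code = RunBenchmark(benchmark, finder_options)
    sys.exit(return_code)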
Example #5
 def RunStories(self, stories, **kwargs):
     story_set = test_stories.DummyStorySet(stories)
     with results_options.CreateResults(
             self.options, benchmark_name='benchmark') as results:
         story_runner.RunStorySet(self.mock_story_test, story_set,
                                  self.options, results, **kwargs)
Example #6
    def CreateResults(self):
        benchmark_metadata = self._CreateBenchmarkMetadata()

        return results_options.CreateResults(benchmark_metadata, self._options)
Example #7
def RunBenchmark(benchmark, story_set, possible_browser,
                 browser_options, finder_options):
  """Run the benchmark on the given browser with given options.

  Args:
    benchmark: an instance of Benchmark class, a benchmark to be run.
    story_set: an instance of StorySet, a collection of stories.
    possible_browser: an instance of PossibleBrowser.
    browser_options: options to be passed to browser.
    finder_options: options controlling the execution of benchmark. This
      can be an instance of BrowserFinderOptions class, but only options
      relevant to benchmark execution will be read.

  Returns:
    0 for success, 1 if there is a failure, or 2 if there is an uncaught
    exception.
  """
  benchmark_metadata = benchmark.GetMetadata()
  page_test = benchmark.CreatePageTest(finder_options)
  page_test.__name__ = benchmark.__class__.__name__

  if isinstance(page_test, legacy_page_test.LegacyPageTest):
    if any(not isinstance(p, page.Page) for p in story_set):
      raise Exception(
          'PageTest must be used with StorySet containing only '
          'telemetry.page.Page stories.')

  with results_options.CreateResults(
      benchmark_metadata, finder_options,
      should_add_value=benchmark.ShouldAddValue,
      benchmark_enabled=True) as results:
    try:
      RunStorySet(
          page_test, story_set, possible_browser,
          benchmark.expectations, browser_options, finder_options, results,
          benchmark.max_failures, benchmark.MAX_NUM_VALUES)
      return_code = 1 if results.had_failures else 0
    except Exception as e: # pylint: disable=broad-except
      logging.fatal(
          'Benchmark execution interrupted by a fatal exception: %s(%s)' %
          (type(e), e))
      results.InterruptBenchmark(story_set, finder_options.pageset_repeat)
      exception_formatter.PrintFormattedException()
      return_code = 2

    benchmark_owners = benchmark.GetOwners()
    benchmark_component = benchmark.GetBugComponents()
    benchmark_documentation_url = benchmark.GetDocumentationLink()

    if benchmark_owners:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.OWNERS.name, benchmark_owners)

    if benchmark_component:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.BUG_COMPONENTS.name, benchmark_component)

    if benchmark_documentation_url:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.DOCUMENTATION_URLS.name, benchmark_documentation_url)

    try:
      if finder_options.upload_results:
        results.UploadTraceFilesToCloud()
        results.UploadArtifactsToCloud()
    finally:
      memory_debug.LogHostMemoryUsage()
      results.PrintSummary()
  return return_code
Example #8
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    The number of failure values (up to 254) or 255 if there is an uncaught
    exception.
  """
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    possible_browser = browser_finder.FindBrowser(finder_options)
    if possible_browser and benchmark.ShouldDisable(possible_browser):
        logging.warning('%s is disabled on the selected browser',
                        benchmark.Name())
        if finder_options.run_disabled_tests:
            logging.warning(
                'Running benchmark anyway due to: --also-run-disabled-tests')
        else:
            logging.warning(
                'Try --also-run-disabled-tests to force the benchmark to run.')
            return 1

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)
    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    should_tear_down_state_after_each_story_run = (
        benchmark.ShouldTearDownStateAfterEachStoryRun())
    # HACK: restarting shared state has huge overhead on cros (crbug.com/645329),
    # hence we default this to False when test is run against CrOS.
    # TODO(cros-team): figure out ways to remove this hack.
    if (possible_browser.platform.GetOSName() == 'chromeos' and
            not benchmark.IsShouldTearDownStateAfterEachStoryRunOverriden()):
        should_tear_down_state_after_each_story_run = False

    benchmark_metadata = benchmark.GetMetadata()
    with results_options.CreateResults(
            benchmark_metadata, finder_options,
            benchmark.ValueCanBeAddedPredicate) as results:
        try:
            Run(pt, stories, finder_options, results, benchmark.max_failures,
                should_tear_down_state_after_each_story_run,
                benchmark.ShouldTearDownStateAfterEachStorySetRun())
            return_code = min(254, len(results.failures))
        except Exception:
            exception_formatter.PrintFormattedException()
            return_code = 255

        try:
            if finder_options.upload_results:
                bucket = finder_options.upload_bucket
                if bucket in cloud_storage.BUCKET_ALIASES:
                    bucket = cloud_storage.BUCKET_ALIASES[bucket]
                results.UploadTraceFilesToCloud(bucket)
                results.UploadProfilingFilesToCloud(bucket)
        finally:
            results.PrintSummary()
    return return_code
Example #9
    def _testMaxFailuresOptionIsRespectedAndOverridable(
            self, num_failing_stories, runner_max_failures,
            options_max_failures, expected_num_failures):
        class SimpleSharedState(story_module.SharedState):
            _fake_platform = FakePlatform()
            _current_story = None

            @property
            def platform(self):
                return self._fake_platform

            def WillRunStory(self, story):
                self._current_story = story

            def RunStory(self, results):
                self._current_story.Run(self)

            def DidRunStory(self, results):
                pass

            def CanRunStory(self, story):
                return True

            def TearDownState(self):
                pass

            def DumpStateUponFailure(self, story, results):
                pass

        class FailingStory(story_module.Story):
            def __init__(self, name):
                super(FailingStory,
                      self).__init__(shared_state_class=SimpleSharedState,
                                     is_local=True,
                                     name=name)
                self.was_run = False

            def Run(self, shared_state):
                self.was_run = True
                raise legacy_page_test.Failure

            @property
            def url(self):
                return 'data:,'

        self.SuppressExceptionFormatting()

        story_set = story_module.StorySet()
        for i in range(num_failing_stories):
            story_set.AddStory(FailingStory(name='failing%d' % i))

        options = _GetOptionForUnittest()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        if options_max_failures:
            options.max_failures = options_max_failures

        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        story_runner.Run(DummyTest(),
                         story_set,
                         options,
                         results,
                         max_failures=runner_max_failures)
        self.assertEquals(0, GetNumberOfSuccessfulPageRuns(results))
        self.assertEquals(expected_num_failures, len(results.failures))
        for ii, story in enumerate(story_set.stories):
            self.assertEqual(story.was_run, ii < expected_num_failures)
Example #10
    def testPageResetWhenBrowserReusedBetweenStories(self):
        class NoClosingBrowserSharedState(shared_page_state.SharedPageState):
            # Simulate what ChromeOS does.
            def ShouldStopBrowserAfterStoryRun(self, s):
                del s  # unused
                return False

        # Loads a page and scrolls it to the end.
        class ScrollingPage(page_module.Page):
            def __init__(self, url, page_set, base_dir):
                super(ScrollingPage, self).__init__(
                    page_set=page_set,
                    base_dir=base_dir,
                    shared_page_state_class=NoClosingBrowserSharedState,
                    url=url,
                    name='ScrollingPage')

            def RunPageInteractions(self, action_runner):
                action_runner.ScrollPage()

        # Loads same page as ScrollingPage() and records if the scroll position is
        # at the top of the page (in was_page_at_top_on_start).
        class CheckScrollPositionPage(page_module.Page):
            def __init__(self, url, page_set, base_dir):
                super(CheckScrollPositionPage, self).__init__(
                    page_set=page_set,
                    base_dir=base_dir,
                    shared_page_state_class=NoClosingBrowserSharedState,
                    url=url,
                    name='CheckScroll')
                self.was_page_at_top_on_start = False

            def RunPageInteractions(self, action_runner):
                scroll_y = action_runner.tab.EvaluateJavaScript(
                    'window.scrollY')
                self.was_page_at_top_on_start = scroll_y == 0

        class Test(legacy_page_test.LegacyPageTest):
            def ValidateAndMeasurePage(self, *_):
                pass

        story_set = story.StorySet()
        story_set.AddStory(
            ScrollingPage(url='file://page_with_swipeables.html',
                          page_set=story_set,
                          base_dir=util.GetUnittestDataDir()))
        test_page = CheckScrollPositionPage(
            url='file://page_with_swipeables.html',
            page_set=story_set,
            base_dir=util.GetUnittestDataDir())
        story_set.AddStory(test_page)
        test = Test()
        options = options_for_unittests.GetCopy()
        SetUpStoryRunnerArguments(options)
        # Override defaults from parser creation and arg processing.
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        options.output_dir = None
        results = results_options.CreateResults(options)
        story_runner.Run(test, story_set, options, results)
        self.assertTrue(test_page.was_page_at_top_on_start)
Example #11
def RunStorySet(test, story_set, options, **kwargs):
    with results_options.CreateResults(options) as results:
        story_runner.RunStorySet(test, story_set, options, results, **kwargs)
    return results
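A hedged usage sketch for the RunStorySet helper above; the test object, story set, and assertion are illustrative placeholders, and it assumes the returned results object can still be inspected after its context manager has closed.

def _RunAndCheckNoFailures(my_page_test, my_story_set, options):
    # The helper above creates a results object via results_options.CreateResults,
    # runs the story set through story_runner.RunStorySet, and returns the
    # populated results.
    results = RunStorySet(my_page_test, my_story_set, options)
    # Illustrative check: other examples in this file read results.had_failures
    # to derive a return code.
    assert not results.had_failures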
Example #12
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success
    1 if there was a failure
    2 if there was an uncaught exception.
  """
    benchmark.CustomizeOptions(finder_options)
    possible_browser = browser_finder.FindBrowser(finder_options)
    if not _ShouldRunBenchmark(benchmark, possible_browser, finder_options):
        return -1

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    with results_options.CreateResults(
            finder_options,
            benchmark_name=benchmark.Name(),
            benchmark_description=benchmark.Description(),
            benchmark_enabled=True,
            should_add_value=benchmark.ShouldAddValue) as results:
        try:
            Run(pt,
                stories,
                finder_options,
                results,
                benchmark.max_failures,
                expectations=benchmark.expectations,
                max_num_values=benchmark.MAX_NUM_VALUES)
            if results.had_failures:
                return_code = 1
            elif results.had_successes_not_skipped:
                return_code = 0
            else:
                return_code = -1  # All stories were skipped.
            # We want to make sure that all expectations are linked to real stories,
            # this will log error messages if names do not match what is in the set.
            benchmark.GetBrokenExpectations(stories)
        except Exception as e:  # pylint: disable=broad-except
            logging.fatal(
                'Benchmark execution interrupted by a fatal exception: %s(%s)'
                % (type(e), e))
            filtered_stories = story_module.StoryFilter.FilterStorySet(stories)
            results.InterruptBenchmark(filtered_stories,
                                       finder_options.pageset_repeat)
            exception_formatter.PrintFormattedException()
            return_code = 2

        benchmark_owners = benchmark.GetOwners()
        benchmark_component = benchmark.GetBugComponents()
        benchmark_documentation_url = benchmark.GetDocumentationLink()

        if benchmark_owners:
            results.AddSharedDiagnosticToAllHistograms(
                reserved_infos.OWNERS.name, benchmark_owners)

        if benchmark_component:
            results.AddSharedDiagnosticToAllHistograms(
                reserved_infos.BUG_COMPONENTS.name, benchmark_component)

        if benchmark_documentation_url:
            results.AddSharedDiagnosticToAllHistograms(
                reserved_infos.DOCUMENTATION_URLS.name,
                benchmark_documentation_url)

        try:
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud()
                results.UploadArtifactsToCloud()
        finally:
            memory_debug.LogHostMemoryUsage()
            results.PrintSummary()
    return return_code
Example #13
def RunBenchmark(benchmark, finder_options):
  """Run this test with the given options.

  Returns:
    The number of failure values (up to 254) or 255 if there is an uncaught
    exception.
  """
  start = time.time()
  benchmark.CustomizeBrowserOptions(finder_options.browser_options)

  benchmark_metadata = benchmark.GetMetadata()
  possible_browser = browser_finder.FindBrowser(finder_options)
  expectations = benchmark.InitializeExpectations()

  if not possible_browser:
    print('Cannot find browser of type %s. To list out all '
          'available browsers, rerun your command with '
          '--browser=list' % finder_options.browser_options.browser_type)
    return 1

  can_run_on_platform = benchmark._CanRunOnPlatform(possible_browser.platform,
                                                    finder_options)

  # TODO(rnephew): Remove decorators.IsBenchmarkEnabled and IsBenchmarkDisabled
  # when we have fully moved to _CanRunOnPlatform().
  permanently_disabled = expectations.IsBenchmarkDisabled(
      possible_browser.platform, finder_options)
  temporarily_disabled = not decorators.IsBenchmarkEnabled(
      benchmark, possible_browser)

  if permanently_disabled or temporarily_disabled or not can_run_on_platform:
    print('%s is disabled on the selected browser' % benchmark.Name())
    if finder_options.run_disabled_tests and not permanently_disabled:
      print('Running benchmark anyway due to: --also-run-disabled-tests')
    else:
      print('Try --also-run-disabled-tests to force the benchmark to run.')
      # If chartjson is specified, this will print a dict indicating the
      # benchmark name and disabled state.
      with results_options.CreateResults(
          benchmark_metadata, finder_options,
          benchmark.ValueCanBeAddedPredicate, benchmark_enabled=False
          ) as results:
        results.PrintSummary()
      # When a disabled benchmark is run we now want to return success since
      # we are no longer filtering these out in the buildbot recipes.
      return 0

  pt = benchmark.CreatePageTest(finder_options)
  pt.__name__ = benchmark.__class__.__name__

  disabled_attr_name = decorators.DisabledAttributeName(benchmark)
  # pylint: disable=protected-access
  pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
  if hasattr(benchmark, '_enabled_strings'):
    # pylint: disable=protected-access
    pt._enabled_strings = benchmark._enabled_strings

  stories = benchmark.CreateStorySet(finder_options)

  if isinstance(pt, legacy_page_test.LegacyPageTest):
    if any(not isinstance(p, page.Page) for p in stories.stories):
      raise Exception(
          'PageTest must be used with StorySet containing only '
          'telemetry.page.Page stories.')

  with results_options.CreateResults(
      benchmark_metadata, finder_options,
      benchmark.ValueCanBeAddedPredicate, benchmark_enabled=True) as results:
    try:
      Run(pt, stories, finder_options, results, benchmark.max_failures,
          expectations=expectations, metadata=benchmark.GetMetadata())
      return_code = min(254, len(results.failures))
      # We want to make sure that all expectations are linked to real stories,
      # this will log error messages if names do not match what is in the set.
      benchmark.GetBrokenExpectations(stories)
    except Exception: # pylint: disable=broad-except
      results.telemetry_info.InterruptBenchmark()
      exception_formatter.PrintFormattedException()
      return_code = 255

    benchmark_owners = benchmark.GetOwners()
    benchmark_component = benchmark.GetBugComponents()

    if benchmark_owners:
      results.histograms.AddSharedDiagnostic(
          reserved_infos.OWNERS.name, benchmark_owners)

    if benchmark_component:
      results.histograms.AddSharedDiagnostic(
          reserved_infos.BUG_COMPONENTS.name, benchmark_component)

    try:
      if finder_options.upload_results:
        results.UploadTraceFilesToCloud()
        results.UploadProfilingFilesToCloud()
    finally:
      duration = time.time() - start
      results.AddSummaryValue(scalar.ScalarValue(
          None, 'benchmark_duration', 'minutes', duration / 60.0))
      results.PrintSummary()
  return return_code
Example #14
    def _testMaxFailuresOptionIsRespectedAndOverridable(
            self, num_failing_user_stories, runner_max_failures,
            options_max_failures, expected_num_failures):
        class SimpleSharedState(shared_state.SharedState):
            _fake_platform = FakePlatform()
            _current_user_story = None

            @property
            def platform(self):
                return self._fake_platform

            def WillRunUserStory(self, user_story):
                self._current_user_story = user_story

            def RunUserStory(self, results):
                self._current_user_story.Run()

            def DidRunUserStory(self, results):
                pass

            def GetTestExpectationAndSkipValue(self, expectations):
                return 'pass', None

            def TearDownState(self):
                pass

        class FailingUserStory(user_story_module.UserStory):
            def __init__(self):
                super(FailingUserStory,
                      self).__init__(shared_state_class=SimpleSharedState,
                                     is_local=True)
                self.was_run = False

            def Run(self):
                self.was_run = True
                raise page_test.Failure

        self.SuppressExceptionFormatting()

        story_set = story.StorySet()
        for _ in range(num_failing_user_stories):
            story_set.AddUserStory(FailingUserStory())

        options = _GetOptionForUnittest()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        if options_max_failures:
            options.max_failures = options_max_failures

        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        story_runner.Run(DummyTest(),
                         story_set,
                         test_expectations.TestExpectations(),
                         options,
                         results,
                         max_failures=runner_max_failures)
        self.assertEquals(0, GetNumberOfSuccessfulPageRuns(results))
        self.assertEquals(expected_num_failures, len(results.failures))
        for ii, user_story in enumerate(story_set.user_stories):
            self.assertEqual(user_story.was_run, ii < expected_num_failures)
Example #15
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    The number of failure values (up to 254) or 255 if there is an uncaught
    exception.
  """
    start = time.time()
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
        print(
            'Cannot find browser of type %s. To list out all '
            'available browsers, rerun your command with '
            '--browser=list' % finder_options.browser_options.browser_type)
        return 1
    if (possible_browser and
            not decorators.IsBenchmarkEnabled(benchmark, possible_browser)):
        print('%s is disabled on the selected browser' % benchmark.Name())
        if finder_options.run_disabled_tests:
            print('Running benchmark anyway due to: --also-run-disabled-tests')
        else:
            print('Try --also-run-disabled-tests to force the benchmark to run.')
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    benchmark.ValueCanBeAddedPredicate,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    should_tear_down_state_after_each_story_run = (
        benchmark.ShouldTearDownStateAfterEachStoryRun())
    # HACK: restarting shared state has huge overhead on cros (crbug.com/645329),
    # hence we default this to False when test is run against CrOS.
    # TODO(cros-team): figure out ways to remove this hack.
    if (possible_browser.platform.GetOSName() == 'chromeos' and
            not benchmark.IsShouldTearDownStateAfterEachStoryRunOverriden()):
        should_tear_down_state_after_each_story_run = False

    with results_options.CreateResults(benchmark_metadata,
                                       finder_options,
                                       benchmark.ValueCanBeAddedPredicate,
                                       benchmark_enabled=True) as results:
        try:
            Run(pt, stories, finder_options, results, benchmark.max_failures,
                should_tear_down_state_after_each_story_run,
                benchmark.ShouldTearDownStateAfterEachStorySetRun())
            return_code = min(254, len(results.failures))
        except Exception:
            exception_formatter.PrintFormattedException()
            return_code = 255

        try:
            if finder_options.upload_results:
                bucket = finder_options.upload_bucket
                if bucket in cloud_storage.BUCKET_ALIASES:
                    bucket = cloud_storage.BUCKET_ALIASES[bucket]
                results.UploadTraceFilesToCloud(bucket)
                results.UploadProfilingFilesToCloud(bucket)
        finally:
            duration = time.time() - start
            results.AddSummaryValue(
                scalar.ScalarValue(None, 'BenchmarkDuration', 'minutes',
                                   duration / 60.0))
            results.PrintSummary()
    return return_code
Example #16
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    0 for success, 1 if there is a failure, or 2 if there is an uncaught
    exception.
  """
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    expectations = benchmark.expectations

    target_platform = None
    if possible_browser:
        target_platform = possible_browser.platform
    else:
        target_platform = platform_module.GetHostPlatform()

    can_run_on_platform = benchmark._CanRunOnPlatform(target_platform,
                                                      finder_options)

    expectations_disabled = False
    # For now, test expectations are only applicable in the cases where the
    # testing target involves a browser.
    if possible_browser:
        expectations_disabled = expectations.IsBenchmarkDisabled(
            possible_browser.platform, finder_options)

    if expectations_disabled or not can_run_on_platform:
        print('%s is disabled on the selected browser' % benchmark.Name())
        if finder_options.run_disabled_tests and can_run_on_platform:
            print('Running benchmark anyway due to: --also-run-disabled-tests')
        else:
            if can_run_on_platform:
                print('Try --also-run-disabled-tests to force the benchmark to run.')
            else:
                print(
                    "This platform is not supported for this benchmark. If this is "
                    "in error please add it to the benchmark's supported platforms."
                )
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    should_add_value=benchmark.ShouldAddValue,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    with results_options.CreateResults(
            benchmark_metadata,
            finder_options,
            should_add_value=benchmark.ShouldAddValue,
            benchmark_enabled=True) as results:
        try:
            Run(pt,
                stories,
                finder_options,
                results,
                benchmark.max_failures,
                expectations=expectations,
                max_num_values=benchmark.MAX_NUM_VALUES)
            return_code = 1 if results.had_failures else 0
            # We want to make sure that all expectations are linked to real stories,
            # this will log error messages if names do not match what is in the set.
            benchmark.GetBrokenExpectations(stories)
        except Exception:  # pylint: disable=broad-except
            logging.fatal(
                'Benchmark execution interrupted by a fatal exception.')
            filtered_stories = story_module.StoryFilter.FilterStorySet(stories)
            results.InterruptBenchmark(filtered_stories,
                                       _GetPageSetRepeat(finder_options))
            exception_formatter.PrintFormattedException()
            return_code = 2

        benchmark_owners = benchmark.GetOwners()
        benchmark_component = benchmark.GetBugComponents()
        benchmark_documentation_url = benchmark.GetDocumentationLink()

        if benchmark_owners:
            results.AddSharedDiagnostic(reserved_infos.OWNERS.name,
                                        benchmark_owners)

        if benchmark_component:
            results.AddSharedDiagnostic(reserved_infos.BUG_COMPONENTS.name,
                                        benchmark_component)

        if benchmark_documentation_url:
            results.AddSharedDiagnostic(reserved_infos.DOCUMENTATION_URLS.name,
                                        benchmark_documentation_url)

        try:
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud()
                results.UploadArtifactsToCloud()
        finally:
            memory_debug.LogHostMemoryUsage()
            results.PrintSummary()
    return return_code
Example #17
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success
    1 if there was a failure
    2 if there was an uncaught exception.
  """
    benchmark.CustomizeOptions(finder_options)
    with results_options.CreateResults(
            finder_options,
            benchmark_name=benchmark.Name(),
            benchmark_description=benchmark.Description(),
            report_progress=not finder_options.suppress_gtest_report
    ) as results:

        possible_browser = browser_finder.FindBrowser(finder_options)
        if not possible_browser:
            print('No browser of type "%s" found for running benchmark "%s".' %
                  (finder_options.browser_options.browser_type,
                   benchmark.Name()))
            return -1
        if not _ShouldRunBenchmark(benchmark, possible_browser,
                                   finder_options):
            return -1

        test = benchmark.CreatePageTest(finder_options)
        test.__name__ = benchmark.__class__.__name__

        story_set = benchmark.CreateStorySet(finder_options)

        if isinstance(test, legacy_page_test.LegacyPageTest):
            if any(not isinstance(p, page.Page) for p in story_set.stories):
                raise Exception(
                    'PageTest must be used with StorySet containing only '
                    'telemetry.page.Page stories.')

        results.AddSharedDiagnostics(
            architecture=possible_browser.platform.GetArchName(),
            device_id=possible_browser.platform.GetDeviceId(),
            os_name=possible_browser.platform.GetOSName(),
            os_version=possible_browser.platform.GetOSVersionName(),
            owners=benchmark.GetOwners(),
            bug_components=benchmark.GetBugComponents(),
            documentation_urls=benchmark.GetDocumentationLinks(),
        )

        try:
            RunStorySet(test, story_set, finder_options, results,
                        benchmark.max_failures)
            if results.benchmark_interrupted:
                return_code = 2
            elif results.had_failures:
                return_code = 1
            elif results.had_successes:
                return_code = 0
            else:
                return_code = -1  # All stories were skipped.
        except Exception as exc:  # pylint: disable=broad-except
            interruption = 'Benchmark execution interrupted: %r' % exc
            results.InterruptBenchmark(interruption)
            exception_formatter.PrintFormattedException()
            return_code = 2
    return return_code
Example #18
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success
    1 if there was a failure
    2 if there was an uncaught exception.
  """
    benchmark.CustomizeOptions(finder_options)
    with results_options.CreateResults(
            finder_options,
            benchmark_name=benchmark.Name(),
            benchmark_description=benchmark.Description(),
            report_progress=not finder_options.suppress_gtest_report
    ) as results:

        possible_browser = browser_finder.FindBrowser(finder_options)
        if not possible_browser:
            print('No browser of type "%s" found for running benchmark "%s".' %
                  (finder_options.browser_options.browser_type,
                   benchmark.Name()))
            return -1
        typ_expectation_tags = possible_browser.GetTypExpectationsTags()
        logging.info(
            'The following expectations condition tags were generated %s',
            str(typ_expectation_tags))
        try:
            benchmark.expectations.SetTags(
                typ_expectation_tags,
                not finder_options.skip_typ_expectations_tags_validation)
        except ValueError as e:
            traceback.print_exc(file=sys.stdout)
            logging.error(
                str(e) +
                '\nYou can use the --skip-typ-expectations-tags-validation '
                'argument to suppress this exception.')
            return -1

        if not _ShouldRunBenchmark(benchmark, possible_browser,
                                   finder_options):
            return -1

        test = benchmark.CreatePageTest(finder_options)
        test.__name__ = benchmark.__class__.__name__

        story_set = benchmark.CreateStorySet(finder_options)

        if isinstance(test, legacy_page_test.LegacyPageTest):
            if any(not isinstance(p, page.Page) for p in story_set.stories):
                raise Exception(
                    'PageTest must be used with StorySet containing only '
                    'telemetry.page.Page stories.')
        try:
            RunStorySet(test,
                        story_set,
                        finder_options,
                        results,
                        benchmark.max_failures,
                        expectations=benchmark.expectations,
                        max_num_values=benchmark.MAX_NUM_VALUES)
            if results.benchmark_interrupted:
                return_code = 2
            elif results.had_failures:
                return_code = 1
            elif results.had_successes:
                return_code = 0
            else:
                return_code = -1  # All stories were skipped.
        except Exception as exc:  # pylint: disable=broad-except
            interruption = 'Benchmark execution interrupted: %r' % exc
            results.InterruptBenchmark(interruption)
            exception_formatter.PrintFormattedException()
            return_code = 2

        # TODO(crbug.com/981349): merge two calls to AddSharedDiagnostics
        # (see RunStorySet() method for the second one).
        results.AddSharedDiagnostics(
            owners=benchmark.GetOwners(),
            bug_components=benchmark.GetBugComponents(),
            documentation_urls=benchmark.GetDocumentationLinks(),
        )

        if finder_options.upload_results:
            results_processor.UploadArtifactsToCloud(results)
    return return_code
Example #19
    def testTrafficSettings(self):
        story_set = story.StorySet()
        slow_page = page_module.Page(
            'file://green_rect.html',
            story_set,
            base_dir=util.GetUnittestDataDir(),
            name='slow',
            traffic_setting=traffic_setting_module.GOOD_3G)
        fast_page = page_module.Page(
            'file://green_rect.html',
            story_set,
            base_dir=util.GetUnittestDataDir(),
            name='fast',
            traffic_setting=traffic_setting_module.WIFI)
        story_set.AddStory(slow_page)
        story_set.AddStory(fast_page)

        latencies_by_page_in_ms = {}

        class MeasureLatency(legacy_page_test.LegacyPageTest):
            def __init__(self):
                super(MeasureLatency, self).__init__()
                self._will_navigate_time = None

            def WillNavigateToPage(self, page, tab):
                del page, tab  # unused
                self._will_navigate_time = time.time() * 1000

            def ValidateAndMeasurePage(self, page, tab, results):
                del results  # unused
                latencies_by_page_in_ms[page.name] = (time.time() * 1000 -
                                                      self._will_navigate_time)

        test = MeasureLatency()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True

        with tempfile_ext.NamedTemporaryDirectory('page_E2E_tests') as tempdir:
            options.output_dir = tempdir
            SetUpStoryRunnerArguments(options)
            results = results_options.CreateResults(EmptyMetadataForTest(),
                                                    options)
            story_runner.Run(test,
                             story_set,
                             options,
                             results,
                             metadata=EmptyMetadataForTest())
            failure_messages = []
            for r in results.all_page_runs:
                if r.failure_str:
                    failure_messages.append(
                        'Failure message of story %s:\n%s' %
                        (r.story, r.failure_str))
            self.assertFalse(results.had_failures,
                             msg=''.join(failure_messages))
            self.assertIn('slow', latencies_by_page_in_ms)
            self.assertIn('fast', latencies_by_page_in_ms)
            # Slow page should be slower than fast page by at least 40 ms (roundtrip
            # time of good 3G) - 2 ms (roundtrip time of Wifi)
            self.assertGreater(latencies_by_page_in_ms['slow'],
                               latencies_by_page_in_ms['fast'] + 40 - 2)