def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    0 for success (a disabled benchmark also returns 0), 1 if there is a
    failure, or 2 if there is an uncaught exception.
  """
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    expectations = benchmark.expectations

    target_platform = None
    if possible_browser:
        target_platform = possible_browser.platform
    else:
        target_platform = platform_module.GetHostPlatform()

    can_run_on_platform = benchmark._CanRunOnPlatform(target_platform,
                                                      finder_options)

    expectations_disabled = False
    # For now, test expectations are only applicable in the cases where the
    # testing target involves a browser.
    if possible_browser:
        expectations_disabled = expectations.IsBenchmarkDisabled(
            possible_browser.platform, finder_options)

    if expectations_disabled or not can_run_on_platform:
        print('%s is disabled on the selected browser' % benchmark.Name())
        if finder_options.run_disabled_tests and can_run_on_platform:
            print('Running benchmark anyway due to: --also-run-disabled-tests')
        else:
            if can_run_on_platform:
                print('Try --also-run-disabled-tests to force the benchmark to run.')
            else:
                print(
                    "This platform is not supported for this benchmark. If this is "
                    "in error, please add it to the benchmark's supported platforms."
                )
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    should_add_value=benchmark.ShouldAddValue,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    with results_options.CreateResults(
            benchmark_metadata,
            finder_options,
            should_add_value=benchmark.ShouldAddValue,
            benchmark_enabled=True) as results:
        try:
            Run(pt,
                stories,
                finder_options,
                results,
                benchmark.max_failures,
                expectations=expectations,
                max_num_values=benchmark.MAX_NUM_VALUES)
            return_code = 1 if results.had_failures else 0
            # We want to make sure that all expectations are linked to real stories;
            # this will log error messages if names do not match what is in the set.
            benchmark.GetBrokenExpectations(stories)
        except Exception:  # pylint: disable=broad-except
            logging.fatal(
                'Benchmark execution interrupted by a fatal exception.')
            filtered_stories = story_module.StoryFilter.FilterStorySet(stories)
            results.InterruptBenchmark(filtered_stories,
                                       _GetPageSetRepeat(finder_options))
            exception_formatter.PrintFormattedException()
            return_code = 2

        benchmark_owners = benchmark.GetOwners()
        benchmark_component = benchmark.GetBugComponents()
        benchmark_documentation_url = benchmark.GetDocumentationLink()

        if benchmark_owners:
            results.AddSharedDiagnostic(reserved_infos.OWNERS.name,
                                        benchmark_owners)

        if benchmark_component:
            results.AddSharedDiagnostic(reserved_infos.BUG_COMPONENTS.name,
                                        benchmark_component)

        if benchmark_documentation_url:
            results.AddSharedDiagnostic(reserved_infos.DOCUMENTATION_URLS.name,
                                        benchmark_documentation_url)

        try:
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud()
                results.UploadArtifactsToCloud()
        finally:
            memory_debug.LogHostMemoryUsage()
            results.PrintSummary()
    return return_code
Example #2
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success,
    1 if there was a failure,
    2 if there was an uncaught exception.
  """
    benchmark.CustomizeOptions(finder_options)
    possible_browser = browser_finder.FindBrowser(finder_options)
    if not _ShouldRunBenchmark(benchmark, possible_browser, finder_options):
        return -1

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    with results_options.CreateResults(
            finder_options,
            benchmark_name=benchmark.Name(),
            benchmark_description=benchmark.Description(),
            benchmark_enabled=True,
            should_add_value=benchmark.ShouldAddValue) as results:
        try:
            Run(pt,
                stories,
                finder_options,
                results,
                benchmark.max_failures,
                expectations=benchmark.expectations,
                max_num_values=benchmark.MAX_NUM_VALUES)
            if results.had_failures:
                return_code = 1
            elif results.had_successes_not_skipped:
                return_code = 0
            else:
                return_code = -1  # All stories were skipped.
            # We want to make sure that all expectations are linked to real stories;
            # this will log error messages if names do not match what is in the set.
            benchmark.GetBrokenExpectations(stories)
        except Exception as e:  # pylint: disable=broad-except
            logging.fatal(
                'Benchmark execution interrupted by a fatal exception: %s(%s)'
                % (type(e), e))
            filtered_stories = story_module.StoryFilter.FilterStorySet(stories)
            results.InterruptBenchmark(filtered_stories,
                                       finder_options.pageset_repeat)
            exception_formatter.PrintFormattedException()
            return_code = 2

        benchmark_owners = benchmark.GetOwners()
        benchmark_component = benchmark.GetBugComponents()
        benchmark_documentation_url = benchmark.GetDocumentationLink()

        if benchmark_owners:
            results.AddSharedDiagnosticToAllHistograms(
                reserved_infos.OWNERS.name, benchmark_owners)

        if benchmark_component:
            results.AddSharedDiagnosticToAllHistograms(
                reserved_infos.BUG_COMPONENTS.name, benchmark_component)

        if benchmark_documentation_url:
            results.AddSharedDiagnosticToAllHistograms(
                reserved_infos.DOCUMENTATION_URLS.name,
                benchmark_documentation_url)

        try:
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud()
                results.UploadArtifactsToCloud()
        finally:
            memory_debug.LogHostMemoryUsage()
            results.PrintSummary()
    return return_code
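
A minimal sketch of how a caller might consume this variant's return codes follows. The wrapper name _MainSketch and the choice to fold a skipped run into success are illustrative assumptions; only RunBenchmark, benchmark.Name(), and the documented codes (-1 skipped, 0 success, 1 failure, 2 uncaught exception) come from the example above.

def _MainSketch(benchmark, finder_options):
    # Hypothetical driver around the RunBenchmark variant above. It relies
    # only on the documented return codes and treats a fully skipped run
    # as success when deciding the harness exit status (an assumption).
    code = RunBenchmark(benchmark, finder_options)
    if code == -1:
        print('%s was skipped entirely.' % benchmark.Name())
        code = 0
    return code
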
Example #3
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    0 for success (a disabled benchmark also returns 0), 1 if there is a
    failure or no matching browser is found, or 2 if there is an uncaught
    exception.
  """
    start = time.time()
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    expectations = benchmark.expectations
    if not possible_browser:
        print(
            'Cannot find browser of type %s. To list out all '
            'available browsers, rerun your command with '
            '--browser=list' % finder_options.browser_options.browser_type)
        return 1

    can_run_on_platform = benchmark._CanRunOnPlatform(
        possible_browser.platform, finder_options)

    expectations_disabled = expectations.IsBenchmarkDisabled(
        possible_browser.platform, finder_options)

    if expectations_disabled or not can_run_on_platform:
        print('%s is disabled on the selected browser' % benchmark.Name())
        if finder_options.run_disabled_tests and can_run_on_platform:
            print('Running benchmark anyway due to: --also-run-disabled-tests')
        else:
            if can_run_on_platform:
                print('Try --also-run-disabled-tests to force the benchmark to run.')
            else:
                print(
                    "This platform is not supported for this benchmark. If this is "
                    "in error, please add it to the benchmark's supported platforms."
                )
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    should_add_value=benchmark.ShouldAddValue,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    with results_options.CreateResults(
            benchmark_metadata,
            finder_options,
            should_add_value=benchmark.ShouldAddValue,
            benchmark_enabled=True) as results:
        try:
            Run(pt,
                stories,
                finder_options,
                results,
                benchmark.max_failures,
                expectations=expectations,
                metadata=benchmark.GetMetadata(),
                max_num_values=benchmark.MAX_NUM_VALUES)
            return_code = 1 if results.had_failures else 0
            # We want to make sure that all expectations are linked to real stories;
            # this will log error messages if names do not match what is in the set.
            benchmark.GetBrokenExpectations(stories)
        except Exception:  # pylint: disable=broad-except
            logging.fatal(
                'Benchmark execution interrupted by a fatal exception.')
            results.telemetry_info.InterruptBenchmark()
            exception_formatter.PrintFormattedException()
            return_code = 2

        benchmark_owners = benchmark.GetOwners()
        benchmark_component = benchmark.GetBugComponents()

        if benchmark_owners:
            results.AddSharedDiagnostic(reserved_infos.OWNERS.name,
                                        benchmark_owners)

        if benchmark_component:
            results.AddSharedDiagnostic(reserved_infos.BUG_COMPONENTS.name,
                                        benchmark_component)

        try:
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud()
                results.UploadProfilingFilesToCloud()
                results.UploadArtifactsToCloud()
        finally:
            duration = time.time() - start
            results.AddSummaryValue(
                scalar.ScalarValue(None, 'benchmark_duration', 'minutes',
                                   duration / 60.0))
            results.AddDurationHistogram(duration * 1000.0)
            memory_debug.LogHostMemoryUsage()
            results.PrintSummary()
    return return_code
Example #4
def RunBenchmark(benchmark, story_set, possible_browser,
                 browser_options, finder_options):
  """Run the benchmark on the given browser with given options.

  Args:
    benchmark: an instance of Benchmark class, a benchmark to be run.
    story_set: an instance of StorySet, a collection of stories.
    possible_browser: an instance of PossibleBrowser.
    browser_options: options to be passed to the browser.
    finder_options: options controlling the execution of the benchmark. This
      can be an instance of the BrowserFinderOptions class, but only options
      relevant to benchmark execution will be read.

  Returns:
    0 for success, 1 if there is a failure, or 2 if there is an uncaught
    exception.
  """
  benchmark_metadata = benchmark.GetMetadata()
  page_test = benchmark.CreatePageTest(finder_options)
  page_test.__name__ = benchmark.__class__.__name__

  if isinstance(page_test, legacy_page_test.LegacyPageTest):
    if any(not isinstance(p, page.Page) for p in story_set):
      raise Exception(
          'PageTest must be used with StorySet containing only '
          'telemetry.page.Page stories.')

  with results_options.CreateResults(
      benchmark_metadata, finder_options,
      should_add_value=benchmark.ShouldAddValue,
      benchmark_enabled=True) as results:
    try:
      RunStorySet(
          page_test, story_set, possible_browser,
          benchmark.expectations, browser_options, finder_options, results,
          benchmark.max_failures, benchmark.MAX_NUM_VALUES)
      return_code = 1 if results.had_failures else 0
    except Exception as e: # pylint: disable=broad-except
      logging.fatal(
          'Benchmark execution interrupted by a fatal exception: %s(%s)' %
          (type(e), e))
      results.InterruptBenchmark(story_set, finder_options.pageset_repeat)
      exception_formatter.PrintFormattedException()
      return_code = 2

    benchmark_owners = benchmark.GetOwners()
    benchmark_component = benchmark.GetBugComponents()
    benchmark_documentation_url = benchmark.GetDocumentationLink()

    if benchmark_owners:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.OWNERS.name, benchmark_owners)

    if benchmark_component:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.BUG_COMPONENTS.name, benchmark_component)

    if benchmark_documentation_url:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.DOCUMENTATION_URLS.name, benchmark_documentation_url)

    try:
      if finder_options.upload_results:
        results.UploadTraceFilesToCloud()
        results.UploadArtifactsToCloud()
    finally:
      memory_debug.LogHostMemoryUsage()
      results.PrintSummary()
  return return_code
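
Because this last variant takes its dependencies explicitly, the caller resolves the browser and builds the story set before delegating. The sketch below shows one plausible wiring using only helpers that already appear in the earlier examples (browser_finder.FindBrowser, benchmark.CreateStorySet, finder_options.browser_options); the wrapper name _RunWithExplicitDeps is hypothetical.

def _RunWithExplicitDeps(benchmark, finder_options):
  # Hypothetical glue for the explicit-dependency RunBenchmark above:
  # resolve the browser and build the story set, then delegate.
  possible_browser = browser_finder.FindBrowser(finder_options)
  if not possible_browser:
    print('Cannot find browser of type %s. To list out all available '
          'browsers, rerun your command with --browser=list'
          % finder_options.browser_options.browser_type)
    return 1
  story_set = benchmark.CreateStorySet(finder_options)
  return RunBenchmark(benchmark, story_set, possible_browser,
                      finder_options.browser_options, finder_options)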