Example No. 1
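# NOTE: This snippet relies on module-level imports from Telemetry/Catapult
# that are not shown here: time, benchmark, decorators, browser_finder, page,
# legacy_page_test, results_options, cloud_storage, scalar,
# exception_formatter.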
def _IsBenchmarkEnabled(benchmark_class, possible_browser):
  return (issubclass(benchmark_class, benchmark.Benchmark) and
          decorators.IsBenchmarkEnabled(benchmark_class, possible_browser))
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

    Returns:
      The number of failure values (up to 254) or 255 if there is an uncaught
      exception.
    """
    start = time.time()
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
        print(
            'Cannot find browser of type %s. To list out all '
            'available browsers, rerun your command with '
            '--browser=list' % finder_options.browser_options.browser_type)
        return 1
    if (possible_browser and
            not decorators.IsBenchmarkEnabled(benchmark, possible_browser)):
        print '%s is disabled on the selected browser' % benchmark.Name()
        if finder_options.run_disabled_tests:
            print 'Running benchmark anyway due to: --also-run-disabled-tests'
        else:
            print 'Try --also-run-disabled-tests to force the benchmark to run.'
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    benchmark.ValueCanBeAddedPredicate,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    should_tear_down_state_after_each_story_run = (
        benchmark.ShouldTearDownStateAfterEachStoryRun())
    # HACK: restarting shared state has a huge overhead on CrOS
    # (crbug.com/645329), hence we default this to False when the test is run
    # against CrOS.
    # TODO(cros-team): figure out ways to remove this hack.
    if (possible_browser.platform.GetOSName() == 'chromeos' and
            not benchmark.IsShouldTearDownStateAfterEachStoryRunOverriden()):
        should_tear_down_state_after_each_story_run = False

    with results_options.CreateResults(benchmark_metadata,
                                       finder_options,
                                       benchmark.ValueCanBeAddedPredicate,
                                       benchmark_enabled=True) as results:
        try:
            Run(pt, stories, finder_options, results, benchmark.max_failures,
                should_tear_down_state_after_each_story_run,
                benchmark.ShouldTearDownStateAfterEachStorySetRun())
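            # Cap the failure count at 254 so it never collides with 255,
            # which is reserved for an uncaught exception.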
            return_code = min(254, len(results.failures))
        except Exception:
            exception_formatter.PrintFormattedException()
            return_code = 255

        try:
            if finder_options.upload_results:
                bucket = finder_options.upload_bucket
                if bucket in cloud_storage.BUCKET_ALIASES:
                    bucket = cloud_storage.BUCKET_ALIASES[bucket]
                results.UploadTraceFilesToCloud(bucket)
                results.UploadProfilingFilesToCloud(bucket)
        finally:
            duration = time.time() - start
            results.AddSummaryValue(
                scalar.ScalarValue(None, 'BenchmarkDuration', 'minutes',
                                   duration / 60.0))
            results.PrintSummary()
    return return_code
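For context, here is a minimal sketch of a hypothetical caller that consumes RunBenchmark's return-code convention (0 = success or skipped-as-disabled, 255 = uncaught exception, anything else = number of failure values). The main wrapper and its arguments are illustrative and not part of Telemetry; they are assumed to be built by Telemetry's normal argument parsing.

import sys


def main(benchmark_instance, finder_options):
  # Only interprets the return code documented in RunBenchmark's docstring.
  return_code = RunBenchmark(benchmark_instance, finder_options)
  if return_code == 0:
    print('Benchmark passed (or was disabled and skipped).')
  elif return_code == 255:
    print('Benchmark raised an uncaught exception.')
  else:
    # 1-254: number of failure values (1 can also mean "browser not found").
    print('Benchmark reported %d failure value(s).' % return_code)
  sys.exit(return_code)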
Example No. 3
def RunBenchmark(benchmark, finder_options):
  """Run this test with the given options.

  Returns:
    The number of failure values (up to 254) or 255 if there is an uncaught
    exception.
  """
  start = time.time()
  benchmark.CustomizeBrowserOptions(finder_options.browser_options)

  benchmark_metadata = benchmark.GetMetadata()
  possible_browser = browser_finder.FindBrowser(finder_options)
  expectations = benchmark.InitializeExpectations()

  if not possible_browser:
    print ('Cannot find browser of type %s. To list out all '
           'available browsers, rerun your command with '
           '--browser=list' % finder_options.browser_options.browser_type)
    return 1

  can_run_on_platform = benchmark._CanRunOnPlatform(possible_browser.platform,
                                                    finder_options)

  # TODO(rnephew): Remove decorators.IsBenchmarkEnabled and IsBenchmarkDisabled
  # when we have fully moved to _CanRunOnPlatform().
  permanently_disabled = expectations.IsBenchmarkDisabled(
      possible_browser.platform, finder_options)
  temporarily_disabled = not decorators.IsBenchmarkEnabled(
      benchmark, possible_browser)

  if permanently_disabled or temporarily_disabled or not can_run_on_platform:
    print '%s is disabled on the selected browser' % benchmark.Name()
    if finder_options.run_disabled_tests and not permanently_disabled:
      print 'Running benchmark anyway due to: --also-run-disabled-tests'
    else:
      print 'Try --also-run-disabled-tests to force the benchmark to run.'
      # If chartjson is specified, this will print a dict indicating the
      # benchmark name and disabled state.
      with results_options.CreateResults(
          benchmark_metadata, finder_options,
          benchmark.ValueCanBeAddedPredicate, benchmark_enabled=False
          ) as results:
        results.PrintSummary()
      # When a disabled benchmark is run we now want to return success since
      # we are no longer filtering these out in the buildbot recipes.
      return 0

  pt = benchmark.CreatePageTest(finder_options)
  pt.__name__ = benchmark.__class__.__name__

  disabled_attr_name = decorators.DisabledAttributeName(benchmark)
  # pylint: disable=protected-access
  pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
  if hasattr(benchmark, '_enabled_strings'):
    # pylint: disable=protected-access
    pt._enabled_strings = benchmark._enabled_strings

  stories = benchmark.CreateStorySet(finder_options)

  if isinstance(pt, legacy_page_test.LegacyPageTest):
    if any(not isinstance(p, page.Page) for p in stories.stories):
      raise Exception(
          'PageTest must be used with StorySet containing only '
          'telemetry.page.Page stories.')

  with results_options.CreateResults(
      benchmark_metadata, finder_options,
      benchmark.ValueCanBeAddedPredicate, benchmark_enabled=True) as results:
    try:
      Run(pt, stories, finder_options, results, benchmark.max_failures,
          expectations=expectations, metadata=benchmark.GetMetadata())
      return_code = min(254, len(results.failures))
      # We want to make sure that all expectations are linked to real stories;
      # this will log error messages if names do not match what is in the set.
      benchmark.GetBrokenExpectations(stories)
    except Exception: # pylint: disable=broad-except
      results.telemetry_info.InterruptBenchmark()
      exception_formatter.PrintFormattedException()
      return_code = 255

    benchmark_owners = benchmark.GetOwners()
    benchmark_component = benchmark.GetBugComponents()

    if benchmark_owners:
      results.histograms.AddSharedDiagnostic(
          reserved_infos.OWNERS.name, benchmark_owners)

    if benchmark_component:
      results.histograms.AddSharedDiagnostic(
          reserved_infos.BUG_COMPONENTS.name, benchmark_component)

    try:
      if finder_options.upload_results:
        results.UploadTraceFilesToCloud()
        results.UploadProfilingFilesToCloud()
    finally:
      duration = time.time() - start
      results.AddSummaryValue(scalar.ScalarValue(
          None, 'benchmark_duration', 'minutes', duration / 60.0))
      results.PrintSummary()
  return return_code
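The main difference from Example No. 1 is that this variant also consults story expectations (permanent disables) and benchmark._CanRunOnPlatform before deciding whether to run. The following standalone sketch distills that gating decision into plain booleans; the function name and parameters are illustrative and not part of Telemetry.

def _should_skip_benchmark(permanently_disabled, temporarily_disabled,
                           can_run_on_platform, also_run_disabled_tests):
  """Mirrors the skip/run decision in Example No. 3 (True means skip)."""
  if not (permanently_disabled or temporarily_disabled or
          not can_run_on_platform):
    return False  # Fully enabled: run normally.
  # --also-run-disabled-tests can override a temporary disable or a platform
  # mismatch, but never an expectations-based (permanent) disable.
  return not (also_run_disabled_tests and not permanently_disabled)


# A temporarily disabled benchmark runs only with the override flag, while a
# permanently disabled one is always skipped.
assert _should_skip_benchmark(False, True, True, False)
assert not _should_skip_benchmark(False, True, True, True)
assert _should_skip_benchmark(True, False, True, True)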