Example #1
def load_tests(loader, standard_tests, pattern):
    del loader, standard_tests, pattern  # unused
    suite = progress_reporter.TestSuite()

    benchmarks_dir = os.path.dirname(__file__)
    top_level_dir = os.path.dirname(benchmarks_dir)

    # Using the default of |index_by_class_name=False| means that if a module
    # has multiple benchmarks, only the last one is returned.
    all_benchmarks = discover.DiscoverClasses(
        benchmarks_dir,
        top_level_dir,
        benchmark_module.Benchmark,
        index_by_class_name=False).values()
    for benchmark in all_benchmarks:
        if sys.modules[benchmark.__module__] in _BLACK_LIST_TEST_MODULES:
            continue
        # TODO(tonyg): Smoke doesn't work with session_restore yet.
        if (benchmark.Name().startswith('session_restore')
                or benchmark.Name().startswith('skpicture_printer')):
            continue

        if hasattr(benchmark, 'generated_profile_archive'):
            # We'd like to test these, but don't know how yet.
            continue

        class BenchmarkSmokeTest(unittest.TestCase):
            pass

        # tab_switching needs more than one page to test correctly.
        if 'tab_switching' in benchmark.Name():
            method = SmokeTestGenerator(benchmark, num_pages=2)
        else:
            method = SmokeTestGenerator(benchmark)

        # Make sure any decorators are propagated from the original declaration.
        # (access to protected members) pylint: disable=protected-access
        # TODO(dpranke): Since we only pick the first test from every class
        # (above), if that test is disabled, we'll end up not running *any*
        # test from the class. We should probably discover all of the tests
        # in a class, and then throw the ones we don't need away instead.

        disabled_benchmark_attr = decorators.DisabledAttributeName(benchmark)
        disabled_method_attr = decorators.DisabledAttributeName(method)
        enabled_benchmark_attr = decorators.EnabledAttributeName(benchmark)
        enabled_method_attr = decorators.EnabledAttributeName(method)

        MergeDecorators(method, disabled_method_attr, benchmark,
                        disabled_benchmark_attr)
        MergeDecorators(method, enabled_method_attr, benchmark,
                        enabled_benchmark_attr)

        setattr(BenchmarkSmokeTest, benchmark.Name(), method)

        suite.addTest(BenchmarkSmokeTest(benchmark.Name()))

    return suite
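Example #1 relies on Telemetry-specific helpers (SmokeTestGenerator, discover, progress_reporter) that are not shown here. A minimal, standard-library-only sketch of the same load_tests pattern, with purely illustrative benchmark names and a stand-in test body, could look like this:

import unittest


def _make_smoke_test(benchmark_name):
    # Stand-in for SmokeTestGenerator: returns a test method bound to one benchmark name.
    def _test(self):
        self.assertTrue(benchmark_name)
    return _test


def load_tests(loader, standard_tests, pattern):
    del loader, standard_tests, pattern  # unused, as in Example #1
    suite = unittest.TestSuite()
    for benchmark_name in ('sample_benchmark_a', 'sample_benchmark_b'):
        # A fresh TestCase class per benchmark, mirroring BenchmarkSmokeTest above.
        class BenchmarkSmokeTest(unittest.TestCase):
            pass
        setattr(BenchmarkSmokeTest, benchmark_name, _make_smoke_test(benchmark_name))
        # Instantiating a TestCase with the method name yields one runnable test.
        suite.addTest(BenchmarkSmokeTest(benchmark_name))
    return suite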
Example #2
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

    Returns:
      The number of failure values (up to 254) or 255 if there is an uncaught
      exception.
    """
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    possible_browser = browser_finder.FindBrowser(finder_options)
    if possible_browser and benchmark.ShouldDisable(possible_browser):
        logging.warning('%s is disabled on the selected browser',
                        benchmark.Name())
        if finder_options.run_disabled_tests:
            logging.warning(
                'Running benchmark anyway due to: --also-run-disabled-tests')
        else:
            logging.warning(
                'Try --also-run-disabled-tests to force the benchmark to run.')
            return 1

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)
    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    should_tear_down_state_after_each_story_run = (
        benchmark.ShouldTearDownStateAfterEachStoryRun())
    # HACK: restarting shared state has huge overhead on cros (crbug.com/645329),
    # hence we default this to False when test is run against CrOS.
    # TODO(cros-team): figure out ways to remove this hack.
    if (possible_browser.platform.GetOSName() == 'chromeos' and
            not benchmark.IsShouldTearDownStateAfterEachStoryRunOverriden()):
        should_tear_down_state_after_each_story_run = False

    benchmark_metadata = benchmark.GetMetadata()
    with results_options.CreateResults(
            benchmark_metadata, finder_options,
            benchmark.ValueCanBeAddedPredicate) as results:
        try:
            Run(pt, stories, finder_options, results, benchmark.max_failures,
                should_tear_down_state_after_each_story_run,
                benchmark.ShouldTearDownStateAfterEachStorySetRun())
            return_code = min(254, len(results.failures))
        except Exception:
            exception_formatter.PrintFormattedException()
            return_code = 255

        try:
            if finder_options.upload_results:
                bucket = finder_options.upload_bucket
                if bucket in cloud_storage.BUCKET_ALIASES:
                    bucket = cloud_storage.BUCKET_ALIASES[bucket]
                results.UploadTraceFilesToCloud(bucket)
                results.UploadProfilingFilesToCloud(bucket)
        finally:
            results.PrintSummary()
    return return_code
Example #3
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

    Returns:
      The number of failure values (up to 254) or 255 if there is an uncaught
      exception.
    """
    start = time.time()
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
        print(
            'Cannot find browser of type %s. To list out all '
            'available browsers, rerun your command with '
            '--browser=list' % finder_options.browser_options.browser_type)
        return 1
    if (possible_browser and
            not decorators.IsBenchmarkEnabled(benchmark, possible_browser)):
        print('%s is disabled on the selected browser' % benchmark.Name())
        if finder_options.run_disabled_tests:
            print('Running benchmark anyway due to: --also-run-disabled-tests')
        else:
            print('Try --also-run-disabled-tests to force the benchmark to run.')
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    benchmark.ValueCanBeAddedPredicate,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    should_tear_down_state_after_each_story_run = (
        benchmark.ShouldTearDownStateAfterEachStoryRun())
    # HACK: restarting shared state has huge overhead on cros (crbug.com/645329),
    # hence we default this to False when test is run against CrOS.
    # TODO(cros-team): figure out ways to remove this hack.
    if (possible_browser.platform.GetOSName() == 'chromeos' and
            not benchmark.IsShouldTearDownStateAfterEachStoryRunOverriden()):
        should_tear_down_state_after_each_story_run = False

    with results_options.CreateResults(benchmark_metadata,
                                       finder_options,
                                       benchmark.ValueCanBeAddedPredicate,
                                       benchmark_enabled=True) as results:
        try:
            Run(pt, stories, finder_options, results, benchmark.max_failures,
                should_tear_down_state_after_each_story_run,
                benchmark.ShouldTearDownStateAfterEachStorySetRun())
            return_code = min(254, len(results.failures))
        except Exception:
            exception_formatter.PrintFormattedException()
            return_code = 255

        try:
            if finder_options.upload_results:
                bucket = finder_options.upload_bucket
                if bucket in cloud_storage.BUCKET_ALIASES:
                    bucket = cloud_storage.BUCKET_ALIASES[bucket]
                results.UploadTraceFilesToCloud(bucket)
                results.UploadProfilingFilesToCloud(bucket)
        finally:
            duration = time.time() - start
            results.AddSummaryValue(
                scalar.ScalarValue(None, 'BenchmarkDuration', 'minutes',
                                   duration / 60.0))
            results.PrintSummary()
    return return_code
Example #4
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

    Returns:
      1 if there is a failure or 2 if there is an uncaught exception.
    """
    start = time.time()
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    expectations = benchmark.expectations
    if not possible_browser:
        print(
            'Cannot find browser of type %s. To list out all '
            'available browsers, rerun your command with '
            '--browser=list' % finder_options.browser_options.browser_type)
        return 1

    can_run_on_platform = benchmark._CanRunOnPlatform(
        possible_browser.platform, finder_options)

    expectations_disabled = expectations.IsBenchmarkDisabled(
        possible_browser.platform, finder_options)

    if expectations_disabled or not can_run_on_platform:
        print('%s is disabled on the selected browser' % benchmark.Name())
        if finder_options.run_disabled_tests and can_run_on_platform:
            print('Running benchmark anyway due to: --also-run-disabled-tests')
        else:
            if can_run_on_platform:
                print('Try --also-run-disabled-tests to force the benchmark to run.')
            else:
                print(
                    "This platform is not supported for this benchmark. If this is "
                    "in error please add it to the benchmark's supported platforms."
                )
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    should_add_value=benchmark.ShouldAddValue,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    with results_options.CreateResults(
            benchmark_metadata,
            finder_options,
            should_add_value=benchmark.ShouldAddValue,
            benchmark_enabled=True) as results:
        try:
            Run(pt,
                stories,
                finder_options,
                results,
                benchmark.max_failures,
                expectations=expectations,
                metadata=benchmark.GetMetadata(),
                max_num_values=benchmark.MAX_NUM_VALUES)
            return_code = 1 if results.had_failures else 0
            # We want to make sure that all expectations are linked to real stories,
            # this will log error messages if names do not match what is in the set.
            benchmark.GetBrokenExpectations(stories)
        except Exception:  # pylint: disable=broad-except
            logging.fatal(
                'Benchmark execution interrupted by a fatal exception.')
            results.telemetry_info.InterruptBenchmark()
            exception_formatter.PrintFormattedException()
            return_code = 2

        benchmark_owners = benchmark.GetOwners()
        benchmark_component = benchmark.GetBugComponents()

        if benchmark_owners:
            results.AddSharedDiagnostic(reserved_infos.OWNERS.name,
                                        benchmark_owners)

        if benchmark_component:
            results.AddSharedDiagnostic(reserved_infos.BUG_COMPONENTS.name,
                                        benchmark_component)

        try:
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud()
                results.UploadProfilingFilesToCloud()
                results.UploadArtifactsToCloud()
        finally:
            duration = time.time() - start
            results.AddSummaryValue(
                scalar.ScalarValue(None, 'benchmark_duration', 'minutes',
                                   duration / 60.0))
            results.AddDurationHistogram(duration * 1000.0)
            memory_debug.LogHostMemoryUsage()
            results.PrintSummary()
    return return_code
Example #5
def SetDisabledStrings(self, disabled_strings):
    # pylint: disable=attribute-defined-outside-init
    disabled_attr_name = decorators.DisabledAttributeName(self)
    setattr(self, disabled_attr_name, disabled_strings)
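Example #5 only shows the setter half of the pattern; Examples #2 through #4 read the value back with getattr. A hypothetical, self-contained sketch of that round trip follows; the _disabled_attribute_name helper, FakeBenchmark, and GetDisabledStrings below merely imitate decorators.DisabledAttributeName usage and are not part of Telemetry.

def _disabled_attribute_name(obj):
    # Imitation of decorators.DisabledAttributeName: derive a per-class attribute name.
    return '_disabled_%s' % type(obj).__name__


class FakeBenchmark(object):
    def SetDisabledStrings(self, disabled_strings):
        # pylint: disable=attribute-defined-outside-init
        setattr(self, _disabled_attribute_name(self), disabled_strings)

    def GetDisabledStrings(self):
        # Mirrors the getattr(benchmark, disabled_attr_name, set()) reads above.
        return getattr(self, _disabled_attribute_name(self), set())


if __name__ == '__main__':
    b = FakeBenchmark()
    b.SetDisabledStrings({'reference', 'android'})
    print(b.GetDisabledStrings())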
Example #6
def RunBenchmark(benchmark, finder_options):
  """Run this test with the given options.

  Returns:
    The number of failure values (up to 254) or 255 if there is an uncaught
    exception.
  """
  start = time.time()
  benchmark.CustomizeBrowserOptions(finder_options.browser_options)

  benchmark_metadata = benchmark.GetMetadata()
  possible_browser = browser_finder.FindBrowser(finder_options)
  expectations = benchmark.InitializeExpectations()

  if not possible_browser:
    print('Cannot find browser of type %s. To list out all '
          'available browsers, rerun your command with '
          '--browser=list' % finder_options.browser_options.browser_type)
    return 1

  can_run_on_platform = benchmark._CanRunOnPlatform(possible_browser.platform,
                                                    finder_options)

  # TODO(rnephew): Remove decorators.IsBenchmarkEnabled and IsBenchmarkDisabled
  # when we have fully moved to _CanRunOnPlatform().
  permanently_disabled = expectations.IsBenchmarkDisabled(
      possible_browser.platform, finder_options)
  temporarily_disabled = not decorators.IsBenchmarkEnabled(
      benchmark, possible_browser)

  if permanently_disabled or temporarily_disabled or not can_run_on_platform:
    print('%s is disabled on the selected browser' % benchmark.Name())
    if finder_options.run_disabled_tests and not permanently_disabled:
      print('Running benchmark anyway due to: --also-run-disabled-tests')
    else:
      print('Try --also-run-disabled-tests to force the benchmark to run.')
      # If chartjson is specified, this will print a dict indicating the
      # benchmark name and disabled state.
      with results_options.CreateResults(
          benchmark_metadata, finder_options,
          benchmark.ValueCanBeAddedPredicate, benchmark_enabled=False
          ) as results:
        results.PrintSummary()
      # When a disabled benchmark is run we now want to return success since
      # we are no longer filtering these out in the buildbot recipes.
      return 0

  pt = benchmark.CreatePageTest(finder_options)
  pt.__name__ = benchmark.__class__.__name__

  disabled_attr_name = decorators.DisabledAttributeName(benchmark)
  # pylint: disable=protected-access
  pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
  if hasattr(benchmark, '_enabled_strings'):
    # pylint: disable=protected-access
    pt._enabled_strings = benchmark._enabled_strings

  stories = benchmark.CreateStorySet(finder_options)

  if isinstance(pt, legacy_page_test.LegacyPageTest):
    if any(not isinstance(p, page.Page) for p in stories.stories):
      raise Exception(
          'PageTest must be used with StorySet containing only '
          'telemetry.page.Page stories.')

  with results_options.CreateResults(
      benchmark_metadata, finder_options,
      benchmark.ValueCanBeAddedPredicate, benchmark_enabled=True) as results:
    try:
      Run(pt, stories, finder_options, results, benchmark.max_failures,
          expectations=expectations, metadata=benchmark.GetMetadata())
      return_code = min(254, len(results.failures))
      # We want to make sure that all expectations are linked to real stories,
      # this will log error messages if names do not match what is in the set.
      benchmark.GetBrokenExpectations(stories)
    except Exception: # pylint: disable=broad-except
      results.telemetry_info.InterruptBenchmark()
      exception_formatter.PrintFormattedException()
      return_code = 255

    benchmark_owners = benchmark.GetOwners()
    benchmark_component = benchmark.GetBugComponents()

    if benchmark_owners:
      results.histograms.AddSharedDiagnostic(
          reserved_infos.OWNERS.name, benchmark_owners)

    if benchmark_component:
      results.histograms.AddSharedDiagnostic(
          reserved_infos.BUG_COMPONENTS.name, benchmark_component)

    try:
      if finder_options.upload_results:
        results.UploadTraceFilesToCloud()
        results.UploadProfilingFilesToCloud()
    finally:
      duration = time.time() - start
      results.AddSummaryValue(scalar.ScalarValue(
          None, 'benchmark_duration', 'minutes', duration / 60.0))
      results.PrintSummary()
  return return_code