Example 1
  def ProcessCommandLineArgs(cls, parser, args, environment):
    all_benchmarks = _Benchmarks(environment)
    if not args.positional_args:
      possible_browser = (
          browser_finder.FindBrowser(args) if args.browser_type else None)
      PrintBenchmarkList(all_benchmarks, possible_browser)
      sys.exit(-1)

    input_benchmark_name = args.positional_args[0]
    matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
    if not matching_benchmarks:
      print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
      print >> sys.stderr
      most_likely_matched_benchmarks = command_line.GetMostLikelyMatchedObject(
          all_benchmarks, input_benchmark_name, lambda x: x.Name())
      if most_likely_matched_benchmarks:
        print >> sys.stderr, 'Do you mean any of those benchmarks below?'
        PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
      sys.exit(-1)

    if len(matching_benchmarks) > 1:
      print >> sys.stderr, ('Multiple benchmarks named "%s".' %
                            input_benchmark_name)
      print >> sys.stderr, 'Did you mean one of these?'
      print >> sys.stderr
      PrintBenchmarkList(matching_benchmarks, None, sys.stderr)
      sys.exit(-1)

    benchmark_class = matching_benchmarks.pop()
    if len(args.positional_args) > 1:
      parser.error('Too many arguments.')

    assert issubclass(benchmark_class, benchmark.Benchmark), (
        'Trying to run a non-Benchmark?!')

    benchmark.ProcessCommandLineArgs(parser, args)
    benchmark_class.ProcessCommandLineArgs(parser, args)

    cls._benchmark = benchmark_class
Example 2
  def testWebPageReplay(self):
    story_set = example_domain.ExampleDomainPageSet()
    body = []
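    # `body` is captured by the nested LegacyPageTest below via a closure; one
    # innerText string is appended per page run.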

    class TestWpr(legacy_page_test.LegacyPageTest):
      def ValidateAndMeasurePage(self, page, tab, results):
        del page, results  # unused
        body.append(tab.EvaluateJavaScript('document.body.innerText'))

      def DidRunPage(self, platform):
        # Force the replay server to restart between pages; this verifies that
        # the restart mechanism works.
        platform.network_controller.StopReplay()

    test = TestWpr()
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    SetUpStoryRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    possible_browser = browser_finder.FindBrowser(options)
    story_runner.RunStorySet(
        test=test,
        story_set=story_set,
        possible_browser=possible_browser,
        expectations=None,
        browser_options=options.browser_options,
        finder_options=options,
        results=results,
    )

    self.longMessage = True
    self.assertIn('Example Domain', body[0],
                  msg='URL: %s' % story_set.stories[0].url)
    self.assertIn('Example Domain', body[1],
                  msg='URL: %s' % story_set.stories[1].url)

    self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
    self.assertFalse(results.had_failures)
Example 3
    def ProcessCommandLineArgs(cls, parser, options, environment):
        all_benchmarks = environment.GetBenchmarks()
        if environment.expectations_files:
            assert len(environment.expectations_files) == 1
            expectations_file = environment.expectations_files[0]
        else:
            expectations_file = None
        if not options.positional_args:
            possible_browser = (browser_finder.FindBrowser(options)
                                if options.browser_type else None)
            PrintBenchmarkList(all_benchmarks, possible_browser,
                               expectations_file)
            sys.exit(-1)

        benchmark_name = options.positional_args[0]
        benchmark_class = environment.GetBenchmarkByName(benchmark_name)
        if benchmark_class is None:
            print >> sys.stderr, 'No benchmark named "%s".' % benchmark_name
            print >> sys.stderr
            most_likely_matched_benchmarks = matching.GetMostLikelyMatchedObject(
                all_benchmarks, benchmark_name, lambda x: x.Name())
            if most_likely_matched_benchmarks:
                print >> sys.stderr, 'Do you mean any of those benchmarks below?'
                PrintBenchmarkList(most_likely_matched_benchmarks, None,
                                   expectations_file, sys.stderr)
            sys.exit(-1)

        if len(options.positional_args) > 1:
            parser.error('Too many arguments.')

        assert issubclass(
            benchmark_class,
            benchmark.Benchmark), ('Trying to run a non-Benchmark?!')

        benchmark.ProcessCommandLineArgs(parser, options)
        benchmark_class.ProcessCommandLineArgs(parser, options)

        cls._benchmark = benchmark_class
        cls._expectations_path = expectations_file
Example 4
    def testPushDefaultProfileDir(self):
        # Add a few files and directories to a temp directory, and ensure they are
        # copied to the device.
        with tempfile_ext.NamedTemporaryDirectory() as tempdir:
            foo_path = os.path.join(tempdir, 'foo')
            with open(foo_path, 'w') as f:
                f.write('foo_data')

            bar_path = os.path.join(tempdir, 'path', 'to', 'bar')
            os.makedirs(os.path.dirname(bar_path))
            with open(bar_path, 'w') as f:
                f.write('bar_data')

            expected_profile_paths = [
                'foo', posixpath.join('path', 'to', 'bar')
            ]

            finder_options = options_for_unittests.GetCopy()
            finder_options.browser_options.profile_dir = tempdir
            browser_to_create = browser_finder.FindBrowser(finder_options)

            # SetUpEnvironment will end up calling PushProfile
            try:
                browser_to_create.SetUpEnvironment(
                    finder_options.browser_options)

                profile_dir = browser_to_create.profile_directory
                device = browser_to_create._platform_backend.device

                absolute_expected_profile_paths = [
                    posixpath.join(profile_dir, path)
                    for path in expected_profile_paths
                ]
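                # Note: device.PathExists accepts an iterable of paths here
                # and, as used below, should only be true when every expected
                # profile path exists on the device.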
                self.assertTrue(
                    device.PathExists(absolute_expected_profile_paths),
                    absolute_expected_profile_paths)
            finally:
                browser_to_create.CleanUpEnvironment()
Example 5
 def CustomizeBrowserOptions(self, options):
     # --test-type=gpu is used only to suppress the "Google API Keys are missing"
     # infobar, which causes flakiness in tests.
     options.AppendExtraBrowserArgs([
         '--disable-gesture-requirement-for-media-playback',
         '--disable-domain-blocking-for-3d-apis',
         '--disable-gpu-process-crash-limit', '--js-flags=--expose-gc',
         '--test-type=gpu', '--enable-experimental-canvas-features'
     ])
     browser = browser_finder.FindBrowser(options.finder_options)
     if (browser.target_os.startswith('android')
             and browser.browser_type == 'android-webview-shell'):
         # TODO(kbr): this is overly broad. We'd like to do this only on
         # Nexus 9. It'll go away shortly anyway. crbug.com/499928
         #
         # The --ignore_egl_sync_failures is only there to work around
         # some strange failure on the Nexus 9 bot, not reproducible on
         # local hardware.
         options.AppendExtraBrowserArgs([
             '--disable-gl-extensions=GL_EXT_disjoint_timer_query',
             '--ignore_egl_sync_failures',
         ])
Example 6
  def testRunPageWithStartupUrl(self):
    num_times_browser_closed = [0]

    class TestSharedState(shared_page_state.SharedPageState):

      def _StopBrowser(self):
        super(TestSharedState, self)._StopBrowser()
        num_times_browser_closed[0] += 1

    story_set = story.StorySet()
    page = page_module.Page(
        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir(),
        startup_url='about:blank', shared_page_state_class=TestSharedState)
    story_set.AddStory(page)

    class Measurement(legacy_page_test.LegacyPageTest):

      def __init__(self):
        super(Measurement, self).__init__()

      def ValidateAndMeasurePage(self, page, tab, results):
        del page, tab, results  # not used

    options = options_for_unittests.GetCopy()
    options.pageset_repeat = 2
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    if not browser_finder.FindBrowser(options):
      return
    test = Measurement()
    SetUpStoryRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    story_runner.Run(test, story_set, options, results,
        metadata=EmptyMetadataForTest())
    self.assertEquals('about:blank', options.browser_options.startup_url)
    # _StopBrowser should be called 2 times:
    # 1. browser restarts after page 1 run
    # 2. in the TearDownState after all the pages have run.
    self.assertEquals(num_times_browser_closed[0], 2)
Example 7
 def setUp(self):
     """ Copy the manifest and background.js files of simple_extension to a
 number of temporary directories to load as extensions"""
     self._extension_dirs = [tempfile.mkdtemp() for _ in range(3)]
     src_extension_dir = os.path.join(util.GetUnittestDataDir(),
                                      'simple_extension')
     manifest_path = os.path.join(src_extension_dir, 'manifest.json')
     script_path = os.path.join(src_extension_dir, 'background.js')
     for d in self._extension_dirs:
         shutil.copy(manifest_path, d)
         shutil.copy(script_path, d)
     options = options_for_unittests.GetCopy()
     self._extensions_to_load = [
         extension_to_load.ExtensionToLoad(d, options.browser_type)
         for d in self._extension_dirs
     ]
     options.extensions_to_load = self._extensions_to_load
     browser_to_create = browser_finder.FindBrowser(options)
     self._browser = None
     # May not find a browser that supports extensions.
     if browser_to_create:
         self._browser = browser_to_create.Create(options)
Example 8
 def __init__(self, extra_browser_args=None, username=None, password=None):
     finder_options = browser_options.BrowserFinderOptions()
     finder_options.browser_type = 'system'
     if extra_browser_args:
         finder_options.browser_options.AppendExtraBrowserArgs(
             extra_browser_args)
     finder_options.verbosity = 0
     finder_options.CreateParser().parse_args(args=[])
     b_options = finder_options.browser_options
     b_options.disable_component_extensions_with_background_pages = False
     b_options.create_browser_with_oobe = True
     b_options.clear_enterprise_policy = True
     b_options.dont_override_profile = False
     b_options.disable_gaia_services = True
     b_options.disable_default_apps = True
     b_options.disable_component_extensions_with_background_pages = True
     b_options.auto_login = True
     b_options.gaia_login = False
     b_options.gaia_id = b_options.gaia_id
     open('/mnt/stateful_partition/etc/collect_chrome_crashes', 'w').close()
     browser_to_create = browser_finder.FindBrowser(finder_options)
     self._browser = browser_to_create.Create(finder_options)
Example 9
    def __init__(self, test, finder_options, story_set):
        """
    Args:
      test: (unused)
      finder_options: A finder_options object
      story_set: (unused)
    """
        super(SharedAndroidStoryState, self).__init__(test, finder_options,
                                                      story_set)
        self._finder_options = finder_options
        self._possible_browser = browser_finder.FindBrowser(
            self._finder_options)
        self._current_story = None

        # This is an Android-only shared state.
        assert isinstance(self.platform, android_platform.AndroidPlatform)
        self._finder_options.browser_options.browser_user_agent_type = 'mobile'

        # TODO: This will always use live sites. Should use options to configure
        # network_controller properly. See e.g.: https://goo.gl/nAsyFr
        self.platform.network_controller.Open(wpr_modes.WPR_OFF)
        self.platform.Initialize()
Example 10
  def RunTest(self):

    class SinglePageBenchmark(benchmark_class):  # pylint: disable=no-init
      def CreateStorySet(self, options):
        # pylint: disable=super-on-old-class
        story_set = super(SinglePageBenchmark, self).CreateStorySet(options)
        stories_to_remove = [s for s in story_set.stories if s !=
                             story_to_smoke_test]
        for s in stories_to_remove:
          story_set.RemoveStory(s)
        assert story_set.stories
        return story_set

    options = GenerateBenchmarkOptions(benchmark_class)

    # Prevent benchmarks from accidentally trying to upload too much data to the
    # chromeperf dashboard. The number of values uploaded is equal to (the
    # average number of values produced by a single story) * (1 + (the number of
    # stories)). The "1 + " accounts for values summarized across all stories.
    # We can approximate "the average number of values produced by a single
    # story" as the number of values produced by the given story.
    # pageset_repeat doesn't matter because values are summarized across
    # repetitions before uploading.
    story_set = benchmark_class().CreateStorySet(options)
    SinglePageBenchmark.MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set.stories)
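    # For example (hypothetical numbers, not from the original source): with
    # MAX_NUM_VALUES = 50000 and a 25-story story set, each single-story run
    # would be capped at 2000 values.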

    possible_browser = browser_finder.FindBrowser(options)
    if possible_browser is None:
      self.skipTest('Cannot find the browser to run the test.')

    if self.id() in _DISABLED_TESTS:
      self.skipTest('Test is explicitly disabled')

    single_page_benchmark = SinglePageBenchmark()
    with open(path_util.GetExpectationsPath()) as fp:
      single_page_benchmark.AugmentExpectationsWithParser(fp.read())

    self.assertEqual(0, single_page_benchmark.Run(options),
                     msg='Failed: %s' % benchmark_class)
Example 11
    def Run(self, options):
        """Kick off the process.

    Args:
      options: Instance of BrowserFinderOptions to search for proper browser.

    Returns:
      A 2-tuple (path, new_profile).

      path: The path of the generated profile or existing profile if
      --profile-dir is given. Could be None if it's generated on default
      location (e.g., cryptohome on CrOS).

      new_profile: Whether a new profile has been generated. If this is True,
      the caller is responsible for deleting the profile.
    """
        possible_browser = browser_finder.FindBrowser(options)

        if possible_browser.browser_type.startswith('cros'):
            self.Create(options, None)
            return (None, False)

        # Use the given --profile-dir.
        if options.browser_options.profile_dir:
            return (options.browser_options.profile_dir, False)

        out_dir = os.path.abspath(
            os.path.join(tempfile.gettempdir(), self._profile_name,
                         self._profile_name))

        # Never reuse a generated profile, since it might be for a different version
        # of Chrome.
        if os.path.exists(out_dir):
            assert os.path.isdir(out_dir)
            shutil.rmtree(out_dir)

        self.Create(options, out_dir)
        return (out_dir, True)
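
    # A minimal caller sketch (hypothetical; `generator` and the cleanup
    # policy are assumptions, not from the original source), honoring the
    # (path, new_profile) contract documented above:
    #
    #   path, new_profile = generator.Run(options)
    #   try:
    #       pass  # use the profile at `path` (may be None, e.g. on CrOS)
    #   finally:
    #       if new_profile and path:
    #           shutil.rmtree(path)  # caller owns generated profiles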
Example 12
    def ProcessCommandLineArgs(cls, parser, options, environment):
        all_benchmarks = environment.GetBenchmarks()
        if environment.expectations_files:
            assert len(environment.expectations_files) == 1
            expectations_file = environment.expectations_files[0]
        else:
            expectations_file = None
        if not options.positional_args:
            possible_browser = (browser_finder.FindBrowser(options)
                                if options.browser_type else None)
            PrintBenchmarkList(all_benchmarks, possible_browser,
                               expectations_file)
            parser.error('missing required argument: benchmark_name')

        benchmark_name = options.positional_args[0]
        benchmark_class = environment.GetBenchmarkByName(benchmark_name)
        if benchmark_class is None:
            most_likely_matched_benchmarks = matching.GetMostLikelyMatchedObject(
                all_benchmarks, benchmark_name, lambda x: x.Name())
            if most_likely_matched_benchmarks:
                print >> sys.stderr, 'Do you mean any of those benchmarks below?'
                PrintBenchmarkList(most_likely_matched_benchmarks, None,
                                   expectations_file, sys.stderr)
            parser.error('no such benchmark: %s' % benchmark_name)

        if len(options.positional_args) > 1:
            parser.error('unrecognized arguments: %s' %
                         ' '.join(options.positional_args[1:]))

        assert issubclass(
            benchmark_class,
            benchmark.Benchmark), ('Trying to run a non-Benchmark?!')

        story_runner.ProcessCommandLineArgs(parser, options)
        benchmark_class.ProcessCommandLineArgs(parser, options)

        cls._benchmark = benchmark_class
        cls._expectations_path = expectations_file
Example 13
    def BenchmarkSmokeTest(self):
        # Only measure a single page so that this test cycles reasonably quickly.
        benchmark.options['pageset_repeat'] = 1
        benchmark.options['page_repeat'] = 1

        class SinglePageBenchmark(benchmark):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)

                # Only smoke test the first story since smoke testing everything takes
                # too long.
                for s in story_set.stories[num_pages:]:
                    story_set.RemoveStory(s)
                return story_set

        # Set the benchmark's default arguments.
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        parser = options.CreateParser()

        benchmark.AddCommandLineArgs(parser)
        benchmark_module.AddCommandLineArgs(parser)
        benchmark.SetArgumentDefaults(parser)
        options.MergeDefaultValues(parser.get_default_values())

        benchmark.ProcessCommandLineArgs(None, options)
        benchmark_module.ProcessCommandLineArgs(None, options)

        possible_browser = browser_finder.FindBrowser(options)
        if SinglePageBenchmark.ShouldDisable(possible_browser):
            self.skipTest('Benchmark %s has ShouldDisable return True' %
                          SinglePageBenchmark.Name())

        self.assertEqual(0,
                         SinglePageBenchmark().Run(options),
                         msg='Failed: %s' % benchmark)
Example 14
 def __init__(self, test, finder_options, story_set):
   """
   Args:
     test: opaquely passed to parent class constructor.
     finder_options: A BrowserFinderOptions object.
     story_set: opaquely passed to parent class constructor.
   """
   super(_MobileStartupSharedState, self).__init__(
       test, finder_options, story_set)
   self._finder_options = finder_options
   self._possible_browser = browser_finder.FindBrowser(self._finder_options)
   self._current_story = None
   # Allow using this shared state only on Android.
   assert isinstance(self.platform, android_platform.AndroidPlatform)
   self._finder_options.browser_options.browser_user_agent_type = 'mobile'
   self.platform.Initialize()
   assert finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD, (
       'Recording WPR archives is not supported for this benchmark.')
   wpr_mode = wpr_modes.WPR_REPLAY
   if finder_options.use_live_sites:
     wpr_mode = wpr_modes.WPR_OFF
   self.platform.network_controller.Open(wpr_mode)
   self._story_set = story_set
Example 15
  def RunTest(self):

    class SinglePageBenchmark(benchmark_class):  # pylint: disable=no-init
      def CreateStorySet(self, options):
        # pylint: disable=super-on-old-class
        story_set = super(SinglePageBenchmark, self).CreateStorySet(options)
        assert story_to_smoke_test in story_set.stories
        story_set.stories = [story_to_smoke_test]
        return story_set

    options = GenerateBenchmarkOptions(benchmark_class)
    possible_browser = browser_finder.FindBrowser(options)
    if possible_browser is None:
      self.skipTest('Cannot find the browser to run the test.')
    if (SinglePageBenchmark.ShouldDisable(possible_browser) or
        not decorators.IsEnabled(benchmark_class, possible_browser)[0]):
      self.skipTest('Benchmark %s is disabled' % SinglePageBenchmark.Name())

    if self.id() in _DISABLED_TESTS:
      self.skipTest('Test is explicitly disabled')

    self.assertEqual(0, SinglePageBenchmark().Run(options),
                     msg='Failed: %s' % benchmark_class)
Example 16
  def _PrepareBrowser(self, browser_type, options):
    """Add a browser to the dict of possible browsers.

    TODO(perezju): When available, use the GetBrowserForPlatform API instead.
    See: crbug.com/570348

    Returns:
      The possible browser if found, or None otherwise.
    """
    possible_browser = browser_finder.FindBrowser(options)
    if possible_browser is None:
      return None

    if self._platform is None:
      self._platform = possible_browser.platform
      # TODO(nedn): Remove the if condition once
      # https://codereview.chromium.org/2265593003/ is rolled to Chromium tree.
      if hasattr(self._platform.network_controller, 'InitializeIfNeeded'):
        self._platform.network_controller.InitializeIfNeeded()
    else:
      assert self._platform is possible_browser.platform
    self._possible_browsers[browser_type] = (possible_browser, options)
    return possible_browser
Example 17
    def ProcessCommandLineArgs(cls, parser, args, _):
        # We retry failures by default unless we're running a list of tests
        # explicitly.
        if not args.retry_limit and not args.positional_args:
            args.retry_limit = 3

        if args.test_filter and args.positional_args:
            parser.error('Cannot specify test names in positional args and '
                         'use the --test-filter flag at the same time.')

        if args.no_browser:
            return

        if args.start_xvfb and xvfb.ShouldStartXvfb():
            cls.xvfb_process = xvfb.StartXvfb()
            # Work around Mesa issues on Linux. See
            # https://github.com/catapult-project/catapult/issues/3074
            args.browser_options.AppendExtraBrowserArgs('--disable-gpu')

        try:
            possible_browser = browser_finder.FindBrowser(args)
        except browser_finder_exceptions.BrowserFinderException as ex:
            parser.error(ex)
Example 18
    def _GetPossibleBrowser(self, test, finder_options):
        """Return a possible_browser with the given options. """
        possible_browser = browser_finder.FindBrowser(finder_options)
        if not possible_browser:
            raise browser_finder_exceptions.BrowserFinderException(
                'No browser found.\n\nAvailable browsers:\n%s\n' % '\n'.join(
                    browser_finder.GetAllAvailableBrowserTypes(finder_options))
            )
        finder_options.browser_options.browser_type = (
            possible_browser.browser_type)

        (enabled, msg) = decorators.IsEnabled(test, possible_browser)
        if (not enabled and not finder_options.run_disabled_tests):
            logging.warning(msg)
            logging.warning('You are trying to run a disabled test.')
            logging.warning(
                'Pass --also-run-disabled-tests to squelch this message.')
            sys.exit(0)

        if possible_browser.IsRemote():
            possible_browser.RunRemote()
            sys.exit(0)
        return possible_browser
Example 19
  def SetBrowserOptions(cls, browser_options):
    """Sets the browser option for the browser to create.

    Args:
      browser_options: Browser options object for the browser we want to test.
    """
    cls._browser_options = browser_options
    cls._browser_to_create = browser_finder.FindBrowser(browser_options)
    if not cls._browser_to_create:
      raise browser_finder_exceptions.BrowserFinderException(
          'Cannot find browser of type %s. \n\nAvailable browsers:\n%s\n' % (
              browser_options.browser_options.browser_type,
              '\n'.join(browser_finder.GetAllAvailableBrowserTypes(
                  browser_options))))
    if not cls.platform:
      cls.platform = cls._browser_to_create.platform
      cls.platform.SetFullPerformanceModeEnabled(
          browser_options.full_performance_mode)
      cls.platform.network_controller.Open()
    else:
      assert cls.platform == cls._browser_to_create.platform, (
          'All browser launches within same test suite must use browsers on '
          'the same platform')
Example 20
    def _GetPossibleBrowser(self):
        """Return a possible_browser with the given options."""
        possible_browser = browser_finder.FindBrowser(self._finder_options)
        if not possible_browser:
            raise browser_finder_exceptions.BrowserFinderException(
                'Cannot find browser of type %s. \n\nAvailable browsers:\n%s\n'
                %
                (self._finder_options.browser_options.browser_type, '\n'.join(
                    browser_finder.GetAllAvailableBrowserTypes(
                        self._finder_options))))

        self._finder_options.browser_options.browser_type = (
            possible_browser.browser_type)

        if self._page_test:
            # Check for Enabled/Disabled decorators on page_test.
            skip, msg = decorators.ShouldSkip(self._page_test,
                                              possible_browser)
            if skip and not self._finder_options.run_disabled_tests:
                logging.warning(msg)
                logging.warning('You are trying to run a disabled test.')

        return possible_browser
Example 21
    def _CreateBrowser(self,
                       autotest_ext=False,
                       auto_login=True,
                       gaia_login=False,
                       username=None,
                       password=None,
                       gaia_id=None,
                       dont_override_profile=False):
        """Finds and creates a browser for tests. if autotest_ext is True,
    also loads the autotest extension"""
        options = options_for_unittests.GetCopy()

        if autotest_ext:
            extension_path = os.path.join(util.GetUnittestDataDir(),
                                          'autotest_ext')
            assert os.path.isdir(extension_path)
            self._load_extension = extension_to_load.ExtensionToLoad(
                path=extension_path,
                browser_type=options.browser_type,
                is_component=True)
            options.browser_options.extensions_to_load = [self._load_extension]

        browser_to_create = browser_finder.FindBrowser(options)
        self.assertTrue(browser_to_create)
        browser_options = options.browser_options
        browser_options.create_browser_with_oobe = True
        browser_options.auto_login = auto_login
        browser_options.gaia_login = gaia_login
        browser_options.dont_override_profile = dont_override_profile
        if username is not None:
            browser_options.username = username
        if password is not None:
            browser_options.password = password
        if gaia_id is not None:
            browser_options.gaia_id = gaia_id

        return browser_to_create.Create(options)
Example 22
 def __init__(self, test, finder_options, story_set, possible_browser=None):
     """
 Args:
   test: opaquely passed to parent class constructor.
   finder_options: A BrowserFinderOptions object.
   story_set: opaquely passed to parent class constructor.
 """
     super(_MobileStartupSharedState,
           self).__init__(test, finder_options, story_set, possible_browser)
     self._finder_options = finder_options
     if not self._possible_browser:
         self._possible_browser = browser_finder.FindBrowser(
             self._finder_options)
     self._current_story = None
     # Allow using this shared state only on Android.
     assert isinstance(self.platform, android_platform.AndroidPlatform)
     self._finder_options.browser_options.browser_user_agent_type = 'mobile'
     self._finder_options.browser_options.AppendExtraBrowserArgs(
         '--skip-webapk-verification')
     self.platform.Initialize()
     self.platform.SetFullPerformanceModeEnabled(True)
     maps_webapk = core_util.FindLatestApkOnHost(finder_options.chrome_root,
                                                 'MapsWebApk.apk')
     if not maps_webapk:
         raise Exception('MapsWebApk not found! Follow the Mini-HOWTO in '
                         'startup_mobile.py')
     self.platform.InstallApplication(maps_webapk)
     wpr_mode = wpr_modes.WPR_REPLAY
     self._number_of_iterations = _NUMBER_OF_ITERATIONS
     if finder_options.use_live_sites:
         wpr_mode = wpr_modes.WPR_OFF
     elif finder_options.browser_options.wpr_mode == wpr_modes.WPR_RECORD:
         wpr_mode = wpr_modes.WPR_RECORD
         # When recording a WPR archive only load the story page once.
         self._number_of_iterations = 1
     self.platform.network_controller.Open(wpr_mode)
     self._story_set = story_set
Example 23
  def ProcessCommandLineArgs(cls, parser, args, _):
    if args.verbosity >= 2:
      logging.getLogger().setLevel(logging.DEBUG)
    elif args.verbosity:
      logging.getLogger().setLevel(logging.INFO)
    else:
      logging.getLogger().setLevel(logging.WARNING)

    # We retry failures by default unless we're running a list of tests
    # explicitly.
    if not args.retry_limit and not args.positional_args:
      args.retry_limit = 3

    if args.test_filter and args.positional_args:
      parser.error(
          'Cannot specify test names in positional args and use the '
          '--test-filter flag at the same time.')

    if args.no_browser:
      return

    if args.start_xvfb and xvfb.ShouldStartXvfb():
      cls.xvfb_process = xvfb.StartXvfb()
      # Work around Mesa issues on Linux. See
      # https://github.com/catapult-project/catapult/issues/3074
      args.browser_options.AppendExtraBrowserArgs('--disable-gpu')

    try:
      possible_browser = browser_finder.FindBrowser(args)
    except browser_finder_exceptions.BrowserFinderException as ex:
      parser.error(ex)

    if not possible_browser:
      parser.error('No browser found of type %s. Cannot run tests.\n'
                   'Re-run with --browser=list to see '
                   'available browser types.' % args.browser_type)
Example 24
    def setUpClass(cls):
        global current_browser
        global current_browser_options

        options = options_for_unittests.GetCopy()

        cls.CustomizeBrowserOptions(options.browser_options)
        if not current_browser or (current_browser_options !=
                                   options.browser_options):
            if current_browser:
                teardown_browser()

            browser_to_create = browser_finder.FindBrowser(options)
            if not browser_to_create:
                raise Exception('No browser found, cannot continue test.')

            try:
                current_browser = browser_to_create.Create(options)
                current_browser_options = options.browser_options
            except:
                cls.tearDownClass()
                raise
        cls._browser = current_browser
        cls._device = options.device
Example 25
# Initialize the dependency manager
from telemetry.internal.util import binary_manager
from chrome_telemetry_build import chromium_config
binary_manager.InitDependencyManager(
    chromium_config.ChromiumConfig().client_config)

from telemetry.timeline import tracing_config

from json import dumps

# Assumed imports, not shown in the original snippet:
import sys

from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_options

options = browser_options.BrowserFinderOptions()
parser = options.CreateParser()
(_, args) = parser.parse_args()

browserFactory = browser_finder.FindBrowser(options)

with browserFactory.Create(options) as browser:
    tab = browser.tabs.New()
    tab.Activate()
    for i in browser.tabs:
        if i == tab:
            continue
        i.Close()

    config = tracing_config.TracingConfig()
    config.enable_chrome_trace = True
    browser.platform.tracing_controller.StartTracing(config)
    tab.Navigate(args[0])
    tab.WaitForDocumentReadyStateToBeComplete()
    browser.platform.tracing_controller.StopTracing().Serialize(sys.stdout)
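
# Example invocation (assumptions: the snippet is saved as trace_url.py inside
# a Chromium checkout with Telemetry on PYTHONPATH; the serialized trace goes
# to stdout, so redirect it):
#
#   python trace_url.py --browser=system http://example.com > trace.json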
Example 26
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    The number of failure values (up to 254) or 255 if there is an uncaught
    exception.
  """
    start = time.time()
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
        print(
            'Cannot find browser of type %s. To list out all '
            'available browsers, rerun your command with '
            '--browser=list' % finder_options.browser_options.browser_type)
        return 1
    if (possible_browser and
            not decorators.IsBenchmarkEnabled(benchmark, possible_browser)):
        print '%s is disabled on the selected browser' % benchmark.Name()
        if finder_options.run_disabled_tests:
            print 'Running benchmark anyway due to: --also-run-disabled-tests'
        else:
            print 'Try --also-run-disabled-tests to force the benchmark to run.'
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    benchmark.ValueCanBeAddedPredicate,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    should_tear_down_state_after_each_story_run = (
        benchmark.ShouldTearDownStateAfterEachStoryRun())
    # HACK: restarting shared state has huge overhead on cros (crbug.com/645329),
    # hence we default this to False when test is run against CrOS.
    # TODO(cros-team): figure out ways to remove this hack.
    if (possible_browser.platform.GetOSName() == 'chromeos' and
            not benchmark.IsShouldTearDownStateAfterEachStoryRunOverriden()):
        should_tear_down_state_after_each_story_run = False

    with results_options.CreateResults(benchmark_metadata,
                                       finder_options,
                                       benchmark.ValueCanBeAddedPredicate,
                                       benchmark_enabled=True) as results:
        try:
            Run(pt, stories, finder_options, results, benchmark.max_failures,
                should_tear_down_state_after_each_story_run,
                benchmark.ShouldTearDownStateAfterEachStorySetRun())
            return_code = min(254, len(results.failures))
        except Exception:
            exception_formatter.PrintFormattedException()
            return_code = 255

        try:
            if finder_options.upload_results:
                bucket = finder_options.upload_bucket
                if bucket in cloud_storage.BUCKET_ALIASES:
                    bucket = cloud_storage.BUCKET_ALIASES[bucket]
                results.UploadTraceFilesToCloud(bucket)
                results.UploadProfilingFilesToCloud(bucket)
        finally:
            duration = time.time() - start
            results.AddSummaryValue(
                scalar.ScalarValue(None, 'BenchmarkDuration', 'minutes',
                                   duration / 60.0))
            results.PrintSummary()
    return return_code
Example 27
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success,
    1 if there was a failure,
    2 if there was an uncaught exception.
  """
    benchmark.CustomizeOptions(finder_options)
    with results_options.CreateResults(
            finder_options,
            benchmark_name=benchmark.Name(),
            benchmark_description=benchmark.Description(),
            report_progress=not finder_options.suppress_gtest_report
    ) as results:

        possible_browser = browser_finder.FindBrowser(finder_options)
        if not possible_browser:
            print('No browser of type "%s" found for running benchmark "%s".' %
                  (finder_options.browser_options.browser_type,
                   benchmark.Name()))
            return -1
        typ_expectation_tags = possible_browser.GetTypExpectationsTags()
        logging.info(
            'The following expectations condition tags were generated %s',
            str(typ_expectation_tags))
        try:
            benchmark.expectations.SetTags(
                typ_expectation_tags,
                not finder_options.skip_typ_expectations_tags_validation)
        except ValueError as e:  # pylint: disable=broad-except
            traceback.print_exc(file=sys.stdout)
            logging.error(
                str(e) +
                '\nYou can use the --skip-typ-expectations-tags-validation '
                'argument to suppress this exception.')
            return -1

        if not _ShouldRunBenchmark(benchmark, possible_browser,
                                   finder_options):
            return -1

        test = benchmark.CreatePageTest(finder_options)
        test.__name__ = benchmark.__class__.__name__

        story_set = benchmark.CreateStorySet(finder_options)

        if isinstance(test, legacy_page_test.LegacyPageTest):
            if any(not isinstance(p, page.Page) for p in story_set.stories):
                raise Exception(
                    'PageTest must be used with StorySet containing only '
                    'telemetry.page.Page stories.')
        try:
            RunStorySet(test,
                        story_set,
                        finder_options,
                        results,
                        benchmark.max_failures,
                        expectations=benchmark.expectations,
                        max_num_values=benchmark.MAX_NUM_VALUES)
            if results.benchmark_interrupted:
                return_code = 2
            elif results.had_failures:
                return_code = 1
            elif results.had_successes:
                return_code = 0
            else:
                return_code = -1  # All stories were skipped.
        except Exception as exc:  # pylint: disable=broad-except
            interruption = 'Benchmark execution interrupted: %r' % exc
            results.InterruptBenchmark(interruption)
            exception_formatter.PrintFormattedException()
            return_code = 2

        # TODO(crbug.com/981349): merge two calls to AddSharedDiagnostics
        # (see RunStorySet() method for the second one).
        results.AddSharedDiagnostics(
            owners=benchmark.GetOwners(),
            bug_components=benchmark.GetBugComponents(),
            documentation_urls=benchmark.GetDocumentationLinks(),
        )

        if finder_options.upload_results:
            results_processor.UploadArtifactsToCloud(results)
    return return_code
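
# Return-code sketch for the contract documented above (hypothetical caller,
# not from the original source):
#
#   return_code = RunBenchmark(benchmark, finder_options)
#   # -1: skipped, 0: success, 1: story failures, 2: uncaught exception
#   sys.exit(max(return_code, 0))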
Example 28
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    The number of failure values (up to 254) or 255 if there is an uncaught
    exception.
  """
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    possible_browser = browser_finder.FindBrowser(finder_options)
    if possible_browser and benchmark.ShouldDisable(possible_browser):
        logging.warning('%s is disabled on the selected browser',
                        benchmark.Name())
        if finder_options.run_disabled_tests:
            logging.warning(
                'Running benchmark anyway due to: --also-run-disabled-tests')
        else:
            logging.warning(
                'Try --also-run-disabled-tests to force the benchmark to run.')
            return 1

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    disabled_attr_name = decorators.DisabledAttributeName(benchmark)
    # pylint: disable=protected-access
    pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
    if hasattr(benchmark, '_enabled_strings'):
        # pylint: disable=protected-access
        pt._enabled_strings = benchmark._enabled_strings

    stories = benchmark.CreateStorySet(finder_options)
    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    should_tear_down_state_after_each_story_run = (
        benchmark.ShouldTearDownStateAfterEachStoryRun())
    # HACK: restarting shared state has huge overhead on cros (crbug.com/645329),
    # hence we default this to False when test is run against CrOS.
    # TODO(cros-team): figure out ways to remove this hack.
    if (possible_browser.platform.GetOSName() == 'chromeos' and
            not benchmark.IsShouldTearDownStateAfterEachStoryRunOverriden()):
        should_tear_down_state_after_each_story_run = False

    benchmark_metadata = benchmark.GetMetadata()
    with results_options.CreateResults(
            benchmark_metadata, finder_options,
            benchmark.ValueCanBeAddedPredicate) as results:
        try:
            Run(pt, stories, finder_options, results, benchmark.max_failures,
                should_tear_down_state_after_each_story_run,
                benchmark.ShouldTearDownStateAfterEachStorySetRun())
            return_code = min(254, len(results.failures))
        except Exception:
            exception_formatter.PrintFormattedException()
            return_code = 255

        try:
            if finder_options.upload_results:
                bucket = finder_options.upload_bucket
                if bucket in cloud_storage.BUCKET_ALIASES:
                    bucket = cloud_storage.BUCKET_ALIASES[bucket]
                results.UploadTraceFilesToCloud(bucket)
                results.UploadProfilingFilesToCloud(bucket)
        finally:
            results.PrintSummary()
    return return_code
Example 29
    def __init__(self,
                 logged_in=True,
                 extension_paths=None,
                 autotest_ext=False,
                 num_tries=3,
                 extra_browser_args=None,
                 clear_enterprise_policy=True,
                 expect_policy_fetch=False,
                 dont_override_profile=False,
                 disable_gaia_services=True,
                 disable_default_apps=True,
                 auto_login=True,
                 gaia_login=False,
                 username=None,
                 password=None,
                 gaia_id=None,
                 arc_mode=None,
                 arc_timeout=None,
                 disable_arc_opt_in=True,
                 disable_arc_opt_in_verification=True,
                 disable_arc_cpu_restriction=True,
                 disable_app_sync=False,
                 disable_play_auto_install=False,
                 disable_locale_sync=True,
                 disable_play_store_auto_update=True,
                 enable_assistant=False,
                 enterprise_arc_test=False,
                 init_network_controller=False,
                 mute_audio=False,
                 proxy_server=None,
                 login_delay=0):
        """
        Constructor of telemetry wrapper.

        @param logged_in: Regular user (True) or guest user (False).
        @param extension_paths: path of unpacked extension to install.
        @param autotest_ext: Load a component extension with privileges to
                             invoke chrome.autotestPrivate.
        @param num_tries: Number of attempts to log in.
        @param extra_browser_args: Additional argument(s) to pass to the
                                   browser. It can be a string or a list.
        @param clear_enterprise_policy: Clear enterprise policy before
                                        logging in.
        @param expect_policy_fetch: Expect that chrome can reach the device
                                    management server and download policy.
        @param dont_override_profile: Don't delete cryptohome before login.
                                      Telemetry will output a warning with this
                                      option.
        @param disable_gaia_services: For enterprise autotests, this option may
                                      be used to enable policy fetch.
        @param disable_default_apps: For tests that exercise default apps.
        @param auto_login: Does not login automatically if this is False.
                           Useful if you need to examine oobe.
        @param gaia_login: Logs in to real gaia.
        @param username: Log in using this username instead of the default.
        @param password: Log in using this password instead of the default.
        @param gaia_id: Log in using this gaia_id instead of the default.
        @param arc_mode: How ARC instance should be started.  Default is to not
                         start.
        @param arc_timeout: Timeout to wait for ARC to boot.
        @param disable_arc_opt_in: For opt in flow autotest. This option is used
                                   to disable the arc opt in flow.
        @param disable_arc_opt_in_verification:
             Adds --disable-arc-opt-in-verification to browser args. This should
             generally be enabled when disable_arc_opt_in is enabled. However,
             for data migration tests where user's home data is already set up
             with opted-in state before login, this option needs to be set to
             False with disable_arc_opt_in=True to make ARC container work.
        @param disable_arc_cpu_restriction:
             Adds --disable-arc-cpu-restriction to browser args. This is enabled
             by default and will make tests run faster and is generally
             desirable unless a test is actually trying to test performance
             where ARC is running in the background for some portion of the
             test.
        @param disable_app_sync:
            Adds --arc-disable-app-sync to browser args and this disables ARC
            app sync flow. By default it is enabled.
        @param disable_play_auto_install:
            Adds --arc-disable-play-auto-install to browser args and this
            disables ARC Play Auto Install flow. By default it is enabled.
        @param enable_assistant: For tests that require to enable Google
                                  Assistant service. Default is False.
        @param enterprise_arc_test: Skips opt_in causing enterprise tests to fail
        @param disable_locale_sync:
            Adds --arc-disable-locale-sync to browser args and this
            disables locale sync between Chrome and Android container. In case
            of disabling sync, Android container is started with language and
            preference language list as it was set on the moment of starting
            full instance. Used to prevent random app restarts caused by racy
            locale change, coming from profile sync. By default locale sync is
            disabled.
        @param disable_play_store_auto_update:
            Adds --arc-play-store-auto-update=off to browser args and this
            disables Play Store, GMS Core and third-party apps auto-update.
            By default auto-update is off to have stable autotest environment.
        @param mute_audio: Mute audio.
        @param proxy_server: To launch the chrome with --proxy-server
            Adds '--proxy-server="http://$HTTP_PROXY:PORT"' to browser args. By
            default proxy-server is disabled
        @param login_delay: Time for idle in login screen to simulate the time
                            required for password typing.
        """
        self._autotest_ext_path = None

        # Force autotest extension if we need to enable Play Store.
        if (utils.is_arc_available() and
            (arc_util.should_start_arc(arc_mode) or not disable_arc_opt_in)):
            autotest_ext = True

        if extension_paths is None:
            extension_paths = []

        finder_options = browser_options.BrowserFinderOptions()
        if proxy_server:
            finder_options.browser_options.AppendExtraBrowserArgs(
                ['--proxy-server="%s"' % proxy_server])
        if utils.is_arc_available() and arc_util.should_start_arc(arc_mode):
            if disable_arc_opt_in and disable_arc_opt_in_verification:
                finder_options.browser_options.AppendExtraBrowserArgs(
                    ['--disable-arc-opt-in-verification'])
            if disable_arc_cpu_restriction:
                finder_options.browser_options.AppendExtraBrowserArgs(
                    ['--disable-arc-cpu-restriction'])
            if disable_app_sync:
                finder_options.browser_options.AppendExtraBrowserArgs(
                    ['--arc-disable-app-sync'])
            if disable_play_auto_install:
                finder_options.browser_options.AppendExtraBrowserArgs(
                    ['--arc-disable-play-auto-install'])
            if disable_locale_sync:
                finder_options.browser_options.AppendExtraBrowserArgs(
                    ['--arc-disable-locale-sync'])
            if disable_play_store_auto_update:
                finder_options.browser_options.AppendExtraBrowserArgs(
                    ['--arc-play-store-auto-update=off'])
            logged_in = True

        if autotest_ext:
            self._autotest_ext_path = os.path.join(os.path.dirname(__file__),
                                                   'autotest_private_ext')
            extension_paths.append(self._autotest_ext_path)
            finder_options.browser_options.AppendExtraBrowserArgs(
                ['--whitelisted-extension-id=%s' % self.AUTOTEST_EXT_ID])

        self._browser_type = (self.BROWSER_TYPE_LOGIN
                              if logged_in else self.BROWSER_TYPE_GUEST)
        finder_options.browser_type = self.browser_type
        if extra_browser_args:
            finder_options.browser_options.AppendExtraBrowserArgs(
                extra_browser_args)

        # finder options must be set before parse_args(), browser options must
        # be set before Create().
        # TODO(crbug.com/360890) Below MUST be '2' so that it doesn't inhibit
        # autotest debug logs
        finder_options.verbosity = 2
        finder_options.CreateParser().parse_args(args=[])
        b_options = finder_options.browser_options
        b_options.disable_component_extensions_with_background_pages = False
        b_options.create_browser_with_oobe = True
        b_options.clear_enterprise_policy = clear_enterprise_policy
        b_options.dont_override_profile = dont_override_profile
        b_options.disable_gaia_services = disable_gaia_services
        b_options.disable_default_apps = disable_default_apps
        b_options.disable_component_extensions_with_background_pages = disable_default_apps
        b_options.disable_background_networking = False
        b_options.expect_policy_fetch = expect_policy_fetch
        b_options.auto_login = auto_login
        b_options.gaia_login = gaia_login
        b_options.mute_audio = mute_audio
        b_options.login_delay = login_delay

        if utils.is_arc_available() and not disable_arc_opt_in:
            arc_util.set_browser_options_for_opt_in(b_options)

        self.username = b_options.username if username is None else username
        self.password = b_options.password if password is None else password
        self.username = NormalizeEmail(self.username)
        b_options.username = self.username
        b_options.password = self.password
        self.gaia_id = b_options.gaia_id if gaia_id is None else gaia_id
        b_options.gaia_id = self.gaia_id

        self.arc_mode = arc_mode

        if logged_in:
            extensions_to_load = b_options.extensions_to_load
            for path in extension_paths:
                extension = extension_to_load.ExtensionToLoad(
                    path, self.browser_type)
                extensions_to_load.append(extension)
            self._extensions_to_load = extensions_to_load

        # Turn on collection of Chrome coredumps via creation of a magic file.
        # (Without this, Chrome coredumps are trashed.)
        open(constants.CHROME_CORE_MAGIC_FILE, 'w').close()

        self._browser_to_create = browser_finder.FindBrowser(finder_options)
        self._browser_to_create.SetUpEnvironment(b_options)
        for i in range(num_tries):
            try:
                self._browser = self._browser_to_create.Create()
                self._browser_pid = \
                    cros_interface.CrOSInterface().GetChromePid()
                if utils.is_arc_available():
                    if disable_arc_opt_in:
                        if arc_util.should_start_arc(arc_mode):
                            arc_util.enable_play_store(self.autotest_ext, True)
                    else:
                        if not enterprise_arc_test:
                            wait_for_provisioning = \
                                arc_mode != arc_common.ARC_MODE_ENABLED_ASYNC
                            arc_util.opt_in(
                                browser=self.browser,
                                autotest_ext=self.autotest_ext,
                                wait_for_provisioning=wait_for_provisioning)
                    arc_util.post_processing_after_browser(self, arc_timeout)
                if enable_assistant:
                    assistant_util.enable_assistant(self.autotest_ext)
                break
            except exceptions.LoginException as e:
                logging.error('Timed out logging in, tries=%d, error=%s', i,
                              repr(e))
                if i == num_tries - 1:
                    raise
        if init_network_controller:
            self._browser.platform.network_controller.Open()
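
    # A minimal usage sketch (assumption: this constructor belongs to
    # autotest's chrome.Chrome wrapper, which also works as a context manager):
    #
    #   with Chrome(autotest_ext=True) as cr:
    #       cr.browser.tabs[0].Navigate('chrome://version')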
Example 30
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    1 if there is a failure or 2 if there is an uncaught exception.
  """
    benchmark.CustomizeBrowserOptions(finder_options.browser_options)

    benchmark_metadata = benchmark.GetMetadata()
    possible_browser = browser_finder.FindBrowser(finder_options)
    expectations = benchmark.expectations

    target_platform = None
    if possible_browser:
        target_platform = possible_browser.platform
    else:
        target_platform = platform_module.GetHostPlatform()

    can_run_on_platform = benchmark._CanRunOnPlatform(target_platform,
                                                      finder_options)

    expectations_disabled = False
    # For now, test expectations are only applicable in the cases where the
    # testing target involves a browser.
    if possible_browser:
        expectations_disabled = expectations.IsBenchmarkDisabled(
            possible_browser.platform, finder_options)

    if expectations_disabled or not can_run_on_platform:
        print '%s is disabled on the selected browser' % benchmark.Name()
        if finder_options.run_disabled_tests and can_run_on_platform:
            print 'Running benchmark anyway due to: --also-run-disabled-tests'
        else:
            if can_run_on_platform:
                print 'Try --also-run-disabled-tests to force the benchmark to run.'
            else:
                print(
                    "This platform is not supported for this benchmark. If this is "
                    "in error please add it to the benchmark's supported platforms."
                )
            # If chartjson is specified, this will print a dict indicating the
            # benchmark name and disabled state.
            with results_options.CreateResults(
                    benchmark_metadata,
                    finder_options,
                    should_add_value=benchmark.ShouldAddValue,
                    benchmark_enabled=False) as results:
                results.PrintSummary()
            # When a disabled benchmark is run we now want to return success since
            # we are no longer filtering these out in the buildbot recipes.
            return 0

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    stories = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
        if any(not isinstance(p, page.Page) for p in stories.stories):
            raise Exception(
                'PageTest must be used with StorySet containing only '
                'telemetry.page.Page stories.')

    with results_options.CreateResults(
            benchmark_metadata,
            finder_options,
            should_add_value=benchmark.ShouldAddValue,
            benchmark_enabled=True) as results:
        try:
            Run(pt,
                stories,
                finder_options,
                results,
                benchmark.max_failures,
                expectations=expectations,
                max_num_values=benchmark.MAX_NUM_VALUES)
            return_code = 1 if results.had_failures else 0
            # We want to make sure that all expectations are linked to real
            # stories; this will log error messages if names do not match what
            # is in the set.
            benchmark.GetBrokenExpectations(stories)
        except Exception:  # pylint: disable=broad-except

            logging.fatal(
                'Benchmark execution interrupted by a fatal exception.')

            filtered_stories = story_module.StoryFilter.FilterStorySet(stories)
            results.InterruptBenchmark(filtered_stories,
                                       _GetPageSetRepeat(finder_options))
            exception_formatter.PrintFormattedException()
            return_code = 2

        benchmark_owners = benchmark.GetOwners()
        benchmark_component = benchmark.GetBugComponents()
        benchmark_documentation_url = benchmark.GetDocumentationLink()

        if benchmark_owners:
            results.AddSharedDiagnostic(reserved_infos.OWNERS.name,
                                        benchmark_owners)

        if benchmark_component:
            results.AddSharedDiagnostic(reserved_infos.BUG_COMPONENTS.name,
                                        benchmark_component)

        if benchmark_documentation_url:
            results.AddSharedDiagnostic(reserved_infos.DOCUMENTATION_URLS.name,
                                        benchmark_documentation_url)

        try:
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud()
                results.UploadArtifactsToCloud()
        finally:
            memory_debug.LogHostMemoryUsage()
            results.PrintSummary()
    return return_code