Example #1
0
  def testNeedsBrowserRestartAfterEachPage(self):
    """Each page must trigger a fresh browser launch when the test asks for
    needs_browser_restart_after_each_page."""
    self.CaptureFormattedException()
    story_set = page_set.PageSet()
    expectations = test_expectations.TestExpectations()
    for _ in range(2):
      story_set.pages.append(page_module.Page(
          'file://blank.html', story_set,
          base_dir=util.GetUnittestDataDir()))

    class RestartCountingTest(page_test.PageTest):
      """Counts how many times the browser is (re)started."""

      def __init__(self, *args, **kwargs):
        super(RestartCountingTest, self).__init__(*args, **kwargs)
        self.browser_starts = 0

      def DidStartBrowser(self, *args):
        super(RestartCountingTest, self).DidStartBrowser(*args)
        self.browser_starts += 1

      def ValidateAndMeasurePage(self, page, tab, results):
        pass

    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    test = RestartCountingTest(needs_browser_restart_after_each_page=True)
    SetUpStoryRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    story_runner.Run(test, story_set, expectations, options, results)
    # Two pages ran successfully, and each got its own browser start.
    self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
    self.assertEquals(2, test.browser_starts)
    self.assertFormattedExceptionIsEmpty()
Example #2
0
  def testRunPageWithStartupUrl(self):
    """Verify that a page's startup_url is propagated to the browser options.

    Runs a trivial measurement (page_repeat=2) over a page whose startup_url
    is 'about:blank' and checks that the browser was restarted for the page
    and that the startup url reached options.browser_options.
    """
    ps = page_set.PageSet()
    # Fixed: the original created TestExpectations() twice on consecutive
    # lines; the first instance was immediately discarded.
    expectations = test_expectations.TestExpectations()
    page = page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir(),
        startup_url='about:blank')
    ps.pages.append(page)

    class Measurement(page_test.PageTest):
      def __init__(self):
        super(Measurement, self).__init__()
        self.browser_restarted = False

      def CustomizeBrowserOptionsForSinglePage(self, ps, options):
        self.browser_restarted = True
        super(Measurement, self).CustomizeBrowserOptionsForSinglePage(ps,
                                                                      options)

      def ValidateAndMeasurePage(self, page, tab, results):
        pass

    options = options_for_unittests.GetCopy()
    options.page_repeat = 2
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    # Skip silently when no suitable browser is available on this machine.
    if not browser_finder.FindBrowser(options):
      return
    test = Measurement()
    SetUpStoryRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    story_runner.Run(test, ps, expectations, options, results)
    self.assertEquals('about:blank', options.browser_options.startup_url)
    self.assertTrue(test.browser_restarted)
    def testWebPageReplay(self):
        """Replays the example_domain page set through WPR and checks that
        each replayed page body contains the expected text."""
        story_set = example_domain.ExampleDomainPageSet()
        expectations = test_expectations.TestExpectations()
        recorded_bodies = []

        class TestWpr(page_test.PageTest):
            def ValidateAndMeasurePage(self, _, tab, __):
                recorded_bodies.append(
                    tab.EvaluateJavaScript('document.body.innerText'))

        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpUserStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)

        user_story_runner.Run(TestWpr(), story_set, expectations, options,
                              results)

        self.longMessage = True
        for index in (0, 1):
            self.assertIn('Example Domain',
                          recorded_bodies[index],
                          msg='URL: %s' % story_set.pages[index].url)

        self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
        self.assertEquals(0, len(results.failures))
    def testRunPageWithProfilingFlag(self):
        """Running with profiler='trace' must emit a trace file per page."""
        story_set = page_set.PageSet()
        expectations = test_expectations.TestExpectations()
        story_set.pages.append(
            page_module.Page('file://blank.html',
                             story_set,
                             base_dir=util.GetUnittestDataDir()))

        class Measurement(page_test.PageTest):
            pass

        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        options.reset_results = None
        options.upload_results = None
        options.results_label = None
        options.output_dir = tempfile.mkdtemp()
        options.profiler = 'trace'
        try:
            SetUpUserStoryRunnerArguments(options)
            results = results_options.CreateResults(EmptyMetadataForTest(),
                                                    options)
            user_story_runner.Run(Measurement(), story_set, expectations,
                                  options, results)
            self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
            self.assertEquals(0, len(results.failures))
            self.assertEquals(0, len(results.all_page_specific_values))
            # The trace profiler should have written a file for the page.
            self.assertTrue(
                os.path.isfile(
                    os.path.join(options.output_dir, 'blank_html.json')))
        finally:
            # Always remove the temporary output directory.
            shutil.rmtree(options.output_dir)
Example #5
0
    def _CreateBrowser(self,
                       autotest_ext=False,
                       auto_login=True,
                       gaia_login=False,
                       username=None,
                       password=None):
        """Finds and creates a browser for tests.

        If autotest_ext is True, also loads the autotest extension.

        Args:
          autotest_ext: when True, load the 'autotest_ext' component
              extension from the unittest data directory.
          auto_login: forwarded to browser_options.auto_login.
          gaia_login: forwarded to browser_options.gaia_login.
          username: overrides browser_options.username when not None.
          password: overrides browser_options.password when not None.

        Returns:
          The browser returned by browser_to_create.Create(options).
        """
        options = options_for_unittests.GetCopy()

        if autotest_ext:
            extension_path = os.path.join(util.GetUnittestDataDir(),
                                          'autotest_ext')
            assert os.path.isdir(extension_path)
            # Kept on self so tests can look the extension up later.
            self._load_extension = extension_to_load.ExtensionToLoad(
                path=extension_path,
                browser_type=options.browser_type,
                is_component=True)
            options.extensions_to_load = [self._load_extension]

        browser_to_create = browser_finder.FindBrowser(options)
        self.assertTrue(browser_to_create)
        browser_options = options.browser_options
        # NOTE(review): presumably needed so login tests can start from the
        # out-of-box-experience screen — confirm with the OOBE test callers.
        browser_options.create_browser_with_oobe = True
        browser_options.auto_login = auto_login
        browser_options.gaia_login = gaia_login
        if username is not None:
            browser_options.username = username
        if password is not None:
            browser_options.password = password

        return browser_to_create.Create(options)
    def _RunPageTestThatRaisesAppCrashException(self, test, max_failures):
        """Runs |test| over five always-crashing pages and returns results."""

        class CrashingPage(page_module.Page):
            # Every navigation simulates a browser crash.
            def RunNavigateSteps(self, _):
                raise exceptions.AppCrashException

        story_set = page_set.PageSet()
        for _ in range(5):
            story_set.AddUserStory(
                CrashingPage('file://blank.html',
                             story_set,
                             base_dir=util.GetUnittestDataDir()))
        expectations = test_expectations.TestExpectations()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpUserStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        user_story_runner.Run(test,
                              story_set,
                              expectations,
                              options,
                              results,
                              max_failures=max_failures)
        return results
 def setUp(self):
     # Copy of the unittest options, with Web Page Replay disabled.
     self._options = options_for_unittests.GetCopy()
     self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
     # First renderer thread name used by the TaskExecutionTime measurement.
     self._first_thread_name = (
         task_execution_time.TaskExecutionTime._RENDERER_THREADS[0])
     # Populated by the individual test cases.
     self._measurement = None
     self._page_set = None
def _GetAllPossiblePageTestInstances():
  """Collects page_test.PageTest instances from measurements and benchmarks."""
  measurements_dir = os.path.dirname(__file__)
  top_level_dir = os.path.dirname(measurements_dir)
  benchmarks_dir = os.path.join(top_level_dir, 'benchmarks')

  # Instantiate every measurement class that can be built without arguments.
  measurement_classes = discover.DiscoverClasses(
      measurements_dir, top_level_dir, page_test.PageTest).values()
  page_test_instances = [
      cls() for cls in measurement_classes
      if classes.IsDirectlyConstructable(cls)]

  benchmark_classes = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark).values()

  # Collect page tests from the defined benchmarks. Because this depends on
  # the command line options there is no guarantee that every possible page
  # test instance is produced, but it is good enough for smoke-test purposes.
  for benchmark_class in benchmark_classes:
    options = options_for_unittests.GetCopy()
    parser = options.CreateParser()
    benchmark_class.AddCommandLineArgs(parser)
    benchmark_module.AddCommandLineArgs(parser)
    benchmark_class.SetArgumentDefaults(parser)
    options.MergeDefaultValues(parser.get_default_values())
    instance = benchmark_class().CreatePageTest(options)
    if not isinstance(instance,
                      timeline_based_measurement.TimelineBasedMeasurement):
      page_test_instances.append(instance)

  return page_test_instances
    def RunMeasurement(self,
                       measurement,
                       ps,
                       expectations=None,
                       options=None):
        """Runs a measurement against a pageset, returning its results.

        Args:
          measurement: the page_test.PageTest instance to run.
          ps: the page set to run it against.
          expectations: optional TestExpectations; a fresh instance is
              created when omitted. (The previous default,
              test_expectations.TestExpectations(), was a mutable default
              argument: one instance created at definition time and shared
              by every call.)
          options: optional browser options; defaults to a copy of the
              unittest options.

        Returns:
          The results object produced by the run.
        """
        if expectations is None:
            expectations = test_expectations.TestExpectations()
        if options is None:
            options = options_for_unittests.GetCopy()
        assert options
        temp_parser = options.CreateParser()
        story_runner.AddCommandLineArgs(temp_parser)
        defaults = temp_parser.get_default_values()
        # Copy parser defaults onto options without clobbering existing attrs.
        for k, v in defaults.__dict__.items():
            if hasattr(options, k):
                continue
            setattr(options, k, v)

        measurement.CustomizeBrowserOptions(options.browser_options)
        options.output_file = None
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        options.output_trace_tag = None
        story_runner.ProcessCommandLineArgs(temp_parser, options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        story_runner.Run(measurement, ps, expectations, options, results)
        return results
Example #10
0
    def BenchmarkSmokeTest(self):
        """Smoke test: the benchmark must run to completion on one page.

        NOTE(review): `benchmark` is not defined in this function — it is
        presumably bound in an enclosing scope by test-generation code;
        confirm at the definition site.
        """
        # Only measure a single page so that this test cycles reasonably quickly.
        benchmark.options['pageset_repeat'] = 1
        benchmark.options['page_repeat'] = 1

        class SinglePageBenchmark(benchmark):  # pylint: disable=W0232
            def CreatePageSet(self, options):
                # pylint: disable=E1002
                ps = super(SinglePageBenchmark, self).CreatePageSet(options)
                # Keep only the first page of the original page set.
                for p in ps.pages:
                    p.skip_waits = True
                    ps.user_stories = [p]
                    break
                return ps

        # Set the benchmark's default arguments.
        options = options_for_unittests.GetCopy()
        options.output_format = 'none'
        options.suppress_gtest_report = True
        parser = options.CreateParser()

        benchmark.AddCommandLineArgs(parser)
        benchmark_module.AddCommandLineArgs(parser)
        benchmark.SetArgumentDefaults(parser)
        options.MergeDefaultValues(parser.get_default_values())

        benchmark.ProcessCommandLineArgs(None, options)
        benchmark_module.ProcessCommandLineArgs(None, options)

        # Run() returning 0 indicates success.
        self.assertEqual(0,
                         SinglePageBenchmark().Run(options),
                         msg='Failed: %s' % benchmark)
    def testBrowserBeforeLaunch(self):
        """WillStartBrowser and DidStartBrowser must fire, in that order,
        before any page is validated."""
        story_set = page_set.PageSet()
        expectations = test_expectations.TestExpectations()
        story_set.pages.append(
            page_module.Page('file://blank.html',
                             story_set,
                             base_dir=util.GetUnittestDataDir()))

        class TestBeforeLaunch(page_test.PageTest):
            def __init__(self):
                super(TestBeforeLaunch, self).__init__()
                self._did_call_will_start = False
                self._did_call_did_start = False

            def WillStartBrowser(self, platform):
                self._did_call_will_start = True
                # TODO(simonjam): Test that the profile is available.

            def DidStartBrowser(self, browser):
                assert self._did_call_will_start
                self._did_call_did_start = True

            def ValidatePage(self, *_):
                assert self._did_call_did_start

        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpUserStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        user_story_runner.Run(TestBeforeLaunch(), story_set, expectations,
                              options, results)
    def testCleanUpPage(self):
        """CleanUpAfterPage must run even when page validation raises."""
        story_set = page_set.PageSet()
        expectations = test_expectations.TestExpectations()
        story_set.pages.append(
            page_module.Page('file://blank.html',
                             story_set,
                             base_dir=util.GetUnittestDataDir()))

        class FailingTest(page_test.PageTest):
            def __init__(self):
                super(FailingTest, self).__init__()
                self.did_call_clean_up = False

            def ValidatePage(self, *_):
                # Simulate a failing page; cleanup should still happen.
                raise exceptions.IntentionalException

            def CleanUpAfterPage(self, page, tab):
                self.did_call_clean_up = True

        test = FailingTest()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpUserStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        user_story_runner.Run(test, story_set, expectations, options, results)
        assert test.did_call_clean_up
    def testOneTab(self):
        """An extra tab opened in DidStartBrowser must not survive into the
        page run — exactly one tab is expected at validation time."""
        story_set = page_set.PageSet()
        expectations = test_expectations.TestExpectations()
        story_set.pages.append(
            page_module.Page('file://blank.html',
                             story_set,
                             base_dir=util.GetUnittestDataDir()))

        class TestOneTab(page_test.PageTest):
            def __init__(self):
                super(TestOneTab, self).__init__()
                self._browser = None

            def DidStartBrowser(self, browser):
                # Open a second tab right after the browser launches.
                self._browser = browser
                self._browser.tabs.New()

            def ValidatePage(self, *_):
                assert len(self._browser.tabs) == 1

        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpUserStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        user_story_runner.Run(TestOneTab(), story_set, expectations, options,
                              results)
    def testUserAgent(self):
        """The page set's user_agent_type must be applied to the browser."""
        story_set = page_set.PageSet()
        expectations = test_expectations.TestExpectations()
        story_set.pages.append(
            page_module.Page('file://blank.html',
                             story_set,
                             base_dir=util.GetUnittestDataDir()))
        story_set.user_agent_type = 'tablet'

        class TestUserAgent(page_test.PageTest):
            def ValidatePage(self, _1, tab, _2):
                actual_user_agent = tab.EvaluateJavaScript(
                    'window.navigator.userAgent')
                expected_user_agent = user_agent.UA_TYPE_MAPPING['tablet']
                assert actual_user_agent.strip() == expected_user_agent

                # Record that validation really ran: an earlier regression
                # timed out before reaching this point, skipping the asserts
                # entirely and passing by accident.
                self.hasRun = True  # pylint: disable=W0201

        test = TestUserAgent()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpUserStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        user_story_runner.Run(test, story_set, expectations, options, results)

        self.assertTrue(hasattr(test, 'hasRun') and test.hasRun)
Example #15
0
  def _RebuildCredentials(self):
    """Recomputes self._credentials from all known credential sources.

    Sources are merged with increasing precedence: the file at
    self._credentials_path, then ~/.telemetry-credentials (skipped while
    running under the unittest options), then self._extra_credentials.
    """
    credentials = {}
    # Use identity comparison with None (was `== None`); the combined check
    # also avoids calling os.path.exists(None).
    if (self._credentials_path is not None and
        os.path.exists(self._credentials_path)):
      with open(self._credentials_path, 'r') as f:
        credentials = json.loads(f.read())

    # TODO(nduca): use system keychain, if possible.
    homedir_credentials_path = os.path.expanduser('~/.telemetry-credentials')
    homedir_credentials = {}

    if (not options_for_unittests.GetCopy() and
        os.path.exists(homedir_credentials_path)):
      logging.info("Found ~/.telemetry-credentials. Its contents will be used "
                   "when no other credentials can be found.")
      with open(homedir_credentials_path, 'r') as f:
        homedir_credentials = json.loads(f.read())

    self._credentials = {}
    all_keys = set(credentials.keys()).union(
      homedir_credentials.keys()).union(
      self._extra_credentials.keys())

    for k in all_keys:
      # Later assignments win: file < homedir < extra credentials.
      if k in credentials:
        self._credentials[k] = credentials[k]
      if k in homedir_credentials:
        # Lazy %-style args: the message is only formatted if emitted.
        logging.info("Will use ~/.telemetry-credentials for %s logins.", k)
        self._credentials[k] = homedir_credentials[k]
      if k in self._extra_credentials:
        self._credentials[k] = self._extra_credentials[k]
Example #16
0
 def setUpClass(cls):
   # Generate a browser profile containing _number_of_tabs tabs; the browser
   # is then launched with --restore-last-session against that profile.
   cls._number_of_tabs = 4
   cls._profile_dir = _GenerateBrowserProfile(cls._number_of_tabs)
   cls._options = options_for_unittests.GetCopy()
   cls._options.browser_options.AppendExtraBrowserArgs(
       ['--restore-last-session'])
   cls._options.browser_options.profile_dir = cls._profile_dir
   cls._browser_to_create = browser_finder.FindBrowser(cls._options)
Example #17
0
 def setUp(self):
     options = options_for_unittests.GetCopy()
     # Interface to the remote CrOS device, built from the unittest options.
     self._cri = cros_interface.CrOSInterface(options.cros_remote,
                                              options.cros_ssh_identity)
     self._is_guest = options.browser_type == 'cros-chrome-guest'
     self._username = options.browser_options.username
     self._password = options.browser_options.password
     # No extension loaded yet; presumably set later by tests that load one.
     self._load_extension = None
Example #18
0
 def setUp(self):
   self._options = options_for_unittests.GetCopy()
   # Stub out perf_control / adb_commands so no real device is touched.
   self._stubs = system_stub.Override(
       android_platform_backend,
       ['perf_control', 'adb_commands'])
   # Patch BatteryUtils for the duration of the test.
   self.battery_patcher = mock.patch.object(battery_utils, 'BatteryUtils')
   self.battery_patcher.start()
   # Keep the real psutil module so it can be restored later — presumably
   # in tearDown; confirm.
   self._actual_ps_util = android_platform_backend.psutil
 def testNonExistentExtensionPath(self):
     """Test that a non-existent extension path will raise an exception."""
     bad_path = os.path.join(util.GetUnittestDataDir(), 'foo')
     options = options_for_unittests.GetCopy()
     self.assertRaises(
         extension_to_load.ExtensionPathNonExistentException,
         lambda: extension_to_load.ExtensionToLoad(
             bad_path, options.browser_type))
def _GetOptionForUnittest():
  """Returns a copy of the unittest options pre-processed for story_runner.

  Registers story_runner's command line arguments on a parser, merges the
  parser defaults into the options, then runs story_runner's own argument
  processing before returning.
  """
  options = options_for_unittests.GetCopy()
  options.output_formats = ['none']
  # Unlike most test helpers here, gtest reporting is left enabled.
  options.suppress_gtest_report = False
  parser = options.CreateParser()
  story_runner.AddCommandLineArgs(parser)
  options.MergeDefaultValues(parser.get_default_values())
  story_runner.ProcessCommandLineArgs(parser, options)
  return options
    def setUp(self):
        self._options = options_for_unittests.GetCopy()
        # Stub out the device-touching modules of the android backend.
        self._stubs = system_stub.Override(android_platform_backend, [
            'perf_control', 'thermal_throttle', 'adb_commands', 'certutils',
            'adb_install_cert'
        ])

        # Skip _FixPossibleAdbInstability by setting psutil to None.
        # The original module is kept so it can be restored after the test.
        self._actual_ps_util = android_platform_backend.psutil
        android_platform_backend.psutil = None
 def testComponentExtensionNoPublicKey(self):
     """A component extension without a public key must fail to load."""
     # simple_extension does not have a public key.
     extension_path = os.path.join(util.GetUnittestDataDir(),
                                   'simple_extension')
     options = options_for_unittests.GetCopy()
     self.assertRaises(
         extension_to_load.MissingPublicKeyException,
         lambda: extension_to_load.ExtensionToLoad(
             extension_path,
             browser_type=options.browser_type,
             is_component=True))
Example #23
0
 def setUp(self):
     self._options = options_for_unittests.GetCopy()
     # Stub out modules that would talk to a real device.
     self._stubs = system_stub.Override(android_platform_backend,
                                        ['perf_control', 'adb_commands'])
     self.battery_patcher = mock.patch.object(battery_utils, 'BatteryUtils')
     self.battery_patcher.start()
     # Original psutil module, kept so it can be restored later.
     self._actual_ps_util = android_platform_backend.psutil
     # Pretend the prebuilt host tools are already set up.
     self.setup_prebuilt_tool_patcher = mock.patch(
         'telemetry.core.platform.android_platform_backend._SetupPrebuiltTools'
     )
     m = self.setup_prebuilt_tool_patcher.start()
     m.return_value = True
 def testExtensionNotLoaded(self):
     """Looking up an extension that was never loaded raises KeyError."""
     extension_path = os.path.join(util.GetUnittestDataDir(),
                                   'simple_extension')
     options = options_for_unittests.GetCopy()
     unloaded_extension = extension_to_load.ExtensionToLoad(
         extension_path, options.browser_type)
     browser_to_create = browser_finder.FindBrowser(options)
     with browser_to_create.Create(options) as browser:
         if browser.supports_extensions:
             self.assertRaises(KeyError,
                               lambda: browser.extensions[unloaded_extension])
  def testPerformNavigations(self):
    """_PerformNavigations drives exactly one batch of tab navigations.

    GetUrlIterator yields a 5-url slice of the candidate urls and
    ShouldExitAfterBatchNavigation is forced True, so only that batch is
    handed to _BatchNavigateTabs; the recorded (tab, url) pairs must cover
    the batch exactly once and nothing outside it.
    """
    maximum_batch_size = 15
    options = options_for_unittests.GetCopy()
    extender = FastNavigationProfileExtender(options, maximum_batch_size)

    # One candidate url per tab the extender manages.
    navigation_urls = []
    for i in range(extender._NUM_TABS):
      navigation_urls.append('http://test%s.com' % i)
    batch_size = 5
    navigation_urls_batch = navigation_urls[3:3 + batch_size]

    extender.GetUrlIterator = mock.MagicMock(
        return_value=iter(navigation_urls_batch))
    extender.ShouldExitAfterBatchNavigation = mock.MagicMock(return_value=True)
    extender._WaitForQueuedTabsToLoad = mock.MagicMock()

    extender._browser = FakeBrowser()
    extender._BatchNavigateTabs = mock.MagicMock()

    # Set up a callback to record the tabs and urls in each navigation.
    callback_tabs_batch = []
    callback_urls_batch = []
    def SideEffect(*args, **_):
      batch = args[0]
      for tab, url in batch:
        callback_tabs_batch.append(tab)
        callback_urls_batch.append(url)
    extender._BatchNavigateTabs.side_effect = SideEffect

    # Perform the navigations.
    extender._PerformNavigations()

    # Each url in the batch should have been navigated to exactly once.
    self.assertEqual(set(callback_urls_batch), set(navigation_urls_batch))

    # The other urls should not have been navigated to.
    navigation_urls_remaining = (set(navigation_urls) -
        set(navigation_urls_batch))
    self.assertFalse(navigation_urls_remaining & set(callback_urls_batch))

    # The first couple of tabs should have been navigated once. The remaining
    # tabs should not have been navigated.
    for i in range(len(extender._browser.tabs)):
      tab = extender._browser.tabs._tabs[i]

      if i < batch_size:
        expected_tab_navigation_count = 1
      else:
        expected_tab_navigation_count = 0

      count = callback_tabs_batch.count(tab)
      self.assertEqual(count, expected_tab_navigation_count)
Example #26
0
    def testRealLoginIfPossible(self):
        """Attempts a real login when a credentials file is available,
        skipping quietly when it is not."""
        credentials_path = _GetCredentialsPath()
        if not credentials_path:
            logging.warning('Credentials file not found, skipping test.')
            return

        options = options_for_unittests.GetCopy()
        with browser_finder.FindBrowser(options).Create(options) as browser:
            browser.credentials.credentials_path = credentials_path
            if not browser.credentials.CanLogin(self._credentials_type):
                return
            self.assertTrue(
                browser.credentials.LoginNeeded(browser.tabs[0],
                                                self._credentials_type))
    def testEscapeCmdArguments(self):
        """Commands and their arguments executed through the cros interface
        should follow bash syntax; this runs both remotely and locally on
        the device to check for consistency."""
        remote = options_for_unittests.GetCopy().cros_remote
        identity = options_for_unittests.GetCopy().cros_ssh_identity
        with cros_interface.CrOSInterface(remote, identity) as cri:

            # Arguments with no special characters pass through unchanged.
            stdout, _ = cri.RunCmdOnDevice(
                ['echo', '--arg1=value1', '--arg2=value2', '--arg3="value3"'])
            assert (stdout.strip() ==
                    '--arg1=value1 --arg2=value2 --arg3=value3')

            # Escaped special characters survive as literals.
            stdout, _ = cri.RunCmdOnDevice(['echo', '--arg=A\\; echo \\"B\\"'])
            assert stdout.strip() == '--arg=A; echo "B"'

            # Special characters inside single quotes are not expanded.
            stdout, _ = cri.RunCmdOnDevice(['echo', "--arg='$HOME;;$PATH'"])
            assert stdout.strip() == "--arg=$HOME;;$PATH"
    def CreateBrowserWithExtension(self, ext_path):
        """Creates self._browser with the extension at |ext_path| loaded.

        Returns False when no extension-capable browser can be found,
        True otherwise.
        """
        extension_path = os.path.join(util.GetUnittestDataDir(), ext_path)
        options = options_for_unittests.GetCopy()
        load_extension = extension_to_load.ExtensionToLoad(
            extension_path, options.browser_type)
        options.extensions_to_load = [load_extension]
        browser_to_create = browser_finder.FindBrowser(options)
        if not browser_to_create:
            # May not find a browser that supports extensions.
            return False
        self._browser = browser_to_create.Create(options)
        self._extension = self._browser.extensions[load_extension]
        self._extension_id = load_extension.extension_id
        self.assertTrue(self._extension)
        return True
    def _testMaxFailuresOptionIsRespectedAndOverridable(
            self, max_failures=None):
        """Checks the runner stops after the configured number of failures.

        Runs a test whose pages always fail. When max_failures is None the
        test's own limit (2) applies; otherwise options.max_failures must
        override it. Verifies how many pages were attempted before stopping.
        """
        self.SuppressExceptionFormatting()

        class TestPage(page_module.Page):
            def __init__(self, *args, **kwargs):
                super(TestPage, self).__init__(*args, **kwargs)
                self.was_run = False

            def RunNavigateSteps(self, action_runner):  # pylint: disable=W0613
                self.was_run = True
                raise Exception('Test exception')

        class Test(page_test.PageTest):
            def ValidatePage(self, *args):
                pass

        ps = page_set.PageSet()
        expectations = test_expectations.TestExpectations()
        # `_` instead of the unused loop variable `ii`.
        for _ in range(5):
            ps.pages.append(
                TestPage('file://blank.html',
                         ps,
                         base_dir=util.GetUnittestDataDir()))

        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        expected_max_failures = 2
        # Idiomatic identity test (was `not max_failures is None`).
        if max_failures is not None:
            options.max_failures = max_failures
            expected_max_failures = max_failures
        SetUpUserStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        user_story_runner.Run(Test(max_failures=2), ps, expectations, options,
                              results)
        self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
        # Runs up to max_failures + 1 failing tests before stopping, since
        # every test after max_failures failures have been encountered
        # might pass.
        self.assertEquals(expected_max_failures + 1, len(results.failures))
        for index, page in enumerate(ps.pages):
            if index <= expected_max_failures:
                self.assertTrue(page.was_run)
            else:
                self.assertFalse(page.was_run)
Example #30
0
    def setUp(self):
        self._options = options_for_unittests.GetCopy()
        # Stub out the device-touching modules of the android backend.
        self._stubs = system_stub.Override(android_platform_backend, [
            'perf_control', 'thermal_throttle', 'adb_commands', 'certutils',
            'adb_install_cert', 'platformsettings'
        ])

        # Skip _FixPossibleAdbInstability by setting psutil to None.
        # The original module is kept so it can be restored after the test.
        self._actual_ps_util = android_platform_backend.psutil
        android_platform_backend.psutil = None
        # Patch BatteryUtils for the duration of the test.
        self.battery_patcher = mock.patch.object(battery_utils, 'BatteryUtils')
        self.battery_patcher.start()
        # Pretend the prebuilt host tools are already set up.
        self.setup_prebuilt_tool_patcher = mock.patch(
            'telemetry.core.platform.android_platform_backend._SetupPrebuiltTools'
        )
        m = self.setup_prebuilt_tool_patcher.start()
        m.return_value = True