Example #1
  def testDiscardFirstResult(self):
    ps = page_set.PageSet()
    expectations = test_expectations.TestExpectations()
    ps.pages.append(page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir()))
    ps.pages.append(page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir()))

    class Measurement(page_test.PageTest):
      @property
      def discard_first_result(self):
        return True

      def ValidateAndMeasurePage(self, page, _, results):
        results.AddValue(string.StringValue(page, 'test', 't', page.url))

    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    options.reset_results = None
    options.upload_results = None
    options.results_label = None

    options.page_repeat = 1
    options.pageset_repeat = 1
    SetUpPageRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    page_runner.Run(Measurement(), ps, expectations, options, results)
    self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
    self.assertEquals(0, len(results.failures))
    self.assertEquals(0, len(results.all_page_specific_values))

    options.page_repeat = 1
    options.pageset_repeat = 2
    SetUpPageRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    page_runner.Run(Measurement(), ps, expectations, options, results)
    self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
    self.assertEquals(0, len(results.failures))
    self.assertEquals(2, len(results.all_page_specific_values))

    options.page_repeat = 2
    options.pageset_repeat = 1
    SetUpPageRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    page_runner.Run(Measurement(), ps, expectations, options, results)
    self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
    self.assertEquals(0, len(results.failures))
    self.assertEquals(2, len(results.all_page_specific_values))

    options.output_formats = ['html']
    options.suppress_gtest_report = True
    options.page_repeat = 1
    options.pageset_repeat = 1
    SetUpPageRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    page_runner.Run(Measurement(), ps, expectations, options, results)
    self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
    self.assertEquals(0, len(results.failures))
    self.assertEquals(0, len(results.all_page_specific_values))
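
A back-of-the-envelope check of the arithmetic this test asserts (plain Python, not a Telemetry API): with discard_first_result set, the first of each page's page_repeat * pageset_repeat runs is dropped, so the successful-run count and the value count both shrink by one per page.

def expected_kept_runs(num_pages, page_repeat, pageset_repeat):
  # Each page runs page_repeat * pageset_repeat times; the first run of
  # each page is discarded, so one fewer run (and value) is kept per page.
  runs_per_page = page_repeat * pageset_repeat
  return num_pages * max(runs_per_page - 1, 0)

assert expected_kept_runs(2, 1, 1) == 0  # first and last blocks above
assert expected_kept_runs(2, 1, 2) == 2  # pageset_repeat = 2 block
assert expected_kept_runs(2, 2, 1) == 2  # page_repeat = 2 block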
Example #2
    def testDiscardFirstResult(self):
        us = user_story_set.UserStorySet()
        us.AddUserStory(DummyLocalUserStory(TestSharedPageState))
        us.AddUserStory(DummyLocalUserStory(TestSharedPageState))

        class Measurement(page_test.PageTest):
            @property
            def discard_first_result(self):
                return True

            def RunPage(self, page, _, results):
                results.AddValue(
                    string.StringValue(page, 'test', 't', page.name))

            def ValidateAndMeasurePage(self, page, tab, results):
                pass

        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                self.options)
        user_story_runner.Run(Measurement(), us, self.expectations,
                              self.options, results)

        self.assertEquals(0, GetNumberOfSuccessfulPageRuns(results))
        self.assertEquals(0, len(results.failures))
        self.assertEquals(0, len(results.all_page_specific_values))

        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                self.options)
        self.options.page_repeat = 1
        self.options.pageset_repeat = 2
        user_story_runner.Run(Measurement(), us, self.expectations,
                              self.options, results)
        self.assertEquals(2, GetNumberOfSuccessfulPageRuns(results))
        self.assertEquals(0, len(results.failures))
        self.assertEquals(2, len(results.all_page_specific_values))

        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                self.options)
        self.options.page_repeat = 2
        self.options.pageset_repeat = 1
        user_story_runner.Run(Measurement(), us, self.expectations,
                              self.options, results)
        self.assertEquals(2, GetNumberOfSuccessfulPageRuns(results))
        self.assertEquals(0, len(results.failures))
        self.assertEquals(2, len(results.all_page_specific_values))

        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                self.options)
        self.options.page_repeat = 1
        self.options.pageset_repeat = 1
        user_story_runner.Run(Measurement(), us, self.expectations,
                              self.options, results)
        self.assertEquals(0, GetNumberOfSuccessfulPageRuns(results))
        self.assertEquals(0, len(results.failures))
        self.assertEquals(0, len(results.all_page_specific_values))
Example #3
    def Run(self, finder_options):
        """Run this test with the given options."""
        self.CustomizeBrowserOptions(finder_options.browser_options)

        pt = self.PageTestClass()()
        pt.__name__ = self.__class__.__name__

        if hasattr(self, '_disabled_strings'):
            pt._disabled_strings = self._disabled_strings
        if hasattr(self, '_enabled_strings'):
            pt._enabled_strings = self._enabled_strings

        ps = self.CreatePageSet(finder_options)
        expectations = self.CreateExpectations(ps)

        self._DownloadGeneratedProfileArchive(finder_options)

        results = results_options.CreateResults(self.GetMetadata(),
                                                finder_options)
        try:
            page_runner.Run(pt, ps, expectations, finder_options, results)
        except page_test.TestNotSupportedOnPlatformFailure as failure:
            logging.warning(str(failure))

        results.PrintSummary()
        return len(results.failures)
Example #4
  def testHandlingOfTestThatRaisesWithNonFatalUnknownExceptions(self):
    ps = page_set.PageSet()
    expectations = test_expectations.TestExpectations()
    ps.pages.append(page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir()))
    ps.pages.append(page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir()))

    class ExpectedException(Exception):
      pass

    class Test(page_test.PageTest):
      def __init__(self, *args):
        super(Test, self).__init__(*args)
        self.run_count = 0
      def ValidatePage(self, *_):
        old_run_count = self.run_count
        self.run_count += 1
        if old_run_count == 0:
          raise ExpectedException()

    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    test = Test()
    SetUpPageRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    page_runner.Run(test, ps, expectations, options, results)
    self.assertEquals(2, test.run_count)
    self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
    self.assertEquals(1, len(results.failures))
Example #5
    def testWebPageReplay(self):
        ps = example_domain.ExampleDomainPageSet()
        expectations = test_expectations.TestExpectations()
        body = []

        class TestWpr(page_test.PageTest):
            def ValidateAndMeasurePage(self, _, tab, __):
                body.append(tab.EvaluateJavaScript('document.body.innerText'))

        test = TestWpr()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpUserStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)

        user_story_runner.Run(test, ps, expectations, options, results)

        self.longMessage = True
        self.assertIn('Example Domain',
                      body[0],
                      msg='URL: %s' % ps.pages[0].url)
        self.assertIn('Example Domain',
                      body[1],
                      msg='URL: %s' % ps.pages[1].url)

        self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
        self.assertEquals(0, len(results.failures))
Example #6
  def testNeedsBrowserRestartAfterEachPage(self):
    self.CaptureFormattedException()
    ps = page_set.PageSet()
    expectations = test_expectations.TestExpectations()
    ps.pages.append(page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir()))
    ps.pages.append(page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir()))

    class Test(page_test.PageTest):
      def __init__(self, *args, **kwargs):
        super(Test, self).__init__(*args, **kwargs)
        self.browser_starts = 0

      def DidStartBrowser(self, *args):
        super(Test, self).DidStartBrowser(*args)
        self.browser_starts += 1

      def ValidateAndMeasurePage(self, page, tab, results):
        pass

    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    test = Test(needs_browser_restart_after_each_page=True)
    SetUpStoryRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    story_runner.Run(test, ps, expectations, options, results)
    self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
    self.assertEquals(2, test.browser_starts)
    self.assertFormattedExceptionIsEmpty()
Example #7
    def _RunPageTestThatRaisesAppCrashException(self, test, max_failures):
        class TestPage(page_module.Page):
            def RunNavigateSteps(self, _):
                raise exceptions.AppCrashException

        ps = page_set.PageSet()
        for _ in range(5):
            ps.AddUserStory(
                TestPage('file://blank.html',
                         ps,
                         base_dir=util.GetUnittestDataDir()))
        expectations = test_expectations.TestExpectations()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpUserStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        user_story_runner.Run(test,
                              ps,
                              expectations,
                              options,
                              results,
                              max_failures=max_failures)
        return results
Example #8
    def CreateResults(self):
        if self._benchmark is not None:
            benchmark_metadata = self._benchmark.GetMetadata()
        else:
            benchmark_metadata = benchmark.BenchmarkMetadata('record_wpr')

        return results_options.CreateResults(benchmark_metadata, self._options)
Example #9
    def Run(self, options):
        # Installing extensions requires that the profile directory exist before
        # the browser is launched.
        if not options.browser_options.profile_dir:
            options.browser_options.profile_dir = tempfile.mkdtemp()
        options.browser_options.disable_default_apps = False

        self._PrepareExtensionInstallFiles(options.browser_options.profile_dir)

        expectations = test_expectations.TestExpectations()
        results = results_options.CreateResults(
            benchmark.BenchmarkMetadata(profile_creator.__class__.__name__),
            options)
        extension_page_test = _ExtensionPageTest()
        extension_page_test._expected_extension_count = len(
            self._extensions_to_install)
        user_story_runner.Run(extension_page_test,
                              extension_page_test._page_set, expectations,
                              options, results)

        self._CleanupExtensionInstallFiles()

        # Check that the files in this list exist and have content.
        expected_files = [os.path.join('Default', 'Network Action Predictor')]
        for filename in expected_files:
            filename = os.path.join(options.output_profile_path, filename)
            if not os.path.getsize(filename) > 0:
                raise Exception("Profile not complete: %s is zero length." %
                                filename)

        if results.failures:
            logging.warning('Some pages failed.')
            logging.warning('Failed pages:\n%s',
                            '\n'.join(map(str, results.pages_that_failed)))
            raise Exception('ExtensionsProfileCreator failed.')
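
The zero-length check at the end of Run can be lifted into a stand-alone helper; a minimal sketch using only the standard library (the helper name is made up):

import os

def CheckProfileFiles(profile_dir, expected_files):
  # Raise if any expected profile file is missing or empty, mirroring
  # the "Profile not complete" check in the Run method above.
  for rel_path in expected_files:
    path = os.path.join(profile_dir, rel_path)
    if not os.path.isfile(path) or os.path.getsize(path) == 0:
      raise Exception('Profile not complete: %s is missing or empty.' % path)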
Example #10
  def testBrowserBeforeLaunch(self):
    ps = page_set.PageSet()
    expectations = test_expectations.TestExpectations()
    page = page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir())
    ps.pages.append(page)

    class TestBeforeLaunch(page_test.PageTest):
      def __init__(self):
        super(TestBeforeLaunch, self).__init__()
        self._did_call_will_start = False
        self._did_call_did_start = False

      def WillStartBrowser(self, platform):
        self._did_call_will_start = True
        # TODO(simonjam): Test that the profile is available.

      def DidStartBrowser(self, browser):
        assert self._did_call_will_start
        self._did_call_did_start = True

      def ValidatePage(self, *_):
        assert self._did_call_did_start

    test = TestBeforeLaunch()
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    SetUpPageRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    page_runner.Run(test, ps, expectations, options, results)
Example #11
  def testUserAgent(self):
    ps = page_set.PageSet()
    expectations = test_expectations.TestExpectations()
    page = page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir())
    ps.pages.append(page)
    ps.user_agent_type = 'tablet'

    class TestUserAgent(page_test.PageTest):
      def ValidatePage(self, _1, tab, _2):
        actual_user_agent = tab.EvaluateJavaScript('window.navigator.userAgent')
        expected_user_agent = user_agent.UA_TYPE_MAPPING['tablet']
        assert actual_user_agent.strip() == expected_user_agent

        # Record that the test actually reached this function. Previously it
        # timed out before getting here, which should have failed the test,
        # but since all the asserts were skipped, the timeout slipped by.
        self.hasRun = True # pylint: disable=W0201

    test = TestUserAgent()
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    SetUpPageRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    page_runner.Run(test, ps, expectations, options, results)

    self.assertTrue(hasattr(test, 'hasRun') and test.hasRun)
Example #12
def GenerateProfiles(profile_creator_class, profile_creator_name, options):
  """Generate a profile."""
  expectations = test_expectations.TestExpectations()
  test = profile_creator_class()

  temp_output_directory = tempfile.mkdtemp()
  options.output_profile_path = temp_output_directory

  results = results_options.CreateResults(
      benchmark.BenchmarkMetadata(test.__class__.__name__), options)
  page_runner.Run(test, test.page_set, expectations, options, results)

  if results.failures:
    logging.warning('Some pages failed.')
    logging.warning('Failed pages:\n%s',
                    '\n'.join(results.pages_that_failed))
    return 1

  # Everything is a-ok, move results to final destination.
  generated_profiles_dir = os.path.abspath(options.output_dir)
  if not os.path.exists(generated_profiles_dir):
    os.makedirs(generated_profiles_dir)
  out_path = os.path.join(generated_profiles_dir, profile_creator_name)
  if os.path.exists(out_path):
    shutil.rmtree(out_path)

  shutil.copytree(temp_output_directory, out_path, ignore=_IsPseudoFile)
  shutil.rmtree(temp_output_directory)
  sys.stderr.write("SUCCESS: Generated profile copied to: '%s'.\n" % out_path)

  return 0
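
GenerateProfiles hands shutil.copytree an _IsPseudoFile callback whose definition is not shown here. A copytree ignore callback receives a directory and its entry names and returns the subset to skip; the sketch below is one plausible implementation (the real _IsPseudoFile may filter differently):

import os
import stat

def _IsPseudoFile(directory, names):
  # shutil.copytree ignore callback: skip entries that cannot be copied
  # as regular files, e.g. sockets and FIFOs a live profile may contain.
  skipped = []
  for name in names:
    mode = os.lstat(os.path.join(directory, name)).st_mode
    if stat.S_ISSOCK(mode) or stat.S_ISFIFO(mode):
      skipped.append(name)
  return skipped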
Example #13
  def testRetryOnBrowserCrash(self):
    ps = page_set.PageSet()
    expectations = test_expectations.TestExpectations()
    ps.pages.append(page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir()))

    class CrashyMeasurement(page_test.PageTest):
      has_crashed = False
      def ValidateAndMeasurePage(self, page, tab, results):
        # This value should be discarded on the first run, when the
        # browser crashes.
        results.AddValue(
            string.StringValue(page, 'test', 't', str(self.has_crashed)))
        if not self.has_crashed:
          self.has_crashed = True
          raise exceptions.BrowserGoneException(tab.browser)

    options = options_for_unittests.GetCopy()
    options.output_formats = ['csv']
    options.suppress_gtest_report = True

    SetUpPageRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    page_runner.Run(CrashyMeasurement(), ps, expectations, options, results)

    self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
    self.assertEquals(0, len(results.failures))
    self.assertEquals(1, len(results.all_page_specific_values))
    self.assertEquals(
        'True', results.all_page_specific_values[0].GetRepresentativeString())
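
The recovery behaviour this test depends on, reduced to plain Python (the names below are stand-ins, not Telemetry classes): on a browser crash the runner discards the attempt's values and reruns the page with a fresh browser, so only the retry's value survives.

class FakeBrowserGone(Exception):
  pass

def RunWithRetry(measure, max_attempts=2):
  # Values from a crashed attempt are dropped; only values from the
  # successful attempt are returned.
  for _ in range(max_attempts):
    values = []
    try:
      measure(values)
      return values
    except FakeBrowserGone:
      continue  # drop 'values' and retry with a "fresh browser"
  raise FakeBrowserGone('gave up after %d attempts' % max_attempts)

state = {'crashed': False}
def Measure(values):
  values.append(str(state['crashed']))
  if not state['crashed']:
    state['crashed'] = True
    raise FakeBrowserGone()

assert RunWithRetry(Measure) == ['True']  # same value the test asserts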
Example #14
  def testRunPageWithStartupUrl(self):
    ps = page_set.PageSet()
    expectations = test_expectations.TestExpectations()
    page = page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir())
    page.startup_url = 'about:blank'
    ps.pages.append(page)

    class Measurement(page_test.PageTest):
      def __init__(self):
        super(Measurement, self).__init__()
        self.browser_restarted = False

      def CustomizeBrowserOptionsForSinglePage(self, ps, options):
        self.browser_restarted = True
        super(Measurement, self).CustomizeBrowserOptionsForSinglePage(ps,
                                                                      options)
      def ValidateAndMeasurePage(self, page, tab, results):
        pass

    options = options_for_unittests.GetCopy()
    options.page_repeat = 2
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    if not browser_finder.FindBrowser(options):
      return
    test = Measurement()
    SetUpPageRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    page_runner.Run(test, ps, expectations, options, results)
    self.assertEquals('about:blank', options.browser_options.startup_url)
    self.assertTrue(test.browser_restarted)
Example #15
  def testCleanUpPage(self):
    ps = page_set.PageSet()
    expectations = test_expectations.TestExpectations()
    page = page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir())
    ps.pages.append(page)

    class Test(page_test.PageTest):
      def __init__(self):
        super(Test, self).__init__()
        self.did_call_clean_up = False

      def ValidatePage(self, *_):
        raise exceptions.IntentionalException

      def CleanUpAfterPage(self, page, tab):
        self.did_call_clean_up = True


    test = Test()
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    SetUpPageRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    page_runner.Run(test, ps, expectations, options, results)
    assert test.did_call_clean_up
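
The guarantee this test asserts, that CleanUpAfterPage runs even when validation raises, boils down to a try/finally; a minimal stand-alone sketch with stand-in names:

def RunPage(validate, clean_up):
  # Cleanup must run whether or not validation raises.
  try:
    validate()
  finally:
    clean_up()

calls = []
def FailingValidate():
  raise RuntimeError('intentional')
try:
  RunPage(FailingValidate, lambda: calls.append('cleaned'))
except RuntimeError:
  pass
assert calls == ['cleaned']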
Example #16
  def Run(self, finder_options):
    """Run this test with the given options."""
    self.CustomizeBrowserOptions(finder_options.browser_options)

    pt = self.CreatePageTest(finder_options)
    pt.__name__ = self.__class__.__name__

    if hasattr(self, '_disabled_strings'):
      # pylint: disable=protected-access
      pt._disabled_strings = self._disabled_strings
    if hasattr(self, '_enabled_strings'):
      # pylint: disable=protected-access
      pt._enabled_strings = self._enabled_strings

    expectations = self.CreateExpectations()
    us = self.CreateUserStorySet(finder_options)

    self._DownloadGeneratedProfileArchive(finder_options)

    benchmark_metadata = self.GetMetadata()
    results = results_options.CreateResults(benchmark_metadata, finder_options)
    try:
      user_story_runner.Run(pt, us, expectations, finder_options, results)
    except page_test.TestNotSupportedOnPlatformFailure as failure:
      logging.warning(str(failure))

    bucket = cloud_storage.INTERNAL_BUCKET
    if finder_options.upload_results:
      results.UploadTraceFilesToCloud(bucket)
      results.UploadProfilingFilesToCloud(bucket)

    results.PrintSummary()
    return len(results.failures)
Example #17
    def RunMeasurement(self,
                       measurement,
                       ps,
                       expectations=test_expectations.TestExpectations(),
                       options=None):
        """Runs a measurement against a pageset, returning the rows its outputs."""
        if options is None:
            options = options_for_unittests.GetCopy()
        assert options
        temp_parser = options.CreateParser()
        story_runner.AddCommandLineArgs(temp_parser)
        defaults = temp_parser.get_default_values()
        for k, v in defaults.__dict__.items():
            if hasattr(options, k):
                continue
            setattr(options, k, v)

        measurement.CustomizeBrowserOptions(options.browser_options)
        options.output_file = None
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        options.output_trace_tag = None
        story_runner.ProcessCommandLineArgs(temp_parser, options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        story_runner.Run(measurement, ps, expectations, options, results)
        return results
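
RunMeasurement backfills attributes the copied options object lacks from the parser's defaults, without clobbering anything already set. The same pattern with plain optparse (option names here are illustrative):

import optparse

parser = optparse.OptionParser()
parser.add_option('--page-repeat', type='int', default=1, dest='page_repeat')
parser.add_option('--pageset-repeat', type='int', default=1,
                  dest='pageset_repeat')

class Options(object):
  pass

options = Options()
options.page_repeat = 5  # already set; must not be overwritten
for k, v in parser.get_default_values().__dict__.items():
  if hasattr(options, k):
    continue
  setattr(options, k, v)

assert options.page_repeat == 5     # preserved
assert options.pageset_repeat == 1  # backfilled from the parser defaults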
Example #18
    def testRunPageWithProfilingFlag(self):
        ps = page_set.PageSet()
        expectations = test_expectations.TestExpectations()
        ps.pages.append(
            page_module.Page('file://blank.html',
                             ps,
                             base_dir=util.GetUnittestDataDir()))

        class Measurement(page_test.PageTest):
            pass

        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        options.reset_results = None
        options.upload_results = None
        options.results_label = None
        options.output_dir = tempfile.mkdtemp()
        options.profiler = 'trace'
        try:
            SetUpUserStoryRunnerArguments(options)
            results = results_options.CreateResults(EmptyMetadataForTest(),
                                                    options)
            user_story_runner.Run(Measurement(), ps, expectations, options,
                                  results)
            self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
            self.assertEquals(0, len(results.failures))
            self.assertEquals(0, len(results.all_page_specific_values))
            self.assertTrue(
                os.path.isfile(
                    os.path.join(options.output_dir, 'blank_html.json')))
        finally:
            shutil.rmtree(options.output_dir)
Example #19
  def testOneTab(self):
    ps = page_set.PageSet()
    expectations = test_expectations.TestExpectations()
    page = page_module.Page(
        'file://blank.html', ps, base_dir=util.GetUnittestDataDir())
    ps.pages.append(page)

    class TestOneTab(page_test.PageTest):
      def __init__(self):
        super(TestOneTab, self).__init__()
        self._browser = None

      def DidStartBrowser(self, browser):
        self._browser = browser
        self._browser.tabs.New()

      def ValidatePage(self, *_):
        assert len(self._browser.tabs) == 1

    test = TestOneTab()
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    SetUpPageRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    page_runner.Run(test, ps, expectations, options, results)
Example #20
  def testPagesetRepeat(self):
    us = user_story_set.UserStorySet()
    us.AddUserStory(DummyLocalUserStory(TestSharedPageState, name='blank'))
    us.AddUserStory(DummyLocalUserStory(TestSharedPageState, name='green'))

    class Measurement(page_test.PageTest):
      i = 0
      def RunPage(self, page, _, results):
        self.i += 1
        results.AddValue(scalar.ScalarValue(
            page, 'metric', 'unit', self.i))

      def ValidateAndMeasurePage(self, page, tab, results):
        pass

    self.options.page_repeat = 1
    self.options.pageset_repeat = 2
    self.options.output_formats = ['buildbot']
    results = results_options.CreateResults(
      EmptyMetadataForTest(), self.options)
    story_runner.Run(
        Measurement(), us, self.expectations, self.options, results)
    results.PrintSummary()
    contents = self.fake_stdout.getvalue()
    self.assertEquals(4, GetNumberOfSuccessfulPageRuns(results))
    self.assertEquals(0, len(results.failures))
    self.assertIn('RESULT metric: blank= [1,3] unit', contents)
    self.assertIn('RESULT metric: green= [2,4] unit', contents)
    self.assertIn('*RESULT metric: metric= [1,2,3,4] unit', contents)
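
Why blank gets [1,3] and green gets [2,4]: with page_repeat=1 and pageset_repeat=2 the story set is traversed twice in order, so the measurement's shared counter interleaves across pages. The bookkeeping replayed in plain Python:

def Simulate(pages, page_repeat, pageset_repeat):
  # Replays the counter of the Measurement above.
  i = 0
  values = dict((p, []) for p in pages)
  for _ in range(pageset_repeat):
    for page in pages:
      for _ in range(page_repeat):
        i += 1
        values[page].append(i)
  return values

assert Simulate(['blank', 'green'], 1, 2) == {'blank': [1, 3],
                                              'green': [2, 4]}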
Example #21
  def _testMaxFailuresOptionIsRespectedAndOverridable(
      self, num_failing_user_stories, runner_max_failures, options_max_failures,
      expected_num_failures):
    class SimpleSharedState(
        shared_state.SharedState):
      _fake_platform = FakePlatform()
      _current_user_story = None

      @property
      def platform(self):
        return self._fake_platform

      def WillRunUserStory(self, story):
        self._current_user_story = story

      def RunUserStory(self, results):
        self._current_user_story.Run()

      def DidRunUserStory(self, results):
        pass

      def GetTestExpectationAndSkipValue(self, expectations):
        return 'pass', None

      def TearDownState(self):
        pass

    class FailingUserStory(user_story.UserStory):
      def __init__(self):
        super(FailingUserStory, self).__init__(
            shared_state_class=SimpleSharedState,
            is_local=True)
        self.was_run = False

      def Run(self):
        self.was_run = True
        raise page_test.Failure

    self.SuppressExceptionFormatting()

    uss = user_story_set.UserStorySet()
    for _ in range(num_failing_user_stories):
      uss.AddUserStory(FailingUserStory())

    options = _GetOptionForUnittest()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    if options_max_failures:
      options.max_failures = options_max_failures

    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    story_runner.Run(
        DummyTest(), uss, test_expectations.TestExpectations(), options,
        results, max_failures=runner_max_failures)
    self.assertEquals(0, GetNumberOfSuccessfulPageRuns(results))
    self.assertEquals(expected_num_failures, len(results.failures))
    for ii, story in enumerate(uss.user_stories):
      self.assertEqual(story.was_run, ii < expected_num_failures)
Example #22
  def setUp(self):
    self.fake_stdout = StringIO.StringIO()
    self.actual_stdout = sys.stdout
    sys.stdout = self.fake_stdout
    self.options = _GetOptionForUnittest()
    self.expectations = test_expectations.TestExpectations()
    self.results = results_options.CreateResults(
        EmptyMetadataForTest(), self.options)
    self._story_runner_logging_stub = None
Example #23
    def testMaxFailuresOptionIsRespected(self):
        class TestPage(page_module.Page):
            def __init__(self, *args, **kwargs):
                super(TestPage, self).__init__(*args, **kwargs)
                self.was_run = False

            def RunNavigateSteps(self, action_runner):
                self.was_run = True
                raise Exception('Test exception')

        class Test(page_test.PageTest):
            def ValidatePage(self, *args):
                pass

        ps = page_set.PageSet()
        expectations = test_expectations.TestExpectations()
        page1 = TestPage('file://blank.html',
                         ps,
                         base_dir=util.GetUnittestDataDir())
        ps.pages.append(page1)
        page2 = TestPage('file://blank.html',
                         ps,
                         base_dir=util.GetUnittestDataDir())
        ps.pages.append(page2)
        page3 = TestPage('file://blank.html',
                         ps,
                         base_dir=util.GetUnittestDataDir())
        ps.pages.append(page3)
        page4 = TestPage('file://blank.html',
                         ps,
                         base_dir=util.GetUnittestDataDir())
        ps.pages.append(page4)
        page5 = TestPage('file://blank.html',
                         ps,
                         base_dir=util.GetUnittestDataDir())
        ps.pages.append(page5)

        options = options_for_unittests.GetCopy()
        options.output_format = 'none'
        options.suppress_gtest_report = True
        SetUpPageRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        page_runner.Run(Test(max_failures=2), ps, expectations, options,
                        results)
        self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
        # Runs up to max_failures + 1 failing tests before stopping, since
        # any test run after max_failures failures have been encountered
        # might still pass.
        self.assertEquals(3, len(results.failures))
        self.assertTrue(page1.was_run)
        self.assertTrue(page2.was_run)
        self.assertTrue(page3.was_run)
        self.assertFalse(page4.was_run)
        self.assertFalse(page5.was_run)
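
The off-by-one described in the comment above, in isolation: the runner stops only once the failure count exceeds max_failures, so max_failures + 1 failures are recorded before the remaining pages are skipped (plain Python, stand-in data):

def RunUntilTooManyFailures(pages, max_failures):
  # Every page in this sketch fails; stop once failures exceed the cap.
  failures = 0
  for page in pages:
    page['was_run'] = True
    failures += 1
    if failures > max_failures:
      break
  return failures

pages = [{'was_run': False} for _ in range(5)]
assert RunUntilTooManyFailures(pages, 2) == 3
assert [p['was_run'] for p in pages] == [True, True, True, False, False]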
Example #24
    def Run(self, finder_options):
        """Run this test with the given options.

    Returns:
      The number of failure values (up to 254) or 255 if there is an uncaught
      exception.
    """
        self.CustomizeBrowserOptions(finder_options.browser_options)

        pt = self.CreatePageTest(finder_options)
        pt.__name__ = self.__class__.__name__

        if hasattr(self, '_disabled_strings'):
            # pylint: disable=protected-access
            pt._disabled_strings = self._disabled_strings
        if hasattr(self, '_enabled_strings'):
            # pylint: disable=protected-access
            pt._enabled_strings = self._enabled_strings

        expectations = self.CreateExpectations()
        us = self.CreateUserStorySet(finder_options)
        if isinstance(pt, page_test.PageTest):
            if any(not isinstance(p, page.Page) for p in us.user_stories):
                raise Exception(
                    'PageTest must be used with UserStorySet containing only '
                    'telemetry.page.Page user stories.')

        self._DownloadGeneratedProfileArchive(finder_options)

        benchmark_metadata = self.GetMetadata()
        with results_options.CreateResults(
                benchmark_metadata, finder_options,
                self.ValueCanBeAddedPredicate) as results:
            try:
                user_story_runner.Run(pt,
                                      us,
                                      expectations,
                                      finder_options,
                                      results,
                                      max_failures=self._max_failures)
                return_code = min(254, len(results.failures))
            except Exception:
                exception_formatter.PrintFormattedException()
                return_code = 255

            bucket = cloud_storage.BUCKET_ALIASES[finder_options.upload_bucket]
            if finder_options.upload_results:
                results.UploadTraceFilesToCloud(bucket)
                results.UploadProfilingFilesToCloud(bucket)

            results.PrintSummary()
        return return_code
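
The return-code convention from the docstring of Run above (failure count capped at 254, with 255 reserved for an uncaught exception), distilled into a helper; 'run' is a stand-in callable returning the failure list:

def ExitCodeFor(run):
  # Cap the failure count at 254; 255 signals an uncaught exception.
  try:
    return min(254, len(run()))
  except Exception:
    return 255

assert ExitCodeFor(lambda: []) == 0
assert ExitCodeFor(lambda: range(300)) == 254
def Boom():
  raise RuntimeError('uncaught')
assert ExitCodeFor(Boom) == 255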
Example #25
    def Run(self, options):
        expectations = test_expectations.TestExpectations()
        results = results_options.CreateResults(
            benchmark.BenchmarkMetadata(profile_creator.__class__.__name__),
            options)
        user_story_runner.Run(self._page_test, self._page_test._page_set,
                              expectations, options, results)

        if results.failures:
            logging.warning('Some pages failed to load.')
            logging.warning('Failed pages:\n%s',
                            '\n'.join(map(str, results.pages_that_failed)))
            raise Exception('SmallProfileCreator failed.')
Example #26
    def testPagesetRepeat(self):
        ps = page_set.PageSet()
        expectations = test_expectations.TestExpectations()
        ps.pages.append(
            page_module.Page('file://blank.html',
                             ps,
                             base_dir=util.GetUnittestDataDir()))
        ps.pages.append(
            page_module.Page('file://green_rect.html',
                             ps,
                             base_dir=util.GetUnittestDataDir()))

        class Measurement(page_test.PageTest):
            i = 0

            def ValidateAndMeasurePage(self, page, _, results):
                self.i += 1
                results.AddValue(
                    scalar.ScalarValue(page, 'metric', 'unit', self.i))

        output_file = tempfile.NamedTemporaryFile(delete=False).name
        try:
            options = options_for_unittests.GetCopy()
            options.output_format = 'buildbot'
            options.output_file = output_file
            options.suppress_gtest_report = True
            options.reset_results = None
            options.upload_results = None
            options.results_label = None

            options.page_repeat = 1
            options.pageset_repeat = 2
            SetUpPageRunnerArguments(options)
            results = results_options.CreateResults(EmptyMetadataForTest(),
                                                    options)
            page_runner.Run(Measurement(), ps, expectations, options, results)
            results.PrintSummary()
            self.assertEquals(4, len(GetSuccessfulPageRuns(results)))
            self.assertEquals(0, len(results.failures))
            with open(output_file) as f:
                stdout = f.read()
            self.assertIn('RESULT metric: blank.html= [1,3] unit', stdout)
            self.assertIn('RESULT metric: green_rect.html= [2,4] unit', stdout)
            self.assertIn('*RESULT metric: metric= [1,2,3,4] unit', stdout)
        finally:
            # TODO(chrishenry): This is a HACK!!1 Really, the right way to
            # do this is for page_runner (or output formatter) to close any
            # files it has opened.
            for formatter in results._output_formatters:  # pylint: disable=W0212
                formatter.output_stream.close()
            os.remove(output_file)
Example #27
    def _testMaxFailuresOptionIsRespectedAndOverridable(
            self, max_failures=None):
        self.SuppressExceptionFormatting()

        class TestPage(page_module.Page):
            def __init__(self, *args, **kwargs):
                super(TestPage, self).__init__(*args, **kwargs)
                self.was_run = False

            def RunNavigateSteps(self, action_runner):  # pylint: disable=W0613
                self.was_run = True
                raise Exception('Test exception')

        class Test(page_test.PageTest):
            def ValidatePage(self, *args):
                pass

        ps = page_set.PageSet()
        expectations = test_expectations.TestExpectations()
        for ii in range(5):
            ps.pages.append(
                TestPage('file://blank.html',
                         ps,
                         base_dir=util.GetUnittestDataDir()))

        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        expected_max_failures = 2
        if max_failures is not None:
            options.max_failures = max_failures
            expected_max_failures = max_failures
        SetUpUserStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        user_story_runner.Run(Test(max_failures=2), ps, expectations, options,
                              results)
        self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
        # Runs up to max_failures + 1 failing tests before stopping, since
        # any test run after max_failures failures have been encountered
        # might still pass.
        self.assertEquals(expected_max_failures + 1, len(results.failures))
        for ii in range(len(ps.pages)):
            if ii <= expected_max_failures:
                self.assertTrue(ps.pages[ii].was_run)
            else:
                self.assertFalse(ps.pages[ii].was_run)
Example #28
    def testHandlingOfCrashedTabWithExpectedFailure(self):
        self.SuppressExceptionFormatting()
        ps = page_set.PageSet()
        expectations = test_expectations.TestExpectations()
        expectations.Fail('chrome://crash')
        page1 = page_module.Page('chrome://crash', ps)
        ps.pages.append(page1)

        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpUserStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        user_story_runner.Run(DummyTest(), ps, expectations, options, results)
        self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
        self.assertEquals(0, len(results.failures))
Example #29
  def testHandlingOfCrashedTab(self):
    ps = page_set.PageSet()
    expectations = test_expectations.TestExpectations()
    page1 = page_module.Page('chrome://crash', ps)
    ps.pages.append(page1)

    class Test(page_test.PageTest):
      def ValidatePage(self, *args):
        pass

    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    SetUpPageRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    page_runner.Run(Test(), ps, expectations, options, results)
    self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
    self.assertEquals(1, len(results.failures))
Example #30
    def testRaiseBrowserGoneExceptionFromRestartBrowserBeforeEachPage(self):
        self.CaptureFormattedException()
        ps = page_set.PageSet()
        expectations = test_expectations.TestExpectations()
        ps.pages.append(
            page_module.Page('file://blank.html',
                             ps,
                             base_dir=util.GetUnittestDataDir()))
        ps.pages.append(
            page_module.Page('file://blank.html',
                             ps,
                             base_dir=util.GetUnittestDataDir()))

        class Test(page_test.PageTest):
            def __init__(self, *args):
                super(Test, self).__init__(
                    *args, needs_browser_restart_after_each_page=True)
                self.run_count = 0

            def RestartBrowserBeforeEachPage(self):
                old_run_count = self.run_count
                self.run_count += 1
                if old_run_count == 0:
                    raise exceptions.BrowserGoneException(None)
                return self._needs_browser_restart_after_each_page

            def ValidateAndMeasurePage(self, page, tab, results):
                pass

        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        test = Test()
        SetUpUserStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        user_story_runner.Run(test, ps, expectations, options, results)
        self.assertEquals(2, test.run_count)
        self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
        self.assertEquals(1, len(results.failures))
        self.assertFormattedExceptionIsEmpty()