Example #1
 def _RunGpuTest(self, url, test_name, args):
     temp_page = _EmulatedPage(url, test_name)
     expectations = self.__class__.GetExpectations()
     expectation = expectations.GetExpectationForPage(
         self.browser, temp_page)
     if expectation == 'skip':
         # skipTest in Python's unittest harness raises an exception, so
         # aborts the control flow here.
         self.skipTest('SKIPPING TEST due to test expectations')
     try:
         self.RunActualGpuTest(url, *args)
     except Exception:
         if expectation == 'pass':
             # This is not an expected exception or test failure, so print
             # the detail to the console.
             exception_formatter.PrintFormattedException()
             # This failure might have been caused by a browser or renderer
             # crash, so restart the browser to make sure any state doesn't
             # propagate to the next test iteration.
             self._RestartBrowser('unexpected test failure')
             raise
         elif expectation == 'fail':
             msg = 'Expected exception while running %s' % test_name
             exception_formatter.PrintFormattedException(msg=msg)
             return
         if expectation != 'flaky':
             logging.warning(
                 'Unknown expectation %s while handling exception for %s',
                 expectation, test_name)
             raise
         # Flaky tests are handled here.
         num_retries = expectations.GetFlakyRetriesForPage(
             self.browser, temp_page)
         if not num_retries:
             # Re-raise the exception.
             raise
         # Re-run the test up to |num_retries| times.
         for ii in xrange(0, num_retries):
             print 'FLAKY TEST FAILURE, retrying: ' + test_name
             try:
                 # For robustness, shut down the browser and restart it
                 # between flaky test failures, to make sure any state
                 # doesn't propagate to the next iteration.
                 self._RestartBrowser('flaky test failure')
                 self.RunActualGpuTest(url, *args)
                 break
             except Exception:
                 # Squelch any exceptions from any but the last retry.
                 if ii == num_retries - 1:
                     # Restart the browser after the last failure to make sure
                     # any state doesn't propagate to the next iteration.
                     self._RestartBrowser('excessive flaky test failures')
                     raise
     else:
         if expectation == 'fail':
             logging.warning('%s was expected to fail, but passed.\n',
                             test_name)
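Example #1 (like Examples #3 and #5 below) touches the expectations object only through GetExpectationForPage, which yields one of the strings 'pass', 'fail', 'flaky' or 'skip', and GetFlakyRetriesForPage. A minimal stand-in with that interface might look like the sketch below; the class name and defaults are illustrative assumptions, not part of the harness.

class FakeExpectations(object):
    # Illustrative stand-in for the expectations object queried in
    # Examples #1, #3 and #5; not the harness's real implementation.
    def __init__(self, expectation='pass', flaky_retries=0):
        self._expectation = expectation
        self._flaky_retries = flaky_retries

    def GetExpectationForPage(self, browser, page):
        # One of 'pass', 'fail', 'flaky' or 'skip'.
        return self._expectation

    def GetFlakyRetriesForPage(self, browser, page):
        # Number of extra attempts granted to a 'flaky' test.
        return self._flaky_retries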
Example #2
 def _RunGpuTest(self, url, test_name, *args):
     expected_results, should_retry_on_failure = (
         self.GetExpectationsForTest())
     try:
         # TODO(nednguyen): For some reason the arguments are getting wrapped
         # in another tuple sometimes (like in the WebGL extension tests).
         # Perhaps only if multiple arguments are yielded in the test
         # generator?
         if len(args) == 1 and isinstance(args[0], tuple):
             args = args[0]
         self.RunActualGpuTest(url, *args)
     except Exception:
         if ResultType.Failure in expected_results or should_retry_on_failure:
             if should_retry_on_failure:
                 # For robustness, shut down the browser and restart it
                 # between flaky test failures, to make sure any state
                 # doesn't propagate to the next iteration.
                 self._RestartBrowser('flaky test failure')
             else:
                 msg = 'Expected exception while running %s' % test_name
                 exception_formatter.PrintFormattedException(msg=msg)
                 # Even though this is a known failure, the browser might still
                 # be in a bad state; for example, certain kinds of timeouts
                 # will affect the next test. Restart the browser to prevent
                 # these kinds of failures propagating to the next test.
                 self._RestartBrowser('expected test failure')
         else:
             # This is not an expected exception or test failure, so print
             # the detail to the console.
             exception_formatter.PrintFormattedException()
             # Symbolize any crash dump (like from the GPU process) that
             # might have happened but wasn't detected above. Note we don't
             # do this for either 'fail' or 'flaky' expectations because
             # there are still quite a few flaky failures in the WebGL test
             # expectations, and since minidump symbolization is slow
             # (upwards of one minute on a fast laptop), symbolizing all the
             # stacks could slow down the tests' running time unacceptably.
             # We also don't do this if the browser failed to startup.
             if self.browser is not None:
                 self.browser.LogSymbolizedUnsymbolizedMinidumps(
                     logging.ERROR)
             # This failure might have been caused by a browser or renderer
             # crash, so restart the browser to make sure any state doesn't
             # propagate to the next test iteration.
             self._RestartBrowser('unexpected test failure')
         self.fail()
     else:
         if ResultType.Failure in expected_results:
             logging.warning('%s was expected to fail, but passed.\n',
                             test_name)
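Example #2 consumes a different expectations API: GetExpectationsForTest returns a pair (expected_results, should_retry_on_failure), and outcomes are compared against ResultType constants instead of expectation strings. The sketch below shows one plausible mapping from the old string values to that pair; the ResultType stub, its constant values, and the helper name are assumptions for illustration only.

class ResultType(object):
    # Minimal stand-in for the result-type constants referenced in Example #2.
    Pass = 'PASS'
    Failure = 'FAIL'

def expectation_to_tuple(expectation):
    # Assumed mapping from the old string expectations to the
    # (expected_results, should_retry_on_failure) pair.
    if expectation == 'fail':
        return {ResultType.Failure}, False
    if expectation == 'flaky':
        # Retries are now expressed through the boolean rather than a
        # per-page retry count.
        return {ResultType.Pass}, True
    return {ResultType.Pass}, False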
Example #3
def RunStoryWithRetries(cls, shared_page_state, results):
    page = shared_page_state.current_page
    expectations = page.GetExpectations()
    expectation = 'pass'
    if expectations:
        expectation = expectations.GetExpectationForPage(
            shared_page_state.browser, page)
    if expectation == 'skip':
        raise Exception(
            'Skip expectations should have been handled in CanRunOnBrowser')
    try:
        super(cls, shared_page_state).RunStory(results)
    except Exception:
        if expectation == 'pass':
            raise
        elif expectation == 'fail':
            msg = 'Expected exception while running %s' % page.display_name
            exception_formatter.PrintFormattedException(msg=msg)
            return
        if expectation != 'flaky':
            logging.warning(
                'Unknown expectation %s while handling exception for %s',
                expectation, page.display_name)
            raise
        # Flaky tests are handled here.
        num_retries = expectations.GetFlakyRetriesForPage(
            shared_page_state.browser, page)
        if not num_retries:
            # Re-raise the exception.
            raise
        # Re-run the test up to |num_retries| times.
        for ii in xrange(0, num_retries):
            print 'FLAKY TEST FAILURE, retrying: ' + page.display_name
            try:
                super(cls, shared_page_state).RunStory(results)
                break
            except Exception:
                # Squelch any exceptions from any but the last retry.
                if ii == num_retries - 1:
                    raise
    else:
        if expectation == 'fail':
            logging.warning('%s was expected to fail, but passed.\n',
                            page.display_name)
Example #4
 def _RunGpuTest(self, url, test_name, *args):
     expectations = self.__class__.GetExpectations()
     expectation = expectations.GetExpectationForTest(
         self.browser, url, test_name)
     if self.__class__._also_run_disabled_tests:
         # Ignore test expectations if the user has requested it.
         expectation = 'pass'
     if expectation == 'skip':
         # skipTest in Python's unittest harness raises an exception, so
         # aborts the control flow here.
         self.skipTest('SKIPPING TEST due to test expectations')
     try:
         # TODO(nednguyen): For some reason the arguments are getting wrapped
         # in another tuple sometimes (like in the WebGL extension tests).
         # Perhaps only if multiple arguments are yielded in the test
         # generator?
         if len(args) == 1 and isinstance(args[0], tuple):
             args = args[0]
         self.RunActualGpuTest(url, *args)
     except Exception:
         if expectation == 'pass':
             # This is not an expected exception or test failure, so print
             # the detail to the console.
             exception_formatter.PrintFormattedException()
             # Symbolize any crash dump (like from the GPU process) that
             # might have happened but wasn't detected above. Note we don't
             # do this for either 'fail' or 'flaky' expectations because
             # there are still quite a few flaky failures in the WebGL test
             # expectations, and since minidump symbolization is slow
             # (upwards of one minute on a fast laptop), symbolizing all the
             # stacks could slow down the tests' running time unacceptably.
             self.browser.LogSymbolizedUnsymbolizedMinidumps(logging.ERROR)
             # This failure might have been caused by a browser or renderer
             # crash, so restart the browser to make sure any state doesn't
             # propagate to the next test iteration.
             self._RestartBrowser('unexpected test failure')
             raise
         elif expectation == 'fail':
             msg = 'Expected exception while running %s' % test_name
             exception_formatter.PrintFormattedException(msg=msg)
             # Even though this is a known failure, the browser might still
             # be in a bad state; for example, certain kinds of timeouts
             # will affect the next test. Restart the browser to prevent
             # these kinds of failures propagating to the next test.
             self._RestartBrowser('expected test failure')
             return
         if expectation != 'flaky':
             logging.warning(
                 'Unknown expectation %s while handling exception for %s',
                 expectation, test_name)
             raise
         # Flaky tests are handled here.
         num_retries = expectations.GetFlakyRetriesForTest(
             self.browser, url, test_name)
         if not num_retries:
             # Re-raise the exception.
             raise
         # Re-run the test up to |num_retries| times.
         for ii in xrange(0, num_retries):
             print 'FLAKY TEST FAILURE, retrying: ' + test_name
             try:
                 # For robustness, shut down the browser and restart it
                 # between flaky test failures, to make sure any state
                 # doesn't propagate to the next iteration.
                 self._RestartBrowser('flaky test failure')
                 self.RunActualGpuTest(url, *args)
                 break
             except Exception:
                 # Squelch any exceptions from any but the last retry.
                 if ii == num_retries - 1:
                     # Restart the browser after the last failure to make sure
                     # any state doesn't propagate to the next iteration.
                     self._RestartBrowser('excessive flaky test failures')
                     raise
     else:
         if expectation == 'fail':
             logging.warning('%s was expected to fail, but passed.\n',
                             test_name)
Example #5
 def _RunGpuTest(self, url, test_name, *args):
     temp_page = _EmulatedPage(url, test_name)
     expectations = self.__class__.GetExpectations()
     expectation = expectations.GetExpectationForPage(
         self.browser, temp_page)
     if expectation == 'skip':
         # skipTest in Python's unittest harness raises an exception, so
         # aborts the control flow here.
         self.skipTest('SKIPPING TEST due to test expectations')
     try:
         # TODO(nednguyen): For some reason the arguments are getting wrapped
         # in another tuple sometimes (like in the WebGL extension tests).
         # Perhaps only if multiple arguments are yielded in the test
         # generator?
         if len(args) == 1 and isinstance(args[0], tuple):
             args = args[0]
         self.RunActualGpuTest(url, *args)
     except Exception:
         if expectation == 'pass':
             # This is not an expected exception or test failure, so print
             # the detail to the console.
             exception_formatter.PrintFormattedException()
             # This failure might have been caused by a browser or renderer
             # crash, so restart the browser to make sure any state doesn't
             # propagate to the next test iteration.
             self._RestartBrowser('unexpected test failure')
             raise
         elif expectation == 'fail':
             msg = 'Expected exception while running %s' % test_name
             exception_formatter.PrintFormattedException(msg=msg)
             # Even though this is a known failure, the browser might still
             # be in a bad state; for example, certain kinds of timeouts
             # will affect the next test. Restart the browser to prevent
             # these kinds of failures propagating to the next test.
             self._RestartBrowser('expected test failure')
             return
         if expectation != 'flaky':
             logging.warning(
                 'Unknown expectation %s while handling exception for %s',
                 expectation, test_name)
             raise
         # Flaky tests are handled here.
         num_retries = expectations.GetFlakyRetriesForPage(
             self.browser, temp_page)
         if not num_retries:
             # Re-raise the exception.
             raise
         # Re-run the test up to |num_retries| times.
         for ii in xrange(0, num_retries):
             print 'FLAKY TEST FAILURE, retrying: ' + test_name
             try:
                 # For robustness, shut down the browser and restart it
                 # between flaky test failures, to make sure any state
                 # doesn't propagate to the next iteration.
                 self._RestartBrowser('flaky test failure')
                 self.RunActualGpuTest(url, *args)
                 break
             except Exception:
                 # Squelch any exceptions from any but the last retry.
                 if ii == num_retries - 1:
                     # Restart the browser after the last failure to make sure
                     # any state doesn't propagate to the next iteration.
                     self._RestartBrowser('excessive flaky test failures')
                     raise
     else:
         if expectation == 'fail':
             logging.warning('%s was expected to fail, but passed.\n',
                             test_name)
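Examples #1, #4 and #5 share the same flaky-retry shape: announce the retry, restart the browser so failure state cannot leak between attempts, rerun the test, and re-raise (after one final restart) only once the last retry has failed. Condensed into a standalone helper it looks roughly like this; the method name is a hypothetical addition, and the code assumes the same RunActualGpuTest and _RestartBrowser helpers used above.

def _RunWithFlakyRetries(self, url, test_name, args, num_retries):
    # Condensed form of the retry loop from Examples #1, #4 and #5.
    for attempt in range(num_retries):
        print('FLAKY TEST FAILURE, retrying: ' + test_name)
        try:
            # Restart between attempts so state from one failure does not
            # propagate to the next run.
            self._RestartBrowser('flaky test failure')
            self.RunActualGpuTest(url, *args)
            return
        except Exception:
            # Swallow failures from every retry except the last one.
            if attempt == num_retries - 1:
                self._RestartBrowser('excessive flaky test failures')
                raise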