def run(self, result=None):
    """Run this test, collecting its outcome into ``result``.

    This mirrors ``unittest.TestCase.run`` but adds support for the
    ``self.expected == "fail"`` marker: any exception raised during
    setUp/test/tearDown of an expected-failure test is converted into
    ``_ExpectedFailure``, and an expected-failure test that passes
    raises ``_UnexpectedSuccess``.

    :param result: TestResult to record outcomes into. When None, a
        default result is created and ``startTestRun``/``stopTestRun``
        are invoked on it around the test.
    """
    # Bug 967566 suggests refactoring run, which would hopefully
    # mean getting rid of this inner function, which only sits
    # here to reduce code duplication:
    def expected_failure(result, exc_info):
        # Older TestResult implementations may lack addExpectedFailure;
        # in that case warn and report the expected failure as a pass.
        addExpectedFailure = getattr(result, "addExpectedFailure", None)
        if addExpectedFailure is not None:
            addExpectedFailure(self, exc_info)
        else:
            warnings.warn("TestResult has no addExpectedFailure method, "
                          "reporting as passes", RuntimeWarning)
            result.addSuccess(self)

    self.start_time = time.time()
    orig_result = result
    if result is None:
        result = self.defaultTestResult()
        startTestRun = getattr(result, "startTestRun", None)
        if startTestRun is not None:
            startTestRun()

    result.startTest(self)

    testMethod = getattr(self, self._testMethodName)
    if getattr(self.__class__, "__unittest_skip__", False) or getattr(
            testMethod, "__unittest_skip__", False):
        # If the class or method was skipped.
        try:
            skip_why = getattr(self.__class__, "__unittest_skip_why__", "") or getattr(
                testMethod, "__unittest_skip_why__", ""
            )
            self._addSkip(result, skip_why)
        finally:
            result.stopTest(self)
        self.stop_time = time.time()
        return

    try:
        success = False
        try:
            if self.expected == "fail":
                try:
                    self.setUp()
                except Exception:
                    raise _ExpectedFailure(sys.exc_info())
            else:
                self.setUp()
        except SkipTest as e:
            self._addSkip(result, str(e))
        except KeyboardInterrupt:
            raise
        except _ExpectedFailure as e:
            expected_failure(result, e.exc_info)
        except:
            # Bare except is deliberate: mirror unittest's catch-all so
            # any setUp error (not just Exception subclasses) is reported.
            self._enter_pm()
            result.addError(self, sys.exc_info())
        else:
            try:
                if self.expected == "fail":
                    try:
                        testMethod()
                    except:
                        raise _ExpectedFailure(sys.exc_info())
                    # Expected-failure test completed without raising.
                    raise _UnexpectedSuccess
                else:
                    testMethod()
            except self.failureException:
                self._enter_pm()
                result.addFailure(self, sys.exc_info())
            except KeyboardInterrupt:
                raise
            except _ExpectedFailure as e:
                expected_failure(result, e.exc_info)
            except _UnexpectedSuccess:
                addUnexpectedSuccess = getattr(result, "addUnexpectedSuccess", None)
                if addUnexpectedSuccess is not None:
                    addUnexpectedSuccess(self)
                else:
                    warnings.warn(
                        "TestResult has no addUnexpectedSuccess method, reporting as failures",
                        RuntimeWarning
                    )
                    result.addFailure(self, sys.exc_info())
            except SkipTest as e:
                self._addSkip(result, str(e))
            except:
                self._enter_pm()
                result.addError(self, sys.exc_info())
            else:
                success = True

            # tearDown runs regardless of the test method's outcome.
            try:
                if self.expected == "fail":
                    try:
                        self.tearDown()
                    except:
                        raise _ExpectedFailure(sys.exc_info())
                else:
                    self.tearDown()
            except KeyboardInterrupt:
                raise
            except _ExpectedFailure as e:
                expected_failure(result, e.exc_info)
            except:
                self._enter_pm()
                result.addError(self, sys.exc_info())
                success = False
            # Here we could handle doCleanups() instead of calling cleanTest directly
            self.cleanTest()

        if success:
            result.addSuccess(self)
    finally:
        # BUGFIX: stop_time was previously set only on the skip path,
        # leaving self.stop_time unset (or stale) for every test that
        # actually ran; record it unconditionally before reporting, so
        # result.stopTest can observe a consistent start/stop pair.
        self.stop_time = time.time()
        result.stopTest(self)
        if orig_result is None:
            stopTestRun = getattr(result, "stopTestRun", None)
            if stopTestRun is not None:
                stopTestRun()
def run(self, result=None):
    """Run this test, collecting its outcome into ``result``.

    This mirrors ``unittest.TestCase.run`` but adds support for the
    ``self.expected == 'fail'`` marker: any exception raised during
    setUp/test/tearDown of an expected-failure test is converted into
    ``_ExpectedFailure``, and an expected-failure test that passes
    raises ``_UnexpectedSuccess``.

    :param result: TestResult to record outcomes into. When None, a
        default result is created and ``startTestRun``/``stopTestRun``
        are invoked on it around the test.
    """
    # Bug 967566 suggests refactoring run, which would hopefully
    # mean getting rid of this inner function, which only sits
    # here to reduce code duplication:
    def expected_failure(result, exc_info):
        # Older TestResult implementations may lack addExpectedFailure;
        # in that case warn and report the expected failure as a pass.
        addExpectedFailure = getattr(result, "addExpectedFailure", None)
        if addExpectedFailure is not None:
            addExpectedFailure(self, exc_info)
        else:
            warnings.warn(
                "TestResult has no addExpectedFailure method, "
                "reporting as passes", RuntimeWarning)
            result.addSuccess(self)

    self.start_time = time.time()
    orig_result = result
    if result is None:
        result = self.defaultTestResult()
        startTestRun = getattr(result, 'startTestRun', None)
        if startTestRun is not None:
            startTestRun()

    result.startTest(self)

    testMethod = getattr(self, self._testMethodName)
    if (getattr(self.__class__, "__unittest_skip__", False) or
            getattr(testMethod, "__unittest_skip__", False)):
        # If the class or method was skipped.
        try:
            skip_why = (
                getattr(self.__class__, '__unittest_skip_why__', '')
                or getattr(testMethod, '__unittest_skip_why__', ''))
            self._addSkip(result, skip_why)
        finally:
            result.stopTest(self)
        self.stop_time = time.time()
        return

    try:
        success = False
        try:
            if self.expected == "fail":
                try:
                    self.setUp()
                except Exception:
                    raise _ExpectedFailure(sys.exc_info())
            else:
                self.setUp()
        except SkipTest as e:
            self._addSkip(result, str(e))
        except KeyboardInterrupt:
            raise
        except _ExpectedFailure as e:
            expected_failure(result, e.exc_info)
        except:
            # Bare except is deliberate: mirror unittest's catch-all so
            # any setUp error (not just Exception subclasses) is reported.
            self._enter_pm()
            result.addError(self, sys.exc_info())
        else:
            try:
                if self.expected == 'fail':
                    try:
                        testMethod()
                    except:
                        raise _ExpectedFailure(sys.exc_info())
                    # Expected-failure test completed without raising.
                    raise _UnexpectedSuccess
                else:
                    testMethod()
            except self.failureException:
                self._enter_pm()
                result.addFailure(self, sys.exc_info())
            except KeyboardInterrupt:
                raise
            except _ExpectedFailure as e:
                expected_failure(result, e.exc_info)
            except _UnexpectedSuccess:
                addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
                if addUnexpectedSuccess is not None:
                    addUnexpectedSuccess(self)
                else:
                    warnings.warn(
                        "TestResult has no addUnexpectedSuccess method, reporting as failures",
                        RuntimeWarning)
                    result.addFailure(self, sys.exc_info())
            except SkipTest as e:
                self._addSkip(result, str(e))
            except:
                self._enter_pm()
                result.addError(self, sys.exc_info())
            else:
                success = True

            # tearDown runs regardless of the test method's outcome.
            try:
                if self.expected == "fail":
                    try:
                        self.tearDown()
                    except:
                        raise _ExpectedFailure(sys.exc_info())
                else:
                    self.tearDown()
            except KeyboardInterrupt:
                raise
            except _ExpectedFailure as e:
                expected_failure(result, e.exc_info)
            except:
                self._enter_pm()
                result.addError(self, sys.exc_info())
                success = False
            # Here we could handle doCleanups() instead of calling cleanTest directly
            self.cleanTest()

        if success:
            result.addSuccess(self)
    finally:
        # BUGFIX: stop_time was previously set only on the skip path,
        # leaving self.stop_time unset (or stale) for every test that
        # actually ran; record it unconditionally before reporting, so
        # result.stopTest can observe a consistent start/stop pair.
        self.stop_time = time.time()
        result.stopTest(self)
        if orig_result is None:
            stopTestRun = getattr(result, 'stopTestRun', None)
            if stopTestRun is not None:
                stopTestRun()
def run_js_test(self, filename, marionette=None):
    """
    Run a JavaScript test file and collect its set of assertions
    into the current test's results.

    :param filename: The path to the JavaScript test file to execute.
        May be relative to the current script.
    :param marionette: The Marionette object in which to execute the
        test. Defaults to self.marionette.
    """
    marionette = marionette or self.marionette
    if not os.path.isabs(filename):
        # Find the caller's filename and make the path relative to that.
        caller_file = sys._getframe(1).f_globals.get("__file__", "")
        caller_file = os.path.abspath(caller_file)
        filename = os.path.join(os.path.dirname(caller_file), filename)
    # assert_ is a deprecated alias of assertTrue; behavior is identical.
    self.assertTrue(os.path.exists(filename),
                    'Script "%s" must exist' % filename)
    # NOTE(review): test_name is read/written on self.marionette even when
    # a different `marionette` argument was passed — confirm intended.
    original_test_name = self.marionette.test_name
    self.marionette.test_name = os.path.basename(filename)

    # BUGFIX: the script and optional head files were opened without ever
    # being closed; use context managers so the handles are released.
    with open(filename, "r") as f:
        js = f.read()
    args = []

    # A "head.js"-style include declared in the test is prepended to the
    # test's own source before execution.
    head_js = JSTest.head_js_re.search(js)
    if head_js:
        head_js = head_js.group(3)
        with open(os.path.join(os.path.dirname(filename), head_js), "r") as head:
            js = head.read() + js

    # Execution context declared in the test; defaults to content.
    context = JSTest.context_re.search(js)
    if context:
        context = context.group(3)
    else:
        context = "content"

    if "SpecialPowers" in js:
        self.setup_SpecialPowers_observer()
        if context == "content":
            js = "var SpecialPowers = window.wrappedJSObject.SpecialPowers;\n" + js
        else:
            # Chrome context: load the SpecialPowers scripts directly.
            marionette.execute_script("""
            if (typeof(SpecialPowers) == 'undefined') {
              let loader = Components.classes["@mozilla.org/moz/jssubscript-loader;1"]
                .getService(Components.interfaces.mozIJSSubScriptLoader);
              loader.loadSubScript("chrome://specialpowers/content/specialpowersAPI.js");
              loader.loadSubScript("chrome://specialpowers/content/SpecialPowersObserverAPI.js");
              loader.loadSubScript("chrome://specialpowers/content/ChromePowers.js");
            }
            """)

    marionette.set_context(context)

    if context != "chrome":
        marionette.navigate("data:text/html,<html>test page</html>")

    # Per-test timeout overrides declared in the test source.
    timeout = JSTest.timeout_re.search(js)
    if timeout:
        timeout = timeout.group(3)
        marionette.set_script_timeout(timeout)

    inactivity_timeout = JSTest.inactivity_timeout_re.search(js)
    if inactivity_timeout:
        inactivity_timeout = inactivity_timeout.group(3)

    try:
        results = marionette.execute_js_script(
            js, args,
            inactivity_timeout=inactivity_timeout,
            filename=os.path.basename(filename)
        )

        self.assertTrue("timeout" not in filename,
                        "expected timeout not triggered")

        if "fail" in filename:
            # Negative test: it is expected to report failures.
            self.assertTrue(len(results["failures"]) > 0,
                            "expected test failures didn't occur")
        else:
            for failure in results["failures"]:
                diag = "" if failure.get("diag") is None else failure["diag"]
                name = ("got false, expected true"
                        if failure.get("name") is None else failure["name"])
                self.logger.test_status(self.test_name, name, "FAIL",
                                        message=diag)
            for failure in results["expectedFailures"]:
                diag = "" if failure.get("diag") is None else failure["diag"]
                name = ("got false, expected false"
                        if failure.get("name") is None else failure["name"])
                self.logger.test_status(self.test_name, name, "FAIL",
                                        expected="FAIL", message=diag)
            for failure in results["unexpectedSuccesses"]:
                diag = "" if failure.get("diag") is None else failure["diag"]
                name = ("got true, expected false"
                        if failure.get("name") is None else failure["name"])
                self.logger.test_status(self.test_name, name, "PASS",
                                        expected="FAIL", message=diag)
            self.assertEqual(0, len(results["failures"]),
                             "%d tests failed" % len(results["failures"]))
            if len(results["unexpectedSuccesses"]) > 0:
                raise _UnexpectedSuccess("")
            if len(results["expectedFailures"]) > 0:
                raise _ExpectedFailure((AssertionError, AssertionError(""), None))

        self.assertTrue(
            results["passed"]
            + len(results["failures"])
            + len(results["expectedFailures"])
            + len(results["unexpectedSuccesses"]) > 0,
            "no tests run",
        )
    except ScriptTimeoutException:
        if "timeout" in filename:
            # expected exception
            pass
        else:
            self.loglines = marionette.get_logs()
            raise
    # NOTE(review): not in a finally, so an early raise (e.g. the
    # _ExpectedFailure above) leaves test_name unrestored — presumably
    # intentional so the report carries the JS file's name; confirm.
    self.marionette.test_name = original_test_name
def run_js_test(self, filename, marionette=None):
    '''
    Run a JavaScript test file and collect its set of assertions
    into the current test's results.

    :param filename: The path to the JavaScript test file to execute.
        May be relative to the current script.
    :param marionette: The Marionette object in which to execute the
        test. Defaults to self.marionette.
    '''
    marionette = marionette or self.marionette
    if not os.path.isabs(filename):
        # Find the caller's filename and make the path relative to that.
        caller_file = sys._getframe(1).f_globals.get('__file__', '')
        caller_file = os.path.abspath(caller_file)
        filename = os.path.join(os.path.dirname(caller_file), filename)
    # assert_ is a deprecated alias of assertTrue; behavior is identical.
    self.assertTrue(os.path.exists(filename),
                    'Script "{}" must exist'.format(filename))
    # NOTE(review): test_name is read/written on self.marionette even when
    # a different `marionette` argument was passed — confirm intended.
    original_test_name = self.marionette.test_name
    self.marionette.test_name = os.path.basename(filename)

    # BUGFIX: the script and optional head files were opened without ever
    # being closed; use context managers so the handles are released.
    with open(filename, 'r') as f:
        js = f.read()
    args = []

    # A "head.js"-style include declared in the test is prepended to the
    # test's own source before execution.
    head_js = JSTest.head_js_re.search(js)
    if head_js:
        head_js = head_js.group(3)
        with open(os.path.join(os.path.dirname(filename), head_js), 'r') as head:
            js = head.read() + js

    # Execution context declared in the test; defaults to content.
    context = JSTest.context_re.search(js)
    if context:
        context = context.group(3)
    else:
        context = 'content'

    if 'SpecialPowers' in js:
        self.setup_SpecialPowers_observer()
        if context == 'content':
            js = "var SpecialPowers = window.wrappedJSObject.SpecialPowers;\n" + js
        else:
            # Chrome context: load the SpecialPowers scripts directly.
            marionette.execute_script("""
            if (typeof(SpecialPowers) == 'undefined') {
              let loader = Components.classes["@mozilla.org/moz/jssubscript-loader;1"]
                .getService(Components.interfaces.mozIJSSubScriptLoader);
              loader.loadSubScript("chrome://specialpowers/content/specialpowersAPI.js");
              loader.loadSubScript("chrome://specialpowers/content/SpecialPowersObserverAPI.js");
              loader.loadSubScript("chrome://specialpowers/content/ChromePowers.js");
            }
            """)

    marionette.set_context(context)

    if context != 'chrome':
        marionette.navigate('data:text/html,<html>test page</html>')

    # Per-test timeout overrides declared in the test source.
    timeout = JSTest.timeout_re.search(js)
    if timeout:
        timeout = timeout.group(3)
        marionette.set_script_timeout(timeout)

    inactivity_timeout = JSTest.inactivity_timeout_re.search(js)
    if inactivity_timeout:
        inactivity_timeout = inactivity_timeout.group(3)

    try:
        results = marionette.execute_js_script(
            js, args,
            inactivity_timeout=inactivity_timeout,
            filename=os.path.basename(filename))

        self.assertTrue('timeout' not in filename,
                        'expected timeout not triggered')

        if 'fail' in filename:
            # Negative test: it is expected to report failures.
            self.assertTrue(len(results['failures']) > 0,
                            "expected test failures didn't occur")
        else:
            for failure in results['failures']:
                diag = "" if failure.get('diag') is None else failure['diag']
                name = ("got false, expected true"
                        if failure.get('name') is None else failure['name'])
                self.logger.test_status(self.test_name, name, 'FAIL',
                                        message=diag)
            for failure in results['expectedFailures']:
                diag = "" if failure.get('diag') is None else failure['diag']
                name = ("got false, expected false"
                        if failure.get('name') is None else failure['name'])
                self.logger.test_status(self.test_name, name, 'FAIL',
                                        expected='FAIL', message=diag)
            for failure in results['unexpectedSuccesses']:
                diag = "" if failure.get('diag') is None else failure['diag']
                name = ("got true, expected false"
                        if failure.get('name') is None else failure['name'])
                self.logger.test_status(self.test_name, name, 'PASS',
                                        expected='FAIL', message=diag)
            self.assertEqual(0, len(results['failures']),
                             '{} tests failed'.format(len(results['failures'])))
            if len(results['unexpectedSuccesses']) > 0:
                raise _UnexpectedSuccess('')
            if len(results['expectedFailures']) > 0:
                raise _ExpectedFailure(
                    (AssertionError, AssertionError(''), None))

        self.assertTrue(
            results['passed']
            + len(results['failures'])
            + len(results['expectedFailures'])
            + len(results['unexpectedSuccesses']) > 0,
            'no tests run')
    except ScriptTimeoutException:
        if 'timeout' in filename:
            # expected exception
            pass
        else:
            self.loglines = marionette.get_logs()
            raise
    # NOTE(review): not in a finally, so an early raise (e.g. the
    # _ExpectedFailure above) leaves test_name unrestored — presumably
    # intentional so the report carries the JS file's name; confirm.
    self.marionette.test_name = original_test_name