def run_test(test: TestCase, result: TestResult) -> bool:
    """Run a single test case, reporting its outcome through `result`.

    Returns True only when the test object could not even be started
    (missing Django `_pre_setup` hook); ordinary pass/fail/error outcomes
    are recorded on `result` by unittest itself.
    """
    did_fail = False
    method = get_test_method(test)
    # In fast mode, known-slow tests are skipped outright.
    if fast_tests_only() and is_known_slow_test(method):
        return did_fail
    name = full_test_name(test)
    # Isolate this test's cache/redis keys and flush leftover state.
    bounce_key_prefix_for_testing(name)
    bounce_redis_key_prefix_for_testing(name)
    flush_caches_for_testing()
    if not hasattr(test, "_pre_setup"):
        # A test object without Django's _pre_setup hook is broken;
        # report a synthetic error (no traceback available).
        msg = "Test doesn't have _pre_setup; something is wrong."
        fake_exc_info = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
        result.addError(test, fake_exc_info)
        return True
    test._pre_setup()
    started = time.time()
    test(result)  # unittest will handle skipping, error, failure and success.
    elapsed = time.time() - started
    # Flag tests that ran longer than their declared budget.
    enforce_timely_test_completion(method, name, elapsed, result)
    reason = getattr(method, 'slowness_reason', '')
    TEST_TIMINGS.append((elapsed, name, reason))
    test._post_teardown()
    return did_fail
def run_test(test: TestCase, result: TestResult) -> bool:
    """Execute one test case and record timing on TEST_TIMINGS.

    True is returned only for a test that lacks Django's `_pre_setup`
    hook and therefore cannot be run; all normal outcomes flow through
    `result` and the function returns False.
    """
    test_method = get_test_method(test)
    # Skip tests explicitly marked slow when running in fast mode.
    if fast_tests_only() and is_known_slow_test(test_method):
        return False
    test_name = full_test_name(test)
    # Give this test a fresh, isolated cache/redis namespace.
    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)
    flush_caches_for_testing()
    if not hasattr(test, "_pre_setup"):
        msg = "Test doesn't have _pre_setup; something is wrong."
        # Synthesize an exc_info triple; there is no real traceback here.
        error_pre_setup = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
        result.addError(test, error_pre_setup)
        return True
    test._pre_setup()
    t_start = time.time()
    test(result)  # unittest will handle skipping, error, failure and success.
    duration = time.time() - t_start
    enforce_timely_test_completion(test_method, test_name, duration, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((duration, test_name, slowness_reason))
    test._post_teardown()
    return False
def _execute_test(cls, test: FirmwareTestClass, router: Router, routers: List[Router]) -> TestResult:
    """Run the given firmware test class against a single router.

    Loads every test case from `test`, prepares each one with the target
    router (and the full router list), runs the suite inside this
    process's network namespace, and returns the TestResult.

    :param test: test class to load cases from
    :param router: router the test runs against; must be a real Router
    :param routers: all known routers, passed to each case's prepare()
    :raises ValueError: if `router` is not a Router instance
    """
    if not isinstance(router, Router):
        raise ValueError("Chosen Router is not a real Router...")
    # proofed: this method runs in other process as the server
    setproctitle(str(router.id) + " - " + str(test))
    logging.debug("%sExecute test " + str(test) + " on Router(" + str(router.id) + ")",
                  LoggerSetup.get_log_deep(2))
    test_suite = defaultTestLoader.loadTestsFromTestCase(test)
    # prepare all test cases
    for test_case in test_suite:
        logging.debug("%sTestCase " + str(test_case), LoggerSetup.get_log_deep(4))
        test_case.prepare(router, routers)
    result = TestResult()
    # Switch into the router's network namespace before running.
    cls.__setns(router)
    try:
        result = test_suite.run(result)
    except Exception as e:
        logging.error("%sTestCase execution raised an exception", LoggerSetup.get_log_deep(3))
        logging.error("%s" + str(e), LoggerSetup.get_log_deep(3))
        test_obj = test()
        result.addError(test_obj, sys.exc_info())  # add the reason of the exception
    finally:
        # I'm sry for this dirty hack, but if you don't do this you get an
        # "TypeError: cannot serialize '_io.TextIOWrapper' object" because sys.stdout is not serializeable...
        result._original_stdout = None
        result._original_stderr = None
    logging.debug("%sResult from test " + str(result), LoggerSetup.get_log_deep(3))
    return result
def run_test(test: TestCase, result: TestResult) -> bool:
    """Run one test, timing it and reporting through `result`.

    Returns True only when `_pre_setup()` raises before the test could
    start; ordinary outcomes are reported via `result` and False is
    returned.
    """
    test_method = get_test_method(test)
    # Fast mode skips anything marked as known-slow.
    if fast_tests_only() and is_known_slow_test(test_method):
        return False
    test_name = full_test_name(test)
    # Namespace cache and redis keys per test for isolation.
    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)
    try:
        test._pre_setup()
    except Exception:
        # Setup itself failed; record it and report the test as unrunnable.
        result.addError(test, sys.exc_info())
        return True
    t0 = time.time()
    test(result)  # unittest will handle skipping, error, failure and success.
    elapsed = time.time() - t0
    enforce_timely_test_completion(test_method, test_name, elapsed, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((elapsed, test_name, slowness_reason))
    test._post_teardown()
    return False
def addError(self, test, err):
    """Record an errored test: bump counters and store status, captured
    output and the formatted traceback in the per-method result entry."""
    class_entry = self.result["class_list"][self.class_name]
    method_entry = class_entry["methods"][self.method_name]
    self.result["error_count"] += 1
    class_entry["error_count"] += 1
    method_entry["status"] = ERROR
    TestResult.addError(self, test, err)
    # The base class has now appended the formatted traceback to self.errors.
    method_entry["message"] = self.silence_output()
    method_entry["error"] = self.errors[-1][1]
def addError(self, test: TestCase, err):
    """Delegate to TestResult, echo the error on the output stream, and
    store a TestResultInfo record for later reporting."""
    TestResult.addError(self, test, err)
    # Write the marker and the exception value on one output line.
    for chunk in ("ERROR", " ... msg: " + str(err[1])):
        self.stream.write(chunk)
    self.stream.writeln()
    info = TestResultInfo(
        test=test,
        execution_time=self.__get_test_execution_time(),
        status_id=TestStatus.ERROR,
        error_msg=err,
        custom_msg=self.__get_api_payload_gui_screenshot(test))
    self.__error_test.append(info)
def run_test(test: TestCase, result: TestResult) -> bool:
    """Run a single test case, recording timing data on TEST_TIMINGS.

    Raises TestSuiteImportError when the "test" is actually a unittest
    loader import-failure placeholder. Returns True when the test object
    is otherwise unrunnable; normal outcomes go through `result`.
    """
    test_method = get_test_method(test)
    if fast_tests_only() and is_known_slow_test(test_method):
        return False
    test_name = full_test_name(test)
    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)
    flush_caches_for_testing()
    if not hasattr(test, "_pre_setup"):
        # We are supposed to get here only when running a single test suite
        # on Python 3.5 or higher (the old import failure prefix is being
        # checked just in case). When running several test suites at once,
        # all import failures should be caught in deserialize_suite.
        for import_failure_prefix in ('unittest.loader.ModuleImportFailure.',
                                      'unittest.loader._FailedTest.'):
            if test_name.startswith(import_failure_prefix):
                actual_test_name = test_name[len(import_failure_prefix):]
                raise TestSuiteImportError(test_name=actual_test_name)
        msg = "Test doesn't have _pre_setup; something is wrong."
        error_pre_setup = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
        result.addError(test, error_pre_setup)
        return True
    test._pre_setup()
    started = time.time()
    test(result)  # unittest will handle skipping, error, failure and success.
    elapsed = time.time() - started
    enforce_timely_test_completion(test_method, test_name, elapsed, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((elapsed, test_name, slowness_reason))
    test._post_teardown()
    return False
def addError(self, *args: Any, **kwargs: Any) -> None:
    """Record an errored test via the base TestResult, and also remember
    its full name in self.failed_tests.

    Accepts the (test, err) positional signature of TestResult.addError;
    args[0] is the failing test case.
    """
    # Modernized: Py2-style "# type:" comment replaced with real
    # annotations, matching the other addError overrides in this file.
    TestResult.addError(self, *args, **kwargs)
    test_name = full_test_name(args[0])
    self.failed_tests.append(test_name)
def addError(self, *args: Any, **kwargs: Any) -> None:
    """Record the error with the base class and remember the failing
    test's unittest id in self.failed_tests."""
    TestResult.addError(self, *args, **kwargs)
    failing_test = args[0]
    self.failed_tests.append(failing_test.id())
def run_test(test: TestCase, result: TestResult) -> bool:
    """Run a single test case, reporting outcomes and timing via `result`.

    If the test object is a unittest loader import-failure placeholder,
    re-attempt the module import in a subprocess to produce a clearer
    traceback for the developer. Returns True when the test could not be
    run at all; otherwise False, with pass/fail/error recorded by unittest.
    """
    failed = False
    test_method = get_test_method(test)
    # Fast mode skips tests that are marked as known-slow.
    if fast_tests_only() and is_known_slow_test(test_method):
        return failed
    test_name = full_test_name(test)
    # Give this test an isolated cache/redis key namespace, then flush.
    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)
    flush_caches_for_testing()
    if not hasattr(test, "_pre_setup"):
        # test_name is likely of the form unittest.loader.ModuleImportFailure.zerver.tests.test_upload
        import_failure_prefix = 'unittest.loader.ModuleImportFailure.'
        if test_name.startswith(import_failure_prefix):
            actual_test_name = test_name[len(import_failure_prefix):]
            error_msg = ("\nActual test to be run is %s, but import failed.\n"
                         "Importing test module directly to generate clearer "
                         "traceback:\n") % (actual_test_name, )
            result.addInfo(test, error_msg)
            try:
                # Re-run the import in a child interpreter so the real
                # ImportError traceback is shown to the developer.
                command = [
                    sys.executable,
                    "-c",
                    "import %s" % (actual_test_name, )
                ]
                msg = "Import test command: `%s`" % (' '.join(command), )
                result.addInfo(test, msg)
                subprocess.check_call(command)
            except subprocess.CalledProcessError:
                msg = ("If that traceback is confusing, try doing the "
                       "import inside `./manage.py shell`")
                result.addInfo(test, msg)
                result.addError(test, sys.exc_info())
                return True
            # The subprocess import worked even though the loader's did not;
            # report this contradiction (likely an import cycle).
            msg = ("Import unexpectedly succeeded! Something is wrong. Try "
                   "running `import %s` inside `./manage.py shell`.\n"
                   "If that works, you may have introduced an import "
                   "cycle.") % (actual_test_name, )
            import_error = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
            result.addError(test, import_error)
            return True
        else:
            # Not an import failure: the test object itself is broken.
            msg = "Test doesn't have _pre_setup; something is wrong."
            error_pre_setup = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
            result.addError(test, error_pre_setup)
            return True
    test._pre_setup()
    start_time = time.time()
    test(result)  # unittest will handle skipping, error, failure and success.
    delay = time.time() - start_time
    # Flag tests that exceeded their allowed runtime.
    enforce_timely_test_completion(test_method, test_name, delay, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((delay, test_name, slowness_reason))
    test._post_teardown()
    return failed
def addError(self, *args: Any, **kwargs: Any) -> None:
    """Record the error with the base class and remember the test's full
    name in self.failed_tests."""
    TestResult.addError(self, *args, **kwargs)
    failed_test = args[0]
    self.failed_tests.append(full_test_name(failed_test))
def addError(self, *args: Any, **kwargs: Any) -> None:
    """Forward the error straight to unittest's TestResult bookkeeping.

    Accepts the (test, err) positional signature of TestResult.addError.
    """
    # Modernized: Py2-style "# type:" comment replaced with real
    # annotations, matching the other addError overrides in this file.
    TestResult.addError(self, *args, **kwargs)