def run_test(test: TestCase, result: TestResult) -> bool:
    failed = False
    test_method = get_test_method(test)

    if fast_tests_only() and is_known_slow_test(test_method):
        return failed

    test_name = full_test_name(test)

    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)

    try:
        test._pre_setup()
    except Exception:
        result.addError(test, sys.exc_info())
        return True

    start_time = time.time()
    test(result)  # unittest will handle skipping, error, failure and success.
    delay = time.time() - start_time
    enforce_timely_test_completion(test_method, test_name, delay, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((delay, test_name, slowness_reason))

    test._post_teardown()
    return failed

def run_test(test: TestCase, result: TestResult) -> bool:
    failed = False
    test_method = get_test_method(test)

    if fast_tests_only() and is_known_slow_test(test_method):
        return failed

    test_name = full_test_name(test)

    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)
    flush_caches_for_testing()

    if not hasattr(test, "_pre_setup"):
        msg = "Test doesn't have _pre_setup; something is wrong."
        error_pre_setup = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
        result.addError(test, error_pre_setup)
        return True
    test._pre_setup()

    start_time = time.time()
    test(result)  # unittest will handle skipping, error, failure and success.
    delay = time.time() - start_time
    enforce_timely_test_completion(test_method, test_name, delay, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((delay, test_name, slowness_reason))

    test._post_teardown()
    return failed

def _execute_test(cls, test: FirmwareTestClass, router: Router, routers: List[Router]) -> TestResult:
    if not isinstance(router, Router):
        raise ValueError("Chosen Router is not a real Router...")
    # verified: this method runs in a different process than the server
    setproctitle(str(router.id) + " - " + str(test))
    logging.debug("%sExecute test " + str(test) + " on Router(" + str(router.id) + ")",
                  LoggerSetup.get_log_deep(2))

    test_suite = defaultTestLoader.loadTestsFromTestCase(test)

    # prepare all test cases
    for test_case in test_suite:
        logging.debug("%sTestCase " + str(test_case), LoggerSetup.get_log_deep(4))
        test_case.prepare(router, routers)

    result = TestResult()

    cls.__setns(router)
    try:
        result = test_suite.run(result)
    except Exception as e:
        logging.error("%sTestCase execution raised an exception", LoggerSetup.get_log_deep(3))
        logging.error("%s" + str(e), LoggerSetup.get_log_deep(3))
        test_obj = test()
        result.addError(test_obj, sys.exc_info())  # add the reason of the exception
    finally:
        # I'm sorry for this dirty hack, but without it you get a
        # "TypeError: cannot serialize '_io.TextIOWrapper' object" because sys.stdout is not serializable.
        result._original_stdout = None
        result._original_stderr = None

    logging.debug("%sResult from test " + str(result), LoggerSetup.get_log_deep(3))
    return result

def addSubTest(self, test, subtest, outcome):
    """Called when a subtest finishes."""
    TestResult.addSubTest(self, test, subtest, outcome)
    self.testsRun += 1
    self.current_subtest += 1
    self.progress()

def startTest(self, test: TestCase) -> None:
    TestResult.startTest(self, test)
    self.stream.writeln(  # type: ignore # https://github.com/python/typeshed/issues/3139
        "Running {}".format(full_test_name(test)))
    self.stream.flush()  # type: ignore # https://github.com/python/typeshed/issues/3139

def addSkip(self, test: TestCase, reason: str) -> None:
    TestResult.addSkip(self, test, reason)
    self.stream.writeln(  # type: ignore[attr-defined] # https://github.com/python/typeshed/issues/3139
        "** Skipping {}: {}".format(test.id(), reason))
    self.stream.flush()

def startTest(self, test: TestCase) -> None:
    TestResult.startTest(self, test)
    self.stream.writeln(  # type: ignore[attr-defined] # https://github.com/python/typeshed/issues/3139
        f"Running {full_test_name(test)}")
    self.stream.flush()  # type: ignore[attr-defined] # https://github.com/python/typeshed/issues/3139

def addSuccess(self, test):
    self.result["success_count"] += 1
    self.result["class_list"][self.class_name]["success_count"] += 1
    self.result["class_list"][self.class_name]["methods"][self.method_name]["status"] = SUCCESS
    TestResult.addSuccess(self, test)
    self.result["class_list"][self.class_name]["methods"][self.method_name]["message"] = \
        self.silence_output()

def addSkip(self, test: TestCase, reason: str) -> None:
    TestResult.addSkip(self, test, reason)
    self.stream.writeln(  # type: ignore # https://github.com/python/typeshed/issues/3139
        "** Skipping {}: {}".format(full_test_name(test), reason))
    self.stream.flush()  # type: ignore # https://github.com/python/typeshed/issues/3139

def addUnexpectedSuccess(self, test):
    self.result["unexpected_success_count"] += 1
    self.result["class_list"][self.class_name]["unexpected_success_count"] += 1
    self.result["class_list"][self.class_name]["methods"][self.method_name]["status"] = \
        UNEXPECTED_SUCCESS
    TestResult.addUnexpectedSuccess(self, test)
    self.result["class_list"][self.class_name]["methods"][self.method_name]["message"] = \
        self.silence_output()

def addSuccess(self, test: TestCase):
    TestResult.addSuccess(self, test)
    self.stream.write("PASSED")
    self.stream.writeln()
    execution_time = self.__get_test_execution_time()
    self.__passed_test.append(
        TestResultInfo(test=test, execution_time=execution_time, status_id=TestStatus.PASS))

def addUnexpectedSuccess(self, test):
    TestResult.addUnexpectedSuccess(self, test)
    self.stream.write("UNEXPECTED SUCCESS")
    self.stream.writeln()
    execution_time = self.__get_test_execution_time()
    self.__unexpected_successes_test.append(
        TestResultInfo(test=test, execution_time=execution_time,
                       status_id=TestStatus.UNEXPECTED_SUCCESS))

def enforce_timely_test_completion(test_method: Any, test_name: str,
                                   delay: float, result: TestResult) -> None:
    if hasattr(test_method, 'slowness_reason'):
        max_delay = 2.0  # seconds
    else:
        max_delay = 0.4  # seconds

    if delay > max_delay:
        msg = '** Test is TOO slow: %s (%.3f s)\n' % (test_name, delay)
        result.addInfo(test_method, msg)

def addError(self, test, err):
    self.result["error_count"] += 1
    self.result["class_list"][self.class_name]["error_count"] += 1
    self.result["class_list"][self.class_name]["methods"][self.method_name]["status"] = ERROR
    TestResult.addError(self, test, err)
    self.result["class_list"][self.class_name]["methods"][self.method_name]["message"] = \
        self.silence_output()
    self.result["class_list"][self.class_name]["methods"][self.method_name]["error"] = \
        self.errors[-1][1]

def addFailure(self, test, err):
    self.result["failure_count"] += 1
    self.result["class_list"][self.class_name]["failure_count"] += 1
    self.result["class_list"][self.class_name]["methods"][self.method_name]["status"] = FAILURE
    TestResult.addFailure(self, test, err)
    self.result["class_list"][self.class_name]["methods"][self.method_name]["message"] = \
        self.silence_output()
    self.result["class_list"][self.class_name]["methods"][self.method_name]["error"] = \
        self.failures[-1][1]

def addSkip(self, test, reason):
    self.result["skip_count"] += 1
    self.result["class_list"][self.class_name]["skip_count"] += 1
    self.result["class_list"][self.class_name]["methods"][self.method_name]["status"] = SKIP
    TestResult.addSkip(self, test, reason)
    self.result["class_list"][self.class_name]["methods"][self.method_name]["message"] = \
        self.silence_output()
    self.result["class_list"][self.class_name]["methods"][self.method_name]["error"] = reason

def addSkip(self, test: TestCase, reason):
    TestResult.addSkip(self, test, reason)
    self.stream.write("SKIPPED")
    self.stream.write(" ... reason: " + str(reason))
    self.stream.writeln()
    execution_time = self.__get_test_execution_time()
    self.__skipped_test.append(
        TestResultInfo(test=test, execution_time=execution_time,
                       status_id=TestStatus.SKIP, error_msg=reason))

def addFailure(self, test: TestCase, err):
    TestResult.addFailure(self, test, err)
    self.stream.write("FAILED")
    self.stream.write(" ... msg: " + str(err[1]))
    self.stream.writeln()
    execution_time = self.__get_test_execution_time()
    self.__failed_test.append(
        TestResultInfo(test=test,
                       execution_time=execution_time,
                       status_id=TestStatus.FAIL,
                       error_msg=err,
                       custom_msg=self.__get_api_payload_gui_screenshot(test)))

def _wait_for_test_done(cls, test: FirmwareTestClass, router: Router, done_event: DoneEvent) -> None:
    """
    Waits up to 2 minutes for the test to finish, handles its result and triggers the next job/test.

    :param test: test to execute
    :param router: the Router
    :param done_event: event which will be triggered when the task is finished
    """
    logging.debug("%sWait for test " + str(test), LoggerSetup.get_log_deep(2))
    try:
        async_result = cls._task_pool.apply_async(func=cls._execute_test,
                                                  args=(test, router, cls._routers))
        result = async_result.get(120)  # wait 2 minutes or raise a TimeoutError
        logging.debug("%sTest done " + str(test), LoggerSetup.get_log_deep(1))
        logging.debug("%sFrom Router(" + str(router.id) + ")", LoggerSetup.get_log_deep(2))
        cls._test_results.append((router.id, str(test), result))
        try:
            length = len(cls._test_results)
            t = cls._test_results[length - 1]
            cls.write_in_db(str(length), t)
        except Exception as e:
            logging.error("Error while writing test results into the DB: {0}".format(e))
    except Exception as e:
        logging.error("%sTest raised an Exception: " + str(e), LoggerSetup.get_log_deep(1))
        result = TestResult()
        result._original_stdout = None
        result._original_stderr = None
        cls._test_results.append((router.id, str(test), result))
        cls._task_errors.append((router.id, sys.exc_info()))
        try:
            length = len(cls._test_results)
            t = cls._test_results[length - 1]
            cls.write_in_db(str(length), t)
        except Exception as e:
            logging.error("Error while writing test results into the DB: {0}".format(e))
    finally:
        cls.set_running_task(router, None)
        # logging.debug(str(cls._test_results))
        # start next test in the queue
        done_event.set()
        cls.__start_task(router, None)

def __init__(self, stream=None, descriptions=None, outputs=DEFAULT_OUTPUTS, main_test=None):
    TestResult.__init__(self, stream, descriptions)
    self.main_test = main_test
    all_result_handlers = get_result_handlers()
    self.result_handlers = [
        all_result_handlers[result_handler_name](
            stream=stream, main_test=main_test, descriptions=descriptions)
        for result_handler_name in outputs
    ]

def _run_worker(self, test: Test) -> TestVector:
    '''Run one unit test, returning the TestVector.'''
    # Save the original module and qualname so they can be restored before
    # returning the TestVector. Otherwise, Pickle will complain if the classes
    # can't be found in global scope. The trick here is that we change the
    # names momentarily just to obtain the error traces in str format.
    test_cls = type(test)
    module_orig = test_cls.__module__
    qualname_orig = test_cls.__qualname__
    self._rename_test(test, test.decoder.name, test.test_suite.name)

    test_result = TestResult()
    test(test_result)

    line = '.'
    if test_result.failures:
        line = 'F'
    elif test_result.errors:
        line = 'E'
    print(line, end='', flush=True)

    self._collect_results(test_result)
    self._rename_test(test, module_orig, qualname_orig)

    return test.test_vector

def startTest(self, test):
    """
    Starting the test, we redirect the output so that it does not show on the screen.
    """
    TestResult.startTest(self, test)

    # Start a new output text result
    self.output = StringIO()
    self._stdout.fp = self.output
    self._stderr.fp = self.output

    # Preserve the original output streams
    self.org_stderr = sys.stderr
    self.org_stdout = sys.stdout
    sys.stdout = self._stdout
    sys.stderr = self._stderr

    # Define the structure
    self.class_name = test.__class__.__name__
    self.method_name = test._testMethodName

    if self.class_name not in self.result["class_list"]:
        class_doc = [] if test.__doc__ is None else [
            item.strip() for item in test.__doc__.splitlines() if item.strip() != ""
        ]
        self.result["class_list"][self.class_name] = {
            "module": test.__module__,
            "description": class_doc,
            "success_count": 0,
            "error_count": 0,
            "failure_count": 0,
            "skip_count": 0,
            "expected_failure_count": 0,
            "unexpected_success_count": 0,
            "methods": {}
        }

    if self.method_name not in self.result["class_list"][self.class_name]["methods"]:
        method_doc = [""] if test._testMethodDoc is None else [
            item.strip() for item in test._testMethodDoc.splitlines() if item.strip() != ""
        ]
        self.result["class_list"][self.class_name]["methods"][self.method_name] = {
            "started": datetime.now(),
            "status": None,
            "stopped": None,
            "message": "",
            "error": None,
            "description": method_doc
        }

def run_test(test: TestCase, result: TestResult) -> bool:
    failed = False
    test_method = get_test_method(test)

    if fast_tests_only() and is_known_slow_test(test_method):
        return failed

    test_name = full_test_name(test)

    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)
    flush_caches_for_testing()

    if not hasattr(test, "_pre_setup"):
        # We are supposed to get here only when running a single test suite
        # on Python 3.5 or higher (the old import failure prefix is being
        # checked just in case). When running several test suites at once,
        # all import failures should be caught in deserialize_suite.
        import_failure_prefix_old = 'unittest.loader.ModuleImportFailure.'
        import_failure_prefix_new = 'unittest.loader._FailedTest.'
        if test_name.startswith(import_failure_prefix_old):
            actual_test_name = test_name[len(import_failure_prefix_old):]
            raise TestSuiteImportError(test_name=actual_test_name)
        elif test_name.startswith(import_failure_prefix_new):
            actual_test_name = test_name[len(import_failure_prefix_new):]
            raise TestSuiteImportError(test_name=actual_test_name)
        else:
            msg = "Test doesn't have _pre_setup; something is wrong."
            error_pre_setup = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
            result.addError(test, error_pre_setup)
        return True
    test._pre_setup()

    start_time = time.time()
    test(result)  # unittest will handle skipping, error, failure and success.
    delay = time.time() - start_time
    enforce_timely_test_completion(test_method, test_name, delay, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((delay, test_name, slowness_reason))

    test._post_teardown()
    return failed

def __init__(self, stream=None, descriptions=None, verbosity=None):
    self.result = {
        "success_count": 0,
        "error_count": 0,
        "failure_count": 0,
        "skip_count": 0,
        "expected_failure_count": 0,
        "unexpected_success_count": 0,
        "class_list": {},
    }
    TestResult.__init__(self, stream=stream, descriptions=descriptions, verbosity=verbosity)
    self._stderr = StandardDevice(sys.stderr)
    self._stdout = StandardDevice(sys.stdout)
    self.org_stderr = None
    self.org_stdout = None
    self.stream = stream
    self.verbosity = verbosity

def run(self, result: TestResult, debug: Optional[bool] = False) -> TestResult:
    """
    This function mostly contains the code from unittest.TestSuite.run. The need
    to override this function occurred because we use run_test to run the testcase.
    """
    topLevel = False
    if getattr(result, '_testRunEntered', False) is False:
        result._testRunEntered = topLevel = True  # type: ignore[attr-defined]

    for test in self:  # but this is correct. Taken from unittest.
        if result.shouldStop:
            break

        if isinstance(test, TestSuite):
            test.run(result, debug=debug)
        else:
            self._tearDownPreviousClass(test, result)  # type: ignore[attr-defined]
            self._handleModuleFixture(test, result)  # type: ignore[attr-defined]
            self._handleClassSetUp(test, result)  # type: ignore[attr-defined]
            result._previousTestClass = test.__class__  # type: ignore[attr-defined]

            if (getattr(test.__class__, '_classSetupFailed', False) or
                    getattr(result, '_moduleSetUpFailed', False)):
                continue
            failed = run_test(test, result)
            if failed or result.shouldStop:
                result.shouldStop = True
                break

    if topLevel:
        self._tearDownPreviousClass(None, result)  # type: ignore[attr-defined]
        self._handleModuleTearDown(result)  # type: ignore[attr-defined]
        result._testRunEntered = False  # type: ignore[attr-defined]
    return result

def _wait_for_test_done(cls, test: FirmwareTestClass, router: Router, done_event: DoneEvent) -> None:
    """
    Waits up to 5 minutes for the test to finish, handles its result and triggers the next job/test.

    :param test: test to execute
    :param router: the Router
    :param done_event: event which will be triggered when the task is finished
    """
    logging.debug("%sWait for test " + str(test), LoggerSetup.get_log_deep(2))
    try:
        async_result = cls._task_pool.apply_async(func=cls._execute_test, args=(test, router))
        result = async_result.get(300)  # wait 5 minutes or raise a TimeoutError
        logging.debug("%sTest done " + str(test), LoggerSetup.get_log_deep(1))
        logging.debug("%sFrom " + str(router), LoggerSetup.get_log_deep(2))
        cls._test_results.append((router.id, str(test), result))
    except Exception as e:  # TODO #105
        logging.error("%sTest raised an Exception: " + str(e), LoggerSetup.get_log_deep(1))
        result = TestResult()
        result._original_stdout = None
        result._original_stderr = None
        # result.addError(None, (type(exception), exception, None))
        # TODO exception handling for failed Tests
        cls._test_results.append((router.id, str(test), result))
    finally:
        cls.set_running_task(router, None)
        # logging.debug(str(cls._test_results))
        # start next test in the queue
        done_event.set()
        cls.__start_task(router, None)

def _execute_test(cls, test: FirmwareTestClass, router: Router) -> TestResult:
    if not isinstance(router, Router):
        raise ValueError("Chosen Router is not a real Router...")
    # verified: this method runs in a different process than the server
    logging.debug("%sExecute test " + str(test) + " on " + str(router), LoggerSetup.get_log_deep(2))

    test_suite = defaultTestLoader.loadTestsFromTestCase(test)

    # prepare all test cases
    for test_case in test_suite:
        logging.debug("%sTestCase " + str(test_case), LoggerSetup.get_log_deep(4))
        test_case.prepare(router)

    result = TestResult()

    cls.__setns(router)
    try:
        result = test_suite.run(result)  # TODO if debug set, run as debug()
    except Exception as e:
        logging.error("%sTestCase raised an exception", LoggerSetup.get_log_deep(3))
        logging.error("%s" + str(e), LoggerSetup.get_log_deep(3))
    finally:
        # I'm sorry for this dirty hack, but without it you get a
        # "TypeError: cannot serialize '_io.TextIOWrapper' object" because sys.stdout is not serializable.
        result._original_stdout = None
        result._original_stderr = None

    logging.debug("%sResult from test " + str(result), LoggerSetup.get_log_deep(3))
    return result

def run(self, result: TestResult, debug: Optional[bool] = False) -> TestResult:
    """
    This function mostly contains the code from unittest.TestSuite.run. The need
    to override this function occurred because we use run_test to run the testcase.
    """
    topLevel = False
    if getattr(result, '_testRunEntered', False) is False:
        result._testRunEntered = topLevel = True

    for test in self:  # but this is correct. Taken from unittest.
        if result.shouldStop:
            break

        if isinstance(test, TestSuite):
            test.run(result, debug=debug)
        else:
            self._tearDownPreviousClass(test, result)  # type: ignore
            self._handleModuleFixture(test, result)  # type: ignore
            self._handleClassSetUp(test, result)  # type: ignore
            result._previousTestClass = test.__class__

            if (getattr(test.__class__, '_classSetupFailed', False) or
                    getattr(result, '_moduleSetUpFailed', False)):
                continue
            failed = run_test(test, result)
            if failed or result.shouldStop:
                result.shouldStop = True
                break

    if topLevel:
        self._tearDownPreviousClass(None, result)  # type: ignore
        self._handleModuleTearDown(result)  # type: ignore
        result._testRunEntered = False
    return result

def addError(self, *args: Any, **kwargs: Any) -> None:
    TestResult.addError(self, *args, **kwargs)
    test_name = full_test_name(args[0])
    self.failed_tests.append(test_name)

def start(cls, config_path: str = CONFIG_PATH) -> None:
    """
    Starts the runtime server with all components.

    :param config_path: Path to an alternative config directory
    """
    # server has to be run with root rights - except on Travis CI
    if not os.geteuid() == 0 and not os.environ.get('TRAVIS'):
        sys.exit('Script must be run as root')

    cls._stopped = Lock()
    signal.signal(signal.SIGTERM, cls._signal_term_handler)
    cls.CONFIG_PATH = config_path

    # set the config_path at the manager
    ConfigManager.set_config_path(config_path)

    # read the Vlan mode from the config
    vlan_activate = ConfigManager.get_server_property("Vlan_On")
    cls.VLAN = vlan_activate

    # read from the config whether debug mode is on
    log_level = int(ConfigManager.get_server_property("Log_Level"))
    debug_mode = False
    if log_level == 10:
        debug_mode = True
    cls.DEBUG = debug_mode

    setproctitle("fftserver")

    cls._server_stop_event = Event()
    cls._pid = os.getpid()

    # create instance and give params to the logger object
    LoggerSetup.setup(log_level)

    # load Router configs
    cls.__load_configuration()

    for router in cls.get_routers():
        cls._running_task.append(None)
        cls._waiting_tasks.append(deque())

    # start process/thread pool for job and test handling
    cls._max_subprocesses = len(cls._routers) + 1  # plus one for the power strip
    cls._task_pool = Pool(processes=cls._max_subprocesses, initializer=init_process,
                          initargs=(cls._server_stop_event,), maxtasksperchild=1)
    cls._task_wait_executor = ThreadPoolExecutor(max_workers=(cls._max_subprocesses * 2))

    # start thread for multiprocess stop wait
    t = threading.Thread(target=cls._close_wait)
    t.start()

    # add Namespace and Vlan for each Router
    if cls.VLAN:
        cls._nv_assistent = NVAssistent("eth0")

        for router in cls.get_routers():
            logging.debug("Add Namespace and Vlan for Router(" + str(router.id) + ")")
            cls._nv_assistent.create_namespace_vlan(router)

        # add Namespace and Vlan for 1 Powerstrip (expand to more if necessary)
        logging.debug("Add Namespace and Vlan for Powerstrip")
        cls._nv_assistent.create_namespace_vlan(cls.get_power_strip())

        # update Router
        cls.router_online(None, update_all=True, blocked=True)
        cls.update_router_info(None, update_all=True)

    # open database and read old test results
    try:
        with shelve.open('test_results', 'c') as db:
            # read test values
            key_list = db.keys()
            for k in key_list:
                t = TestResult()
                dbt = db[str(k)]
                t.failures = dbt.failures
                t.errors = dbt.errors
                t.testsRun = dbt.testsRun
                t._original_stdout = None
                t._original_stderr = None
                cls._test_results.append((dbt.router_id, dbt.test_name, t))
    except Exception as e:
        logging.error("Error while reading test results from the DB: {0}".format(e))

    logging.info("Runtime Server started")

    try:
        cls._ipc_server.start_ipc_server(cls, True)  # serves forever - works like a while(true)
    except (KeyboardInterrupt, SystemExit):
        logging.info("Received an interrupt signal")
        cls.stop()

def addError(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    TestResult.addError(self, *args, **kwargs)

def addSuccess(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    TestResult.addSuccess(self, *args, **kwargs)

def addFailure(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    TestResult.addFailure(self, *args, **kwargs)
    test_name = full_test_name(args[0])
    self.failed_tests.append(test_name)

def startTest(self, test):
    # type: (TestCase) -> None
    TestResult.startTest(self, test)
    self.stream.writeln("Running {}".format(full_test_name(test)))
    self.stream.flush()

def addSuccess(self, *args: Any, **kwargs: Any) -> None:
    TestResult.addSuccess(self, *args, **kwargs)

def addSkip(self, test: TestCase, reason: Text) -> None:
    TestResult.addSkip(self, test, reason)
    self.stream.writeln("** Skipping {}: {}".format(full_test_name(test), reason))
    self.stream.flush()

def addFailure(self, *args: Any, **kwargs: Any) -> None:
    TestResult.addFailure(self, *args, **kwargs)
    test_name = full_test_name(args[0])
    self.failed_tests.append(test_name)

def startTest(self, test: TestCase) -> None:
    TestResult.startTest(self, test)
    self.stream.writeln("Running {}".format(full_test_name(test)))
    self.stream.flush()

def __init__(self):
    TestResult.__init__(self)
    self.err = None

def addSkip(self, test: TestCase, reason: str) -> None:
    TestResult.addSkip(self, test, reason)
    self.stream.writeln("** Skipping {}: {}".format(full_test_name(test), reason))
    self.stream.flush()

def addFailure(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    TestResult.addFailure(self, *args, **kwargs)

def stopTest(self, test):
    self.silence_output()
    self.result["class_list"][self.class_name]["methods"][self.method_name]["stopped"] = \
        datetime.now()
    TestResult.stopTest(self, test)

def run_test(test: TestCase, result: TestResult) -> bool:
    failed = False
    test_method = get_test_method(test)

    if fast_tests_only() and is_known_slow_test(test_method):
        return failed

    test_name = full_test_name(test)

    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)
    flush_caches_for_testing()

    if not hasattr(test, "_pre_setup"):
        # test_name is likely of the form unittest.loader.ModuleImportFailure.zerver.tests.test_upload
        import_failure_prefix = 'unittest.loader.ModuleImportFailure.'
        if test_name.startswith(import_failure_prefix):
            actual_test_name = test_name[len(import_failure_prefix):]
            error_msg = ("\nActual test to be run is %s, but import failed.\n"
                         "Importing test module directly to generate clearer "
                         "traceback:\n") % (actual_test_name,)
            result.addInfo(test, error_msg)

            try:
                command = [sys.executable, "-c", "import %s" % (actual_test_name,)]
                msg = "Import test command: `%s`" % (' '.join(command),)
                result.addInfo(test, msg)
                subprocess.check_call(command)
            except subprocess.CalledProcessError:
                msg = ("If that traceback is confusing, try doing the "
                       "import inside `./manage.py shell`")
                result.addInfo(test, msg)
                result.addError(test, sys.exc_info())
                return True

            msg = ("Import unexpectedly succeeded! Something is wrong. Try "
                   "running `import %s` inside `./manage.py shell`.\n"
                   "If that works, you may have introduced an import "
                   "cycle.") % (actual_test_name,)
            import_error = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
            result.addError(test, import_error)
            return True
        else:
            msg = "Test doesn't have _pre_setup; something is wrong."
            error_pre_setup = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
            result.addError(test, error_pre_setup)
            return True
    test._pre_setup()

    start_time = time.time()
    test(result)  # unittest will handle skipping, error, failure and success.
    delay = time.time() - start_time
    enforce_timely_test_completion(test_method, test_name, delay, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((delay, test_name, slowness_reason))

    test._post_teardown()
    return failed

def addFailure(self, *args: Any, **kwargs: Any) -> None:
    TestResult.addFailure(self, *args, **kwargs)
    test_name = args[0].id()
    self.failed_tests.append(test_name)

def startTest(self, test: TestCase) -> None:
    TestResult.startTest(self, test)
    self.stream.write(f"Running {test.id()}\n")
    self.stream.flush()

def addSkip(self, test: TestCase, reason: str) -> None:
    TestResult.addSkip(self, test, reason)
    self.stream.write(f"** Skipping {test.id()}: {reason}\n")
    self.stream.flush()

def addSkip(self, test, reason):
    # type: (TestCase, Text) -> None
    TestResult.addSkip(self, test, reason)
    self.stream.writeln("** Skipping {}: {}".format(full_test_name(test), reason))
    self.stream.flush()

def addError(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    TestResult.addError(self, *args, **kwargs)
    test_name = full_test_name(args[0])
    self.failed_tests.append(test_name)