def add_error(self, test, err, unittest_fn, error_type):
    if error_type == FAIL:
        self.log_error(test, err, 'addFailure')
        error_type_str = colorize("FAIL", RED)
    elif error_type == ERROR:
        self.log_error(test, err, 'addError')
        error_type_str = colorize("ERROR", RED)
    else:
        raise Exception('Error type %s cannot be used to record an '
                        'error or a failure' % error_type)

    unittest_fn(self, test, err)
    if self.current_test_case_info:
        self.result_string = "%s [ temp dir used by test case: %s ]" % \
            (error_type_str, self.current_test_case_info.tempdir)
        self.symlink_failed()
        self.failed_test_cases_info.add(self.current_test_case_info)
        if is_core_present(self.current_test_case_info.tempdir):
            if not self.current_test_case_info.core_crash_test:
                if isinstance(test, unittest.suite._ErrorHolder):
                    test_name = str(test)
                else:
                    test_name = "'{}' ({})".format(
                        get_testcase_doc_name(test), test.id())
                self.current_test_case_info.core_crash_test = test_name
            self.core_crash_test_cases_info.add(
                self.current_test_case_info)
    else:
        self.result_string = '%s [no temp dir]' % error_type_str

    self.send_result_through_pipe(test, error_type)
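A minimal sketch (an assumption, not verbatim from the source) of how add_error is typically invoked: addFailure and addError act as thin wrappers that pass the matching unittest.TestResult recorder together with the FAIL/ERROR constants used above.

# Hedged sketch: assumes the FAIL/ERROR constants and the add_error()
# signature shown above; the real wrappers may differ.
def addFailure(self, test, err):
    # delegate failure bookkeeping to the common add_error() path
    self.add_error(test, err, unittest.TestResult.addFailure, FAIL)

def addError(self, test, err):
    # delegate error bookkeeping to the common add_error() path
    self.add_error(test, err, unittest.TestResult.addError, ERROR)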
def print_results(self):
    print('')
    print(double_line_delim)
    print('TEST RESULTS:')
    print(' Scheduled tests: {}'.format(self.all_testcases))
    print(' Executed tests: {}'.format(self[TEST_RUN]))
    print(' Passed tests: {}'.format(
        colorize(str(self[PASS]), GREEN)))
    if self[SKIP] > 0:
        print(' Skipped tests: {}'.format(
            colorize(str(self[SKIP]), YELLOW)))
    if self.not_executed > 0:
        print(' Not Executed tests: {}'.format(
            colorize(str(self.not_executed), RED)))
    if self[FAIL] > 0:
        print(' Failures: {}'.format(
            colorize(str(self[FAIL]), RED)))
    if self[ERROR] > 0:
        print(' Errors: {}'.format(
            colorize(str(self[ERROR]), RED)))

    if self.all_failed > 0:
        print('FAILURES AND ERRORS IN TESTS:')
        for result in self.results_per_suite:
            failed_testcase_ids = result[FAIL]
            errored_testcase_ids = result[ERROR]
            old_testcase_name = None
            if failed_testcase_ids:
                for failed_test_id in failed_testcase_ids:
                    new_testcase_name, test_name = \
                        result.get_testcase_names(failed_test_id)
                    if new_testcase_name != old_testcase_name:
                        print(' Testcase name: {}'.format(
                            colorize(new_testcase_name, RED)))
                        old_testcase_name = new_testcase_name
                    print(' FAILURE: {} [{}]'.format(
                        colorize(test_name, RED), failed_test_id))
            if errored_testcase_ids:
                for errored_test_id in errored_testcase_ids:
                    new_testcase_name, test_name = \
                        result.get_testcase_names(errored_test_id)
                    if new_testcase_name != old_testcase_name:
                        print(' Testcase name: {}'.format(
                            colorize(new_testcase_name, RED)))
                        old_testcase_name = new_testcase_name
                    print(' ERROR: {} [{}]'.format(
                        colorize(test_name, RED), errored_test_id))

    if self.testsuites_no_tests_run:
        print('TESTCASES WHERE NO TESTS WERE SUCCESSFULLY EXECUTED:')
        tc_classes = set()
        for testsuite in self.testsuites_no_tests_run:
            for testcase in testsuite:
                tc_classes.add(get_testcase_doc_name(testcase))
        for tc_class in tc_classes:
            print(' {}'.format(colorize(tc_class, RED)))

    print(double_line_delim)
    print('')
def print_results(self):
    print('')
    print(double_line_delim)
    print('TEST RESULTS:')
    print(' Scheduled tests: {}'.format(self.all_testcases))
    print(' Executed tests: {}'.format(self[TEST_RUN]))
    print(' Passed tests: {}'.format(
        colorize(str(self[PASS]), GREEN)))
    if self[SKIP] > 0:
        print(' Skipped tests: {}'.format(
            colorize(str(self[SKIP]), YELLOW)))
    if self.not_executed > 0:
        print(' Not Executed tests: {}'.format(
            colorize(str(self.not_executed), RED)))
    if self[FAIL] > 0:
        print(' Failures: {}'.format(
            colorize(str(self[FAIL]), RED)))
    if self[ERROR] > 0:
        print(' Errors: {}'.format(
            colorize(str(self[ERROR]), RED)))

    if self.all_failed > 0:
        print('FAILURES AND ERRORS IN TESTS:')
        for result in self.results_per_suite:
            failed_testcase_ids = result[FAIL]
            errored_testcase_ids = result[ERROR]
            old_testcase_name = None
            if failed_testcase_ids or errored_testcase_ids:
                for failed_test_id in failed_testcase_ids:
                    new_testcase_name, test_name = \
                        result.get_testcase_names(failed_test_id)
                    if new_testcase_name != old_testcase_name:
                        print(' Testcase name: {}'.format(
                            colorize(new_testcase_name, RED)))
                        old_testcase_name = new_testcase_name
                    print(' FAILURE: {} [{}]'.format(
                        colorize(test_name, RED), failed_test_id))
                for errored_test_id in errored_testcase_ids:
                    new_testcase_name, test_name = \
                        result.get_testcase_names(errored_test_id)
                    if new_testcase_name != old_testcase_name:
                        print(' Testcase name: {}'.format(
                            colorize(new_testcase_name, RED)))
                        old_testcase_name = new_testcase_name
                    print(' ERROR: {} [{}]'.format(
                        colorize(test_name, RED), errored_test_id))

    if self.testsuites_no_tests_run:
        print('TESTCASES WHERE NO TESTS WERE SUCCESSFULLY EXECUTED:')
        tc_classes = set()
        for testsuite in self.testsuites_no_tests_run:
            for testcase in testsuite:
                tc_classes.add(get_testcase_doc_name(testcase))
        for tc_class in tc_classes:
            print(' {}'.format(colorize(tc_class, RED)))

    print(double_line_delim)
    print('')
def addError(self, test, err):
    """
    Record a test error result

    :param test:
    :param err: error message
    """
    if hasattr(test, 'logger'):
        test.logger.debug("--- addError() %s.%s(%s) called, err is %s" %
                          (test.__class__.__name__,
                           test._testMethodName,
                           test._testMethodDoc, err))
        test.logger.debug("formatted exception is:\n%s" %
                          "".join(format_exception(*err)))
    unittest.TestResult.addError(self, test, err)
    if hasattr(test, 'tempdir'):
        self.result_string = colorize("ERROR", RED) + \
            ' [ temp dir used by test case: ' + test.tempdir + ' ]'
        self.symlink_failed(test)
    else:
        self.result_string = colorize("ERROR", RED) + ' [no temp dir]'
def print_results(self):
    print('')
    print(double_line_delim)
    print('TEST RESULTS:')
    print(' Executed tests: {}'.format(self.all_testcases))
    print(' Passed tests: {}'.format(
        colorize(str(self.passed), GREEN)))
    if self[self.failures_id] > 0:
        print(' Failures: {}'.format(
            colorize(str(self[self.failures_id]), RED)))
    if self[self.errors_id] > 0:
        print(' Errors: {}'.format(
            colorize(str(self[self.errors_id]), RED)))
    if self[self.skipped_id] > 0:
        print(' Skipped tests: {}'.format(
            colorize(str(self[self.skipped_id]), YELLOW)))
    if self[self.expectedFailures_id] > 0:
        print(' Expected failures: {}'.format(
            colorize(str(self[self.expectedFailures_id]), GREEN)))
    if self[self.unexpectedSuccesses_id] > 0:
        print(' Unexpected successes: {}'.format(
            colorize(str(self[self.unexpectedSuccesses_id]), YELLOW)))

    if self.all_failed > 0:
        print('FAILED TESTS:')
        for testcase_class, suite_results in \
                self.results_per_suite.items():
            failed_testcases = suite_results[self.failures_id]
            errored_testcases = suite_results[self.errors_id]
            if len(failed_testcases) or len(errored_testcases):
                print(' Testcase name: {}'.format(
                    colorize(testcase_class, RED)))
                for failed_test in failed_testcases:
                    print(' FAILED: {}'.format(
                        colorize(get_test_description(
                            descriptions, failed_test), RED)))
                for failed_test in errored_testcases:
                    print(' ERRORED: {}'.format(
                        colorize(get_test_description(
                            descriptions, failed_test), RED)))

    print(double_line_delim)
    print('')
def addSuccess(self, test):
    """
    Record a test succeeded result

    :param test:
    """
    if hasattr(test, 'logger'):
        test.logger.debug("--- addSuccess() %s.%s(%s) called"
                          % (test.__class__.__name__,
                             test._testMethodName,
                             test._testMethodDoc))
    unittest.TestResult.addSuccess(self, test)
    self.result_string = colorize("OK", GREEN)
def addSkip(self, test, reason):
    """
    Record a test skipped.

    :param test:
    :param reason:
    """
    if hasattr(test, 'logger'):
        test.logger.debug("--- addSkip() %s.%s(%s) called, reason is %s"
                          % (test.__class__.__name__,
                             test._testMethodName,
                             test._testMethodDoc, reason))
    unittest.TestResult.addSkip(self, test, reason)
    self.result_string = colorize("SKIP", YELLOW)
def addSuccess(self, test):
    """
    Record a test succeeded result

    :param test:
    """
    if self.current_test_case_info:
        self.current_test_case_info.logger.debug(
            "--- addSuccess() %s.%s(%s) called" %
            (test.__class__.__name__,
             test._testMethodName,
             test._testMethodDoc))
    unittest.TestResult.addSuccess(self, test)
    self.result_string = colorize("OK", GREEN)

    self.send_result_through_pipe(test, PASS)
def addSkip(self, test, reason):
    """
    Record a test skipped.

    :param test:
    :param reason:
    """
    if self.current_test_case_info:
        self.current_test_case_info.logger.debug(
            "--- addSkip() %s.%s(%s) called, reason is %s" %
            (test.__class__.__name__,
             test._testMethodName,
             test._testMethodDoc, reason))
    unittest.TestResult.addSkip(self, test, reason)
    self.result_string = colorize("SKIP", YELLOW)

    self.send_result_through_pipe(test, SKIP)
def setUpClass(cls):
    """
    Perform class setup before running the testcase
    Remove shared memory files, start vpp and connect the vpp-api
    """
    gc.collect()  # run garbage collection first
    random.seed()
    cls.logger = get_logger(cls.__name__)
    if hasattr(cls, 'parallel_handler'):
        cls.logger.addHandler(cls.parallel_handler)
        cls.logger.propagate = False
    cls.tempdir = tempfile.mkdtemp(
        prefix='vpp-unittest-%s-' % cls.__name__)
    cls.stats_sock = "%s/stats.sock" % cls.tempdir
    cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
    cls.file_handler.setFormatter(
        Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
                  datefmt="%H:%M:%S"))
    cls.file_handler.setLevel(DEBUG)
    cls.logger.addHandler(cls.file_handler)
    cls.shm_prefix = os.path.basename(cls.tempdir)
    os.chdir(cls.tempdir)
    cls.logger.info("Temporary dir is %s, shm prefix is %s",
                    cls.tempdir, cls.shm_prefix)
    cls.setUpConstants()
    cls.reset_packet_infos()
    cls._captures = []
    cls._zombie_captures = []
    cls.verbose = 0
    cls.vpp_dead = False
    cls.registry = VppObjectRegistry()
    cls.vpp_startup_failed = False
    cls.reporter = KeepAliveReporter()
    # need to catch exceptions here because if we raise, then the cleanup
    # doesn't get called and we might end with a zombie vpp
    try:
        cls.run_vpp()
        cls.reporter.send_keep_alive(cls, 'setUpClass')
        VppTestResult.current_test_case_info = TestCaseInfo(
            cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
        cls.vpp_stdout_deque = deque()
        cls.vpp_stderr_deque = deque()
        cls.pump_thread_stop_flag = Event()
        cls.pump_thread_wakeup_pipe = os.pipe()
        cls.pump_thread = Thread(target=pump_output, args=(cls,))
        cls.pump_thread.daemon = True
        cls.pump_thread.start()
        if cls.debug_gdb or cls.debug_gdbserver:
            read_timeout = 0
        else:
            read_timeout = 5
        cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls,
                                   read_timeout)
        if cls.step:
            hook = StepHook(cls)
        else:
            hook = PollHook(cls)
        cls.vapi.register_hook(hook)
        cls.wait_for_stats_socket()
        cls.statistics = VPPStats(socketname=cls.stats_sock)
        try:
            hook.poll_vpp()
        except VppDiedError:
            cls.vpp_startup_failed = True
            cls.logger.critical(
                "VPP died shortly after startup, check the"
                " output to standard error for possible cause")
            raise
        try:
            cls.vapi.connect()
        except Exception:
            try:
                cls.vapi.disconnect()
            except Exception:
                pass
            if cls.debug_gdbserver:
                print(colorize("You're running VPP inside gdbserver but "
                               "VPP-API connection failed, did you forget "
                               "to 'continue' VPP from within gdb?", RED))
            raise
    except Exception:
        try:
            cls.quit()
        except Exception:
            pass
        raise
def print_header(test):
    if not hasattr(test.__class__, '_header_printed'):
        print(double_line_delim)
        print(colorize(getdoc(test).splitlines()[0], GREEN))
        print(double_line_delim)
        test.__class__._header_printed = True
def print_results(self):
    print('')
    print(double_line_delim)
    print('TEST RESULTS:')

    def indent_results(lines):
        lines = list(filter(None, lines))
        maximum = max(lines, key=lambda x: x.index(":"))
        maximum = 4 + maximum.index(":")
        for l in lines:
            padding = " " * (maximum - l.index(":"))
            print(f"{padding}{l}")

    indent_results([
        f'Scheduled tests: {self.all_testcases}',
        f'Executed tests: {self[TEST_RUN]}',
        f'Passed tests: {colorize(self[PASS], GREEN)}',
        f'Skipped tests: {colorize(self[SKIP], YELLOW)}'
        if self[SKIP] else None,
        f'Not Executed tests: {colorize(self.not_executed, RED)}'
        if self.not_executed else None,
        f'Failures: {colorize(self[FAIL], RED)}' if self[FAIL] else None,
        f'Errors: {colorize(self[ERROR], RED)}' if self[ERROR] else None,
        'Tests skipped due to lack of CPUS: '
        f'{colorize(self[SKIP_CPU_SHORTAGE], YELLOW)}'
        if self[SKIP_CPU_SHORTAGE] else None,
    ])

    if self.all_failed > 0:
        print('FAILURES AND ERRORS IN TESTS:')
        for result in self.results_per_suite:
            failed_testcase_ids = result[FAIL]
            errored_testcase_ids = result[ERROR]
            old_testcase_name = None
            if failed_testcase_ids:
                for failed_test_id in failed_testcase_ids:
                    new_testcase_name, test_name = \
                        result.get_testcase_names(failed_test_id)
                    if new_testcase_name != old_testcase_name:
                        print(' Testcase name: {}'.format(
                            colorize(new_testcase_name, RED)))
                        old_testcase_name = new_testcase_name
                    print(' FAILURE: {} [{}]'.format(
                        colorize(test_name, RED), failed_test_id))
            if errored_testcase_ids:
                for errored_test_id in errored_testcase_ids:
                    new_testcase_name, test_name = \
                        result.get_testcase_names(errored_test_id)
                    if new_testcase_name != old_testcase_name:
                        print(' Testcase name: {}'.format(
                            colorize(new_testcase_name, RED)))
                        old_testcase_name = new_testcase_name
                    print(' ERROR: {} [{}]'.format(
                        colorize(test_name, RED), errored_test_id))

    if self.testsuites_no_tests_run:
        print('TESTCASES WHERE NO TESTS WERE SUCCESSFULLY EXECUTED:')
        tc_classes = set()
        for testsuite in self.testsuites_no_tests_run:
            for testcase in testsuite:
                tc_classes.add(get_testcase_doc_name(testcase))
        for tc_class in tc_classes:
            print(' {}'.format(colorize(tc_class, RED)))

    if self[SKIP_CPU_SHORTAGE]:
        print()
        print(colorize(' SOME TESTS WERE SKIPPED BECAUSE THERE ARE NOT'
                       ' ENOUGH CPUS AVAILABLE', YELLOW))

    print(double_line_delim)
    print('')
        failfast=failfast,
        print_summary=True).run(full_suite)
    was_successful = result.wasSuccessful()
    if not was_successful:
        for test_case_info in result.failed_test_cases_info:
            handle_failed_suite(test_case_info.logger,
                                test_case_info.tempdir,
                                test_case_info.vpp_pid)
            if test_case_info in result.core_crash_test_cases_info:
                check_and_handle_core(test_case_info.vpp_bin_path,
                                      test_case_info.tempdir,
                                      test_case_info.core_crash_test)

    if cpu_shortage:
        print()
        print(colorize('SOME TESTS WERE SKIPPED BECAUSE THERE ARE NOT'
                       ' ENOUGH CPUS AVAILABLE', YELLOW))
        print()
    sys.exit(not was_successful)
else:
    print('Running each VPPTestCase in a separate background process'
          f' with at most {max_concurrent_tests} parallel python test '
          'process(es)')
    exit_code = 0
    while suites and attempts > 0:
        results = run_forked(suites)
        exit_code, suites = parse_results(results)
        attempts -= 1
        if exit_code == 0:
            print('Test run was successful')
        else:
            print('%s attempt(s) left.' % attempts)
def print_header(cls):
    if not hasattr(cls, '_header_printed'):
        print(double_line_delim)
        print(colorize(getdoc(cls).splitlines()[0], GREEN))
        print(double_line_delim)
        cls._header_printed = True
def setUpClass(cls):
    """
    Perform class setup before running the testcase
    Remove shared memory files, start vpp and connect the vpp-api
    """
    gc.collect()  # run garbage collection first
    random.seed()
    cls.logger = getLogger(cls.__name__)
    cls.tempdir = tempfile.mkdtemp(prefix='vpp-unittest-%s-' %
                                   cls.__name__)
    cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
    cls.file_handler.setFormatter(
        Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
                  datefmt="%H:%M:%S"))
    cls.file_handler.setLevel(DEBUG)
    cls.logger.addHandler(cls.file_handler)
    cls.shm_prefix = cls.tempdir.split("/")[-1]
    os.chdir(cls.tempdir)
    cls.logger.info("Temporary dir is %s, shm prefix is %s",
                    cls.tempdir, cls.shm_prefix)
    cls.setUpConstants()
    cls.reset_packet_infos()
    cls._captures = []
    cls._zombie_captures = []
    cls.verbose = 0
    cls.vpp_dead = False
    cls.registry = VppObjectRegistry()
    cls.vpp_startup_failed = False
    cls.reporter = KeepAliveReporter()
    # need to catch exceptions here because if we raise, then the cleanup
    # doesn't get called and we might end with a zombie vpp
    try:
        cls.run_vpp()
        cls.reporter.send_keep_alive(cls)
        cls.vpp_stdout_deque = deque()
        cls.vpp_stderr_deque = deque()
        cls.pump_thread_stop_flag = Event()
        cls.pump_thread_wakeup_pipe = os.pipe()
        cls.pump_thread = Thread(target=pump_output, args=(cls, ))
        cls.pump_thread.daemon = True
        cls.pump_thread.start()
        cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls)
        if cls.step:
            hook = StepHook(cls)
        else:
            hook = PollHook(cls)
        cls.vapi.register_hook(hook)
        cls.sleep(0.1, "after vpp startup, before initial poll")
        try:
            hook.poll_vpp()
        except:
            cls.vpp_startup_failed = True
            cls.logger.critical(
                "VPP died shortly after startup, check the"
                " output to standard error for possible cause")
            raise
        try:
            cls.vapi.connect()
        except:
            if cls.debug_gdbserver:
                print(colorize("You're running VPP inside gdbserver but "
                               "VPP-API connection failed, did you forget "
                               "to 'continue' VPP from within gdb?", RED))
            raise
    except:
        t, v, tb = sys.exc_info()
        try:
            cls.quit()
        except:
            pass
        # Python 2 style re-raise of the original exception with its traceback
        raise t, v, tb
                test_case_info.vpp_pid,
                config.vpp,
            )
            if test_case_info in result.core_crash_test_cases_info:
                check_and_handle_core(
                    test_case_info.vpp_bin_path,
                    test_case_info.tempdir,
                    test_case_info.core_crash_test,
                )

    if cpu_shortage:
        print()
        print(
            colorize(
                "SOME TESTS WERE SKIPPED BECAUSE THERE ARE NOT"
                " ENOUGH CPUS AVAILABLE",
                YELLOW,
            )
        )
        print()
    sys.exit(not was_successful)
else:
    print(
        "Running each VPPTestCase in a separate background process"
        f" with at most {max_concurrent_tests} parallel python test "
        "process(es)"
    )
    exit_code = 0
    while suites and attempts > 0:
        results = run_forked(suites)
        exit_code, suites = parse_results(results)