def show_failures_and_errors() -> None:
    results = {}
    results['is_regex'] = TestResult()
    results['all_regex_permutations'] = TestResult()
    results['match_regex'] = TestResult()
    results['build_regex_tree'] = TestResult()
    is_regex_suite.run(results['is_regex'])
    all_regex_permutations_suite.run(results['all_regex_permutations'])
    match_regex_suite.run(results['match_regex'])
    build_regex_tree_suite.run(results['build_regex_tree'])
    failures = {}
    for case in results.keys():
        failures[case] = [e[0]._testMethodName for e in results[case].failures]
    errors = {}
    for case in results.keys():
        errors[case] = [e[0]._testMethodName for e in results[case].errors]
    for (case, methods) in failures.items():
        for m in methods:
            print("failure: {}.{}".format(case, m))
    for (case, methods) in errors.items():
        for m in methods:
            print("error: {}.{}".format(case, m))
def test_multiple_with_setup(self):
    from nose.tools import with_setup
    from nose.case import FunctionTestCase
    from unittest import TestResult
    called = []

    def test():
        called.append('test')

    def test2():
        called.append('test2')

    def test3():
        called.append('test3')

    def s1():
        called.append('s1')

    def s2():
        called.append('s2')

    def s3():
        called.append('s3')

    def t1():
        called.append('t1')

    def t2():
        called.append('t2')

    def t3():
        called.append('t3')

    ws1 = with_setup(s1, t1)(test)
    case1 = FunctionTestCase(ws1)
    case1(TestResult())
    self.assertEqual(called, ['s1', 'test', 't1'])

    called[:] = []
    ws2 = with_setup(s2, t2)(test2)
    ws2 = with_setup(s1, t1)(ws2)
    case2 = FunctionTestCase(ws2)
    case2(TestResult())
    self.assertEqual(called, ['s1', 's2', 'test2', 't2', 't1'])

    called[:] = []
    ws3 = with_setup(s3, t3)(test3)
    ws3 = with_setup(s2, t2)(ws3)
    ws3 = with_setup(s1, t1)(ws3)
    case3 = FunctionTestCase(ws3)
    case3(TestResult())
    self.assertEqual(called, ['s1', 's2', 's3', 'test3', 't3', 't2', 't1'])
def test_validationNotRunForSkip(self):
    """
    If the decorated test raises L{SkipTest} then the logging validation
    is also skipped.
    """
    class MyTest(TestCase):
        recorded = False

        def record(self, logger):
            self.recorded = True

        @validateLogging(record)
        def runTest(self, logger):
            raise SkipTest("Do not run this test.")

    test = MyTest()
    result = TestResult()
    test.run(result)
    # Verify that the validation function did not run and that the test was
    # nevertheless marked as a skip with the correct reason.
    self.assertEqual(
        (test.recorded, result.skipped, result.errors, result.failures),
        (False, [(test, "Do not run this test.")], [], []))
def test_step_with_teardown_class_failure(self):
    tinc_test_case = MockScenarioTestCaseWithClassFixtures(
        'test_step_with_teardown_class_failure')
    tinc_test_case.__class__.__unittest_skip__ = False
    result = TestResult()
    tinc_test_case.run(result)
    self.assertEqual(len(result.failures), 1)
    self.assertTrue('tearDownClass failed for' in result.failures[0][1])
def test_signalledExit(self):
    """
    An error should be reported if the JavaScript interpreter exits
    because it received a signal.
    """
    segfault = FilePath(self.mktemp())
    segfault.setContent("""\
#!/usr/bin/python
# Generate an unhandled SIGSEGV for this process immediately upon import.
import os, signal
os.kill(os.getpid(), signal.SIGSEGV)
""")

    def stubFinder():
        return sys.executable

    def stubScript(testModule):
        return segfault.path

    self.case.findJavascriptInterpreter = stubFinder
    self.case.makeScript = stubScript
    result = TestResult()
    self.case.run(result)
    self.assertEqual(len(result.errors), 1)
    self.assertEqual(
        result.errors[0][1],
        'Exception: JavaScript interpreter exited due to signal 11\n')
def start_test(self):
    print "=============================="
    print "TEST " + self.testclass.__name__
    print "=============================="
    testresult = TestResult()
    ui = self.testclass(self.testcase)
    print
    print "### SETUP"
    print "TESTCASE: " + self.testcase
    print
    ui.setSeleniumServerInfo(self.selenium_server_ip, self.selenium_server_port)
    ui.setUIInfo(self.ui_ip, self.port)
    ui.setUserInfo(self.accountname, self.username, self.password)
    print
    print "### TEST"
    ui.run(testresult)
    print
    print "### RESULT"
    print "Failures: " + str(len(testresult.failures))
    if len(testresult.failures) > 0:
        print testresult.failures
    print "Errors: " + str(len(testresult.errors))
    if len(testresult.errors) > 0:
        print testresult.errors
    print
    print "=============================="
    print "END OF TEST : " + self.testclass.__name__
    print "=============================="
    exit(len(testresult.failures) + len(testresult.errors))
def test_unexpected_warning(self):
    msg = 'Testing unexpected warnings, nothing to see here.'
    category = ResourceWarning
    for parents in ((AzulUnitTestCase,),
                    (AzulUnitTestCase, AlwaysTearDownTestCase),
                    (AlwaysTearDownTestCase, AzulUnitTestCase)):
        with self.subTest(parents=parents):

            class Test(*parents):

                def test(self):
                    warnings.warn(message=msg, category=category)

            case = Test('test')
            suite = TestSuite()
            result = TestResult()
            suite.addTest(case)
            suite.run(result)
            self.assertEqual(1, result.testsRun)
            failed_test, trace_back = cast(Tuple[Any, str], one(result.errors))
            self.assertEqual(
                f'tearDownClass ({__name__}.{Test.__qualname__})',
                str(failed_test))
            error_line = trace_back.splitlines()[-1]
            self.assertRegex(error_line, '^AssertionError')
            self.assertIn(str(category(msg)), error_line)
def runSuite(title, suite, summary):
    '''
    Run the indicated test suite, accumulating statistics and the title
    in the summary. The summary is a list of strings.
    '''
    splash(title)
    sys.argv = [""]
    results = TestResult()
    suite.run(results)
    summary.append(("Summary of %s " % title) + VISUAL_SEPARATOR)
    summary.append("* Ran %i tests with %i failures and %i errors."
                   % (results.testsRun, len(results.failures), len(results.errors)))
    if results.failures:
        summary.append("* Failed tests were:")
        for test, trace in results.failures:
            print("FAIL: ", test, "(unit test)")
            print(trace)
            summary.append(repr(test))
    if results.errors:
        summary.append("* Tests in error were:")
        for test, trace in results.errors:
            print("ERROR: ", test, "(unit test)")
            print(trace)
            summary.append(repr(test))
    return summary
def test_multiple_steps_with_multiple_class_fixtures(self):
    tinc_test_case = MockScenarioTestCaseWithClassFixtures(
        'test_multiple_steps_with_multiple_class_fixtures')
    tinc_test_case.__class__.__unittest_skip__ = False
    result = TestResult()
    tinctest.models.scenario.test.test_scenario_test_case.MockTINCTestCaseWithClassFixtures.teardown_class = 0
    tinctest.models.scenario.test.test_scenario_test_case.MockTINCTestCaseWithClassFixtures.setup_class = 0
    tinctest.models.scenario.test.test_scenario_test_case.MockAnotherTINCTestCaseWithClassFixtures.teardown_class = 0
    tinctest.models.scenario.test.test_scenario_test_case.MockAnotherTINCTestCaseWithClassFixtures.setup_class = 0
    tinc_test_case.run(result)
    # Verify that setup_class and teardown_class for both classes were
    # executed once for each step.
    self.assertEqual(
        tinctest.models.scenario.test.test_scenario_test_case.
        MockTINCTestCaseWithClassFixtures.teardown_class, 2)
    self.assertEqual(
        tinctest.models.scenario.test.test_scenario_test_case.
        MockTINCTestCaseWithClassFixtures.setup_class, 2)
    self.assertEqual(
        tinctest.models.scenario.test.test_scenario_test_case.
        MockAnotherTINCTestCaseWithClassFixtures.teardown_class, 2)
    self.assertEqual(
        tinctest.models.scenario.test.test_scenario_test_case.
        MockAnotherTINCTestCaseWithClassFixtures.setup_class, 2)
    self.assertEqual(len(tinc_test_case.test_case_scenario), 2)
    self.assertEqual(len(tinc_test_case.test_case_scenario[0][0]), 4)
    self.assertEqual(len(tinc_test_case.test_case_scenario[1][0]), 4)
    self.assertEqual(len(result.failures), 0)
def test_sanity_run_failure(self):
    tinc_test_case = MockScenarioTestCase('test_failure')
    tinc_test_case.__class__.__unittest_skip__ = False
    results = TestResult()
    tinc_test_case.run(results)
    self.assertEqual(len(results.failures), 1)
    self.assertEqual(len(tinc_test_case.test_case_scenario), 3)
def _testrun(self):
    """
    Start a Testrun and populate the Publishers

    @rtype: C{unittest.TestResult}
    @return: A TestResult
    """
    testrun_result = TestResult()
    try:
        publishers = self._publishers
        testrun = Testrun(self.is_hw_enabled,
                          self.is_host_enabled,
                          self.is_chroot_enabled)
        taskrunner = self.taskrunner
        #FIXME: Cheap hack to make testable
        testrun.run_test = taskrunner.run
        if testrun.run():
            testrun_result.addSuccess(TestCase)
        else:
            testrun_result.addFailure(TestCase, (None, None, None))
    except Exception, err:
        er_type, value, traceback = sys.exc_info()
        LOG.error(str(value) or "Testrun Error", exc_info=err)
        publishers.set_exception(value)
        testrun_result.addError(TestCase, (er_type, value, traceback))
        if DEBUG:
            raise
    # Return the result as promised by the docstring.
    return testrun_result
def main():
    __DIR__ = os.path.dirname(os.path.abspath(__file__))
    USE_XML_RUNNER = True
    # target: path to the directory used to store the generated XML files.
    # terminal_path: path to the file used to store the standard output of
    # the test execution.
    top_directory = os.path.join(__DIR__, 'test', 'my_package')
    target = os.path.join(gettempdir(), 'result')
    terminal_path = os.path.join(gettempdir(), 'terminal')
    print(f'XML files will be generated under the directory "{target}".')
    print(f'Path to the terminal file: "{terminal_path}".')
    with open(terminal_path, 'w') as terminal:
        test_suite: TestSuite = defaultTestLoader.discover(
            start_dir=top_directory,
            top_level_dir=__DIR__,
            pattern='*_test.py')
        print(f'Number of unit tests: {test_suite.countTestCases()}')
        if USE_XML_RUNNER:
            test_result = xmlrunner.XMLTestRunner(
                output=target,
                verbosity=0,
                stream=terminal).run(test_suite)
        else:
            result = TestResult()
            test_suite.run(result)
            test_case: TestCase
            message: str
            for test_case, message in result.failures:
                print(test_case.id() + ':')
                print('\n'.join([f'\t{m}' for m in message.split('\n')]))
def test_unsuccessfulExit(self):
    """
    Verify that an unsuccessful exit status results in an error.
    """
    result = TestResult()
    self.case.createSource = lambda testMethod: "throw new TypeError();"
    self.case.run(result)
    self.assertEqual(len(result.errors), 1)
def test_missingJavaScriptClass(self):
    """
    If a JavaScript class required by the test code is unavailable, an
    error is added to the result object by L{JavaScriptTestCase.run}.
    """
    result = TestResult()
    self.case.testMethod = lambda: "Nevow.Test.NoSuchModule"
    self.case.run(result)
    self.assertEqual(len(result.errors), 1)
def test_add_a_test(self):
    from .fake import FakeTestCase
    suite = TimingSuite()
    result = TestResult()
    suite.addTest(FakeTestCase('test_slow_thing'))
    suite.addTest(FakeTestCase('test_setup_class_was_run'))
    suite.run(result)
    self.assertEqual(len(suite._tests), 2)
    self.assertEqual(len(result.errors), 0)
def test_serial_concurrency_in_same_execution(self):
    tinc_test_case = MockScenarioTestCase(
        'test_serial_concurrency_in_same_execution')
    tinc_test_case.__class__.__unittest_skip__ = False
    result = TestResult()
    tinc_test_case.run(result)
    self.assertEqual(len(tinc_test_case.test_case_scenario), 2)
    self.assertEqual(len(tinc_test_case.test_case_scenario[0][0]), 2)
    self.assertEqual(len(tinc_test_case.test_case_scenario[1][0]), 2)
    self.assertEqual(len(result.failures), 0)
def test_assert_the_real_render_template_is_restored(self):
    test = TestNotRenderTemplates('test_assert_not_process_the_template')
    test_result = TestResult()
    test(test_result)
    assert test_result.wasSuccessful()
    response = self.client.get("/template/")
    assert len(response.data) > 0
def test_data_cleaned_up(self):
    user_collection.remove()
    company_collection.remove()
    loader = TestLoader()
    suite = loader.loadTestsFromTestCase(TestUnittestSupport.TestTestCase)
    result = TestResult()
    suite.run(result)
    self.assertTrue(result.wasSuccessful(), result)
    self.assertEqual(user_collection.count(), 0)
    self.assertEqual(company_collection.count(), 0)
def test_unsuccessfulExit(self):
    """
    Verify that an unsuccessful exit status results in an error.
    """
    result = TestResult()
    self.case.createSource = lambda testMethod: "throw new TypeError();"
    self.case.run(result)
    self.assertEqual(len(result.errors), 1)
    self.assertTrue(result.errors[0][1].startswith(
        'Exception: JavaScript interpreter had error exit: '))
def test_serial_execution_order(self):
    """
    Test whether tests added to a serial step are executed in the same
    order in which they are added.
    """
    tinc_test_case = MockScenarioTestCase('test_serial_execution_order')
    tinc_test_case.__class__.__unittest_skip__ = False
    result = TestResult()
    tinc_test_case.run(result)
    self.assertEqual(len(tinc_test_case.test_case_scenario), 1)
    self.assertEqual(len(tinc_test_case.test_case_scenario[0][0]), 2)
    self.assertEqual(len(result.failures), 0)
def run_tests():
    result = TestResult()
    print(TermColor.BOLD + 'Running tests ... ' + TermColor.ENDC, end='')
    defaultTestLoader.discover('.').run(result)
    if result.wasSuccessful():
        print(TermColor.OKGREEN + 'Passed' + TermColor.ENDC)
    else:
        print(TermColor.FAIL + 'Failed' + TermColor.ENDC)
    print()
    return result.wasSuccessful()
def test_add_a_test(self):
    from .fake import FakeTestCase
    suite = TimingSuite()
    result = TestResult()
    suite.addTest(FakeTestCase('test_slow_thing'))
    suite.addTest(FakeTestCase('test_setup_class_was_run'))
    with patch.object(suite, 'save_test_time') as mock:
        suite.run(result)
    self.assertEqual(len(suite._tests), 2)
    self.assertEqual(len(result.errors), 0)
    self.assertEqual(mock.call_count, 2)
def test_timing_is_correct_when_freezegun_sets_time_in_future(self):
    from .fake import FakeFrozenInFutureTestCase
    suite = TimingSuite()
    result = TestResult()
    suite.addTest(
        FakeFrozenInFutureTestCase(
            'test_this_should_not_have_very_long_duration'))
    suite.run(result)
    test_name = str(suite._tests[0])
    self.assertTrue(TIMINGS[test_name] > 0)
    self.assertTrue(TIMINGS[test_name] < 1)
def test_scenario_with_concurrency_fail(self):
    # Make the scenario test fail at the first error.
    tinc_test_case = MockScenarioTestCase('test_scenario_with_concurrency')
    tinc_test_case.__class__.__unittest_skip__ = False
    results = TestResult()
    tinc_test_case.run(results)
    # There are 3 tests; the 1st one should fail and return.
    self.assertEqual(results.testsRun, 1)
    self.assertEqual(len(results.failures), 1)
    self.assertEqual(len(tinc_test_case.test_case_scenario), 3)
    self.assertEqual(len(tinc_test_case.test_case_scenario[0][0]), 1)
def test_failure_with_no_fail_fast(self):
    # Make sure we do not exit at the first failure and continue till the end.
    tinc_test_case = MockScenarioTestCase('test_failure_with_no_fail_fast')
    tinc_test_case.__class__.__unittest_skip__ = False
    result = TestResult()
    tinc_test_case.run(result)
    self.assertEqual(result.testsRun, 1)
    self.assertEqual(len(result.failures), 1)
    # The failure message should include both test_03 and test_04 here.
    self.assertTrue('MockTestCase3.test_03' in result.failures[0][1])
    self.assertTrue('MockTestCase3.test_04' in result.failures[0][1])
    self.assertEqual(len(tinc_test_case.test_case_scenario), 4)
def test_relative_imports(self):
    """
    Test whether relative imports work properly. Scenarios can be added
    relative to the ScenarioTestCase by prefixing the test case name
    with '.'
    """
    tinc_test_case = MockScenarioTestCase('test_relative_imports')
    tinc_test_case.__class__.__unittest_skip__ = False
    result = TestResult()
    tinc_test_case.run(result)
    self.assertEqual(len(tinc_test_case.test_case_scenario), 1)
    self.assertEqual(len(tinc_test_case.test_case_scenario[0][0]), 1)
    self.assertEqual(len(result.failures), 0)
def test_flushed(self):
    """
    Any warnings emitted by a test which are flushed are not emitted to
    the Python warning system.
    """
    result = TestResult()
    case = Mask.MockTests('test_flushed')
    output = StringIO()
    monkey = self.patch(sys, 'stdout', output)
    case.run(result)
    monkey.restore()
    self.assertEqual(output.getvalue(), "")
def test_signalledExit(self):
    """
    An error should be reported if the JavaScript interpreter exits
    because it received a signal.
    """
    def stubFinder():
        return FilePath(__file__).sibling('segfault.py').path

    self.case.findJavascriptInterpreter = stubFinder
    self.case.createSource = lambda testMethod: ""
    result = TestResult()
    self.case.run(result)
    self.assertEqual(len(result.errors), 1)
def test_timing_is_correct_when_freezegun_sets_time_in_future(self):
    from .fake import FakeFrozenInFutureTestCase
    suite = TimingSuite()
    result = TestResult()
    suite.addTest(
        FakeFrozenInFutureTestCase(
            'test_this_should_not_have_very_long_duration'))
    with patch.object(suite, 'save_test_time') as mock:
        suite.run(result)
    test_name = str(suite._tests[0])
    self.assertEqual(mock.call_args_list[0][0][0], test_name)
    self.assertTrue(mock.call_args_list[0][0][1] > 0)
    self.assertTrue(mock.call_args_list[0][0][1] < 1)