def __init__(self, stream=None, descriptions=True, verbosity=1,
             failfast=False, buffer=False, resultclass=None, warnings=None,
             *, tb_locals=False, process_number=multiprocessing.cpu_count(),
             result_collector_class=None):
    """Store the runner configuration.

    :param stream: output stream; defaults to ``sys.stderr`` at call time.
    :param resultclass: a result class, or an iterable of result classes;
        normalised to a tuple either way.
    :param process_number: worker count; note the default is evaluated once,
        at function definition time.
    """
    # BUG FIX: ``collections.Iterable`` (a deprecated alias) was removed in
    # Python 3.10; the ABC lives in ``collections.abc``.
    from collections.abc import Iterable
    if stream is None:
        stream = sys.stderr
    self.stream = _WritelnDecorator(stream)
    self.descriptions = descriptions
    self.verbosity = verbosity
    self.failfast = failfast
    self.buffer = buffer
    self.tb_locals = tb_locals
    self.warnings = warnings
    self.process_number = process_number
    if resultclass is not None:
        if isinstance(resultclass, Iterable):
            self.resultclass = resultclass
        else:
            self.resultclass = (resultclass,)
    if result_collector_class is not None:
        self.result_collector_class = result_collector_class
def _make_result(verbose, failfast):
    """Build a ``TextTestResult`` whose output is captured in a StringIO."""
    result = unittest.TextTestResult(_WritelnDecorator(StringIO()), True, verbose)
    result.failfast = failfast
    result.buffer = False
    return result
def test_fixture_context_multiple_names_no_common_ancestors(self):
    """Names with no common ancestor each get their own setup/teardown."""
    stream = _WritelnDecorator(StringIO())
    res = _TextTestResult(stream, 0, 2)
    wd = os.path.join(support, 'ltfn')
    l = loader.TestLoader(workingDir=wd)
    suite = l.loadTestsFromNames(
        ['test_pak1.test_mod', 'test_pak2:test_two_two', 'test_mod'])
    # FIX: Python 2 print statements converted to print() calls, matching
    # the Python 3 version of this test elsewhere in the file.
    print(suite)
    suite(res)
    res.printErrors()
    print(stream.getvalue())
    assert not res.errors, res.errors
    assert not res.failures, res.failures
    assert 'state' in sys.modules, \
        "Context not load state module"
    m = sys.modules['state']
    print("state", m.called)
    expect = ['test_pak1.setup',
              'test_pak1.test_mod.setup',
              'test_pak1.test_mod.test_one_mod_one',
              'test_pak1.test_mod.teardown',
              'test_pak1.teardown',
              'test_pak2.setup',
              'test_pak2.test_two_two',
              'test_pak2.teardown',
              'test_mod.setup',
              'test_mod.test_mod',
              'test_mod.teardown']
    self.assertEqual(m.called, expect, diff(expect, m.called))
def test_fixture_context_multiple_names_no_common_ancestors(self):
    """Each independent package/module gets its own fixture lifecycle."""
    stream = _WritelnDecorator(StringIO())
    result = _TextTestResult(stream, 0, 2)
    working_dir = os.path.join(support, 'ltfn')
    test_loader = loader.TestLoader(workingDir=working_dir)
    names = ['test_pak1.test_mod', 'test_pak2:test_two_two', 'test_mod']
    suite = test_loader.loadTestsFromNames(names)
    print(suite)
    suite(result)
    result.printErrors()
    print(stream.getvalue())
    assert not result.errors, result.errors
    assert not result.failures, result.failures
    assert 'state' in sys.modules, \
        "Context not load state module"
    m = sys.modules['state']
    print("state", m.called)
    expect = [
        'test_pak1.setup',
        'test_pak1.test_mod.setup',
        'test_pak1.test_mod.test_one_mod_one',
        'test_pak1.test_mod.teardown',
        'test_pak1.teardown',
        'test_pak2.setup',
        'test_pak2.test_two_two',
        'test_pak2.teardown',
        'test_mod.setup',
        'test_mod.test_mod',
        'test_mod.teardown',
    ]
    self.assertEqual(m.called, expect, diff(expect, m.called))
def makeResult():
    """Create the configured result object, giving plugins first refusal."""
    result = resultClass(_WritelnDecorator(StringIO()), descriptions=1,
                         verbosity=config.verbosity, config=config)
    # A plugin may substitute its own result object; fall back to ours.
    return config.plugins.prepareTestResult(result) or result
def __init__(
    self,
    stream=None,
    descriptions=True,
    verbosity=1,
    failfast=False,
    buffer=False,
    resultclass=None,
    warnings=None,
    *,
    tb_locals=False,
    process_number=multiprocessing.cpu_count(),
    result_collector_class=None
):
    """Store the runner configuration.

    :param stream: output stream; defaults to ``sys.stderr`` at call time.
    :param resultclass: a result class, or an iterable of them; always
        stored as a tuple.
    """
    # BUG FIX: ``collections.Iterable`` was removed in Python 3.10; use the
    # ABC from ``collections.abc`` instead.
    from collections.abc import Iterable
    if stream is None:
        stream = sys.stderr
    self.stream = _WritelnDecorator(stream)
    self.descriptions = descriptions
    self.verbosity = verbosity
    self.failfast = failfast
    self.buffer = buffer
    self.tb_locals = tb_locals
    self.warnings = warnings
    self.process_number = process_number
    if resultclass is not None:
        if isinstance(resultclass, Iterable):
            self.resultclass = resultclass
        else:
            self.resultclass = (resultclass,)
    if result_collector_class is not None:
        self.result_collector_class = result_collector_class
def set_stream(self, streaming):
    """Stream TAP directly to stdout, silencing the runner's own output.

    The default test-runner output is discarded in favour of TAP.
    """
    _tracker.streaming = streaming
    _tracker.stream = sys.stdout
    # Send the regular runner output to the bit bucket.
    self.stream = _WritelnDecorator(open(os.devnull, "w"))
def set_stream(self, streaming):
    """Enable TAP streaming to stdout.

    The runner's normal output is suppressed so only TAP is emitted.
    """
    # Note: the devnull handle is intentionally left open for the
    # lifetime of the runner.
    devnull = open(os.devnull, 'w')
    self.stream = _WritelnDecorator(devnull)
    _tracker.stream = sys.stdout
    _tracker.streaming = streaming
def prepareTestResult(self, result):
    """Reset per-run bookkeeping on *result* and mute its console stream."""
    result.__tests_run = 0
    result.__failures = []
    result.__errors = []
    result.__start_time = time.time()
    # Default output goes to the bit bucket; reporting happens elsewhere.
    result.stream = _WritelnDecorator(open(os.devnull, 'w'))
    self._result = result
def run_one(model):
    # type: (str) -> str
    """
    Run the tests for a single model, returning the captured report text.

    *model* can be a python file, which is handy for checking user defined
    plugin models.
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console. Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build a object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model
    loaders = ['opencl'] if core.HAVE_OPENCL else ['dll']
    models = [model]
    try:
        suite = make_suite(loaders, models)
    except Exception:
        import traceback
        stream.writeln(traceback.format_exc())
        # BUG FIX: this path previously did a bare ``return`` (None) even
        # though the function is declared to return str; return the
        # captured traceback text instead so callers always get a string.
        output = stream.getvalue()
        stream.close()
        return output

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    # Warn if there are no user defined tests.
    # The suite has one test which runs the smoke tests plus the
    # input-output pairs from the model file, so checking the first test
    # suffices; the for/else fires only when the suite is empty.
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests." % model)
        break
    else:
        stream.writeln(
            "Note: no test suite created --- this should never happen")

    output = stream.getvalue()
    stream.close()
    return output
def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
             failfast=False, buffer=False, resultclass=None):
    """Store the runner configuration.

    :param failfast: stop the run on the first failure.
    """
    self.stream = _WritelnDecorator(stream)
    self.descriptions = descriptions
    self.verbosity = verbosity
    # BUG FIX: the failfast argument was previously discarded
    # (hard-coded ``self.failfast = False``).
    self.failfast = failfast
    self.buffer = buffer
    if resultclass is not None:
        self.resultclass = resultclass
def __init__(self, title, verbosity):
    """Set up the runner with stderr output and empty statistics."""
    super(NeoTestRunner, self).__init__(
        _WritelnDecorator(sys.stderr), False, verbosity)
    self._title = title
    self.failedImports = {}
    self.modulesStats = {}
    self.time_dict = defaultdict(int)
    self.run_dict = defaultdict(int)
    self.temp_directory = getTempDirectory()
def create_result_object(self):
    """Build the configured result object, letting plugins replace it."""
    cfg = self.config
    result = self.resultClass(_WritelnDecorator(StringIO()), descriptions=1,
                              verbosity=cfg.verbosity, config=cfg)
    # Plugins may hand back a wholly different result object.
    return cfg.plugins.prepareTestResult(result) or result
def __init__(self, title, verbosity):
    """Initialise stats containers and route runner output to stderr."""
    stderr_stream = _WritelnDecorator(sys.stderr)
    super(NeoTestRunner, self).__init__(stderr_stream, False, verbosity)
    self._title = title
    self.modulesStats = {}
    self.failedImports = {}
    self.run_dict = defaultdict(int)
    self.time_dict = defaultdict(int)
    self.temp_directory = getTempDirectory()
def buildReport(self, add_status):
    """Assemble the report: summary, unexpected successes, then errors.

    Returns ``(subject, body_text)``.
    """
    subject, summary = self._buildSummary(add_status)
    report = StringIO()
    report.write(summary)
    for test in self.unexpectedSuccesses:
        report.write("UNEXPECTED SUCCESS: %s\n" % self.getDescription(test))
    # Point the result stream at the report buffer so printErrors()
    # appends the error details to it.
    self.stream = _WritelnDecorator(report)
    self.printErrors()
    return subject, report.getvalue()
def check_model(model_info):
    # type: (ModelInfo) -> Tuple[bool, str]
    """
    Run the tests for a single model, capturing the output.

    Returns success status and the output string.
    """
    # Note that running main() directly did not work from within the
    # wxPython pycrust console. Instead of the results appearing in the
    # window they were printed to the underlying console.
    from unittest.runner import TextTestResult, _WritelnDecorator

    # Build a object to capture and print the test results
    stream = _WritelnDecorator(StringIO())  # Add writeln() method to stream
    verbosity = 2
    descriptions = True
    result = TextTestResult(stream, descriptions, verbosity)

    # Build a test suite containing just the model.
    # Loader priority: opencl, then cuda, then the compiled dll fallback.
    loaders = ['opencl' if use_opencl() else 'cuda' if use_cuda() else 'dll']
    suite = unittest.TestSuite()
    _add_model_to_suite(loaders, suite, model_info)

    # Warn if there are no user defined tests.
    # Note: the test suite constructed above only has one test in it, which
    # runs through some smoke tests to make sure the model runs, then runs
    # through the input-output pairs given in the model definition file. To
    # check if any such pairs are defined, therefore, we just need to check if
    # they are in the first test of the test suite. We do this with an
    # iterator since we don't have direct access to the list of tests in the
    # test suite.
    # In Qt5 suite.run() will clear all tests in the suite after running
    # with no way of retaining them for the test below, so let's check
    # for user tests before running the suite.
    # (The for/else fires only when the suite is empty; the bare break
    # means only the first test is inspected.)
    for test in suite:
        if not test.info.tests:
            stream.writeln("Note: %s has no user defined tests."
                           % model_info.name)
        break
    else:
        stream.writeln(
            "Note: no test suite created --- this should never happen")

    # Run the test suite
    suite.run(result)

    # Print the failures and errors
    for _, tb in result.errors:
        stream.writeln(tb)
    for _, tb in result.failures:
        stream.writeln(tb)

    output = stream.getvalue()
    stream.close()
    return result.wasSuccessful(), output
def test_mp_process_args_pickleable():
    """The multiprocess runner's arguments must survive pickling."""
    test = case.Test(T('runTest'))
    config = Config()
    config.multiprocess_timeout = 0.1
    config.multiprocess_workers = 2
    mp_runner = multiprocess.MultiProcessTestRunner(
        stream=_WritelnDecorator(sys.stdout),
        verbosity=2,
        loaderClass=TestLoader,
        config=config,
    )
    mp_runner.run(test)
def test_mp_process_args_pickleable():
    """Runner arguments must be picklable for worker processes."""
    test = case.Test(T('runTest'))
    config = Config()
    config.multiprocess_workers = 2
    config.multiprocess_timeout = 0.1
    stdout_stream = _WritelnDecorator(sys.stdout)
    runner = multiprocess.MultiProcessTestRunner(
        stream=stdout_stream, verbosity=2,
        loaderClass=TestLoader, config=config)
    runner.run(test)
def create_result_object(self):
    """Create the result object; a plugin-provided one takes precedence."""
    buffered = _WritelnDecorator(StringIO())
    result = self.resultClass(
        buffered,
        descriptions=1,
        verbosity=self.config.verbosity,
        config=self.config,
    )
    plugin_result = self.config.plugins.prepareTestResult(result)
    if plugin_result:
        return plugin_result
    return result
def run(self):
    """Run the suite, teeing runner output into the log file if configured."""
    if not self._logfile:
        return self._run()
    # The file is closed when _run() completes.
    with open(self._logfile, 'w+') as stream:
        self._runner.stream = _WritelnDecorator(stream)
        return self._run()
def buildReport(self, add_status):
    """Build ``(subject, body)``; short-circuit when stopping on success."""
    subject, summary = self._buildSummary(add_status)
    if self.stop_on_success:
        # Nothing beyond the summary is wanted in this mode.
        return subject, summary
    report = StringIO()
    report.write(summary)
    for test in self.unexpectedSuccesses:
        report.write("UNEXPECTED SUCCESS: %s\n" % self.getDescription(test))
    # Redirect the stream so the error lists land in the report buffer.
    self.stream = _WritelnDecorator(report)
    self.printErrorList('ERROR', self.errors)
    self.printErrorList('FAIL', self.failures)
    return subject, report.getvalue()
def test_load_nonsense_name(self):
    """Loading a nonsense name records errors instead of raising."""
    ctx = os.path.join(support, 'ctx')
    test_loader = loader.TestLoader(workingDir=ctx)
    suite = test_loader.loadTestsFromName('fred!')
    result = _TextTestResult(stream=_WritelnDecorator(sys.stdout),
                             descriptions=0, verbosity=1)
    suite(result)
    print(result.errors)
    assert result.errors, "Expected errors but got none"
    assert not result.failures, result.failures
def test_load_nonsense_name(self):
    """A nonsense test name should produce errors, not failures."""
    ctx = os.path.join(support, 'ctx')
    l = loader.TestLoader(workingDir=ctx)
    suite = l.loadTestsFromName('fred!')
    res = _TextTestResult(
        stream=_WritelnDecorator(sys.stdout), descriptions=0, verbosity=1)
    suite(res)
    # FIX: Python 2 print statement converted to print(), consistent with
    # the Python 3 copy of this test elsewhere in the file.
    print(res.errors)
    assert res.errors, "Expected errors but got none"
    assert not res.failures, res.failures
def test_issue_269(self):
    """Test classes that raise exceptions in __init__ do not stop test run
    """
    working_dir = os.path.join(support, 'issue269')
    test_loader = loader.TestLoader(workingDir=working_dir)
    suite = test_loader.loadTestsFromName('test_bad_class')
    result = _TextTestResult(stream=_WritelnDecorator(sys.stdout),
                             descriptions=0, verbosity=1)
    suite(result)
    print(result.errors)
    self.assertEqual(len(result.errors), 1)
    assert 'raise Exception("pow")' in result.errors[0][1]
def test_issue_269(self):
    """Test classes that raise exceptions in __init__ do not stop test run
    """
    wdir = os.path.join(support, 'issue269')
    l = loader.TestLoader(workingDir=wdir)
    suite = l.loadTestsFromName('test_bad_class')
    res = _TextTestResult(
        stream=_WritelnDecorator(sys.stdout), descriptions=0, verbosity=1)
    suite(res)
    # FIX: Python 2 print statement converted to print(), matching the
    # Python 3 duplicate of this test in the file.
    print(res.errors)
    self.assertEqual(len(res.errors), 1)
    assert 'raise Exception("pow")' in res.errors[0][1]
def __init__(self, stream=None, descriptions=True, verbosity=1,
             failfast=False, buffer=False, resultclass=None, warnings=None):
    """Store runner options; *stream* defaults to sys.stderr at call time."""
    self.stream = runner._WritelnDecorator(
        sys.stderr if stream is None else stream)
    self.descriptions = descriptions
    self.verbosity = verbosity
    self.failfast = failfast
    self.buffer = buffer
    self.warnings = warnings
    if resultclass is not None:
        self.resultclass = resultclass
def __init__(self, stream=None, descriptions=True, verbosity=1,
             failfast=False, buffer=False, resultclass=None):
    """Store the runner configuration.

    FIX: ``stream=sys.stderr`` as a default was bound once at definition
    time, so later redirection of ``sys.stderr`` (common in test harnesses)
    was ignored. Use a None sentinel and resolve at call time, matching the
    other initializers in this file and stdlib unittest.
    """
    if stream is None:
        stream = sys.stderr
    self.stream = _WritelnDecorator(stream)
    self.descriptions = descriptions
    self.verbosity = verbosity
    self.failfast = failfast
    self.buffer = buffer
    if resultclass is not None:
        self.resultclass = resultclass
def test_mp_process_args_pickleable():
    """Arguments passed to worker processes must pickle cleanly."""
    # TODO(Kumar) this test needs to be more succint.
    # If you start seeing it timeout then perhaps we need to skip it again.
    # raise SkipTest('this currently gets stuck in poll() 90% of the time')
    test = case.Test(T('runTest'))
    config = Config()
    config.multiprocess_timeout = 5
    config.multiprocess_workers = 2
    mp_runner = multiprocess.MultiProcessTestRunner(
        stream=_WritelnDecorator(sys.stdout),
        verbosity=10,
        loaderClass=TestLoader,
        config=config)
    mp_runner.run(test)
def test_generator_with_closure(self):
    """Test that a generator test can employ a closure

    Issue #3. If the generator binds early, the last value of the
    closure will be seen for each generated test and the tests will
    fail.
    """
    gen_dir = os.path.join(support, 'gen')
    suite = loader.TestLoader(workingDir=gen_dir).loadTestsFromName('test')
    result = _TextTestResult(stream=_WritelnDecorator(sys.stdout),
                             descriptions=0, verbosity=1)
    suite(result)
    assert not result.errors
    self.assertEqual(result.testsRun, 5)
def test_failed_import(self):
    """A failed import is reported as a single errored test."""
    ctx = os.path.join(support, 'ctx')
    l = loader.TestLoader(workingDir=ctx)
    suite = l.loadTestsFromName('no_such_module.py')
    res = _TextTestResult(
        stream=_WritelnDecorator(sys.stdout), descriptions=0, verbosity=1)
    suite(res)
    # FIX: Python 2 print statement converted to print(), matching the
    # Python 3 duplicate of this test in the file.
    print(res.errors)
    res.printErrors()
    assert res.errors, "Expected errors but got none"
    assert not res.failures, res.failures
    assert res.testsRun == 1, \
        "Expected to run 1 tests but ran %s" % res.testsRun
def test_failed_import_module_name(self):
    """A bad module name surfaces as an ImportError-classed error."""
    ctx = os.path.join(support, 'ctx')
    test_loader = loader.TestLoader(workingDir=ctx)
    suite = test_loader.loadTestsFromName('no_such_module')
    result = _TextTestResult(stream=_WritelnDecorator(sys.stdout),
                             descriptions=0, verbosity=1)
    suite(result)
    print(result.errors)
    result.printErrors()
    assert result.errors, "Expected errors but got none"
    assert not result.failures, result.failures
    err = result.errors[0][0].test.exc_class
    assert err is ImportError, \
        "Expected import error, got %s" % err
def test_failed_import(self):
    """Importing a missing module yields exactly one errored test."""
    ctx = os.path.join(support, 'ctx')
    test_loader = loader.TestLoader(workingDir=ctx)
    suite = test_loader.loadTestsFromName('no_such_module.py')
    result = _TextTestResult(stream=_WritelnDecorator(sys.stdout),
                             descriptions=0, verbosity=1)
    suite(result)
    print(result.errors)
    result.printErrors()
    assert result.errors, "Expected errors but got none"
    assert not result.failures, result.failures
    assert result.testsRun == 1, \
        "Expected to run 1 tests but ran %s" % result.testsRun
def test_failed_import_module_name(self):
    """A missing module name is reported as an ImportError."""
    ctx = os.path.join(support, 'ctx')
    l = loader.TestLoader(workingDir=ctx)
    suite = l.loadTestsFromName('no_such_module')
    res = _TextTestResult(
        stream=_WritelnDecorator(sys.stdout), descriptions=0, verbosity=1)
    suite(res)
    # FIX: Python 2 print statement converted to print(), matching the
    # Python 3 duplicate of this test in the file.
    print(res.errors)
    res.printErrors()
    assert res.errors, "Expected errors but got none"
    assert not res.failures, res.failures
    err = res.errors[0][0].test.exc_class
    assert err is ImportError, \
        "Expected import error, got %s" % err
def test_generator_with_closure(self):
    """Test that a generator test can employ a closure

    Issue #3. If the generator binds early, the last value of the
    closure will be seen for each generated test and the tests will
    fail.
    """
    gen = os.path.join(support, 'gen')
    test_loader = loader.TestLoader(workingDir=gen)
    suite = test_loader.loadTestsFromName('test')
    out_stream = _WritelnDecorator(sys.stdout)
    result = _TextTestResult(stream=out_stream, descriptions=0, verbosity=1)
    suite(result)
    assert not result.errors
    self.assertEqual(result.testsRun, 5)
def test_deprecated_output(self):
    """A DeprecatedTest is rendered as 'D' and does not fail the run."""
    class TC(unittest.TestCase):
        def test(self):
            raise DeprecatedTest('deprecated me')
    stream = _WritelnDecorator(StringIO())
    result = _TextTestResult(stream, 0, 1)
    Deprecated().prepareTestResult(result)
    TC('test')(result)
    assert not result.errors, "Deprecated was not caught: %s" % result.errors
    assert result.deprecated
    result.printErrors()
    output = stream.getvalue()
    assert output
    assert output.strip() == "D"
    assert result.wasSuccessful()
def test_prepare_patches_result(self):
    """Skip.prepareTestResult must install its patches on the result."""
    stream = _WritelnDecorator(StringIO())
    res = _TextTestResult(stream, 0, 1)
    sk = Skip()
    sk.prepareTestResult(res)
    # Bare attribute accesses: each raises AttributeError (failing this
    # test) if prepareTestResult did not install the expected patch.
    res._orig_addError
    res._orig_printErrors
    res._orig_wasSuccessful
    res.skipped
    self.assertEqual(res.errorClasses,
                     {SkipTest: (res.skipped, 'SKIP', False)})
    # result w/out print works too
    res = unittest.TestResult()
    sk = Skip()
    sk.prepareTestResult(res)
    # Plain TestResult has no printErrors/wasSuccessful patching to check;
    # only addError and the skipped list are expected.
    res._orig_addError
    res.skipped
    self.assertEqual(res.errorClasses,
                     {SkipTest: (res.skipped, 'SKIP', False)})
def test_skip_output_verbose(self):
    """In verbose mode a skip shows ' ... SKIP' plus the reason."""
    class TC(unittest.TestCase):
        def test(self):
            raise SkipTest('skip me too')
    stream = _WritelnDecorator(StringIO())
    res = _TextTestResult(stream, 0, verbosity=2)
    sk = Skip()
    sk.prepareTestResult(res)
    test = TC('test')
    test(res)
    assert not res.errors, "Skip was not caught: %s" % res.errors
    assert res.skipped
    res.printErrors()
    out = stream.getvalue()
    # FIX: Python 2 print statement converted to print(), consistent with
    # the Python 3 test methods in this file.
    print(out)
    assert out
    assert ' ... SKIP' in out
    assert 'skip me too' in out
def test_skip_output(self):
    """In dot mode a skip is rendered as 'S' and the run still succeeds."""
    class TC(unittest.TestCase):
        def test(self):
            raise SkipTest('skip me')
    stream = _WritelnDecorator(StringIO())
    res = _TextTestResult(stream, 0, 1)
    sk = Skip()
    sk.prepareTestResult(res)
    test = TC('test')
    test(res)
    assert not res.errors, "Skip was not caught: %s" % res.errors
    assert res.skipped
    res.printErrors()
    out = stream.getvalue()
    # FIX: Python 2 print statement converted to print(), consistent with
    # the Python 3 test methods in this file.
    print(out)
    assert out
    assert out.strip() == "S"
    assert res.wasSuccessful()
def __init__(self, stream=sys.stderr, descriptions=None, verbosity=2):
    """Initialise counters, timing fields and the result list."""
    TestResult.__init__(self, _WritelnDecorator(stream), descriptions,
                        verbosity)
    self.stdout0 = None
    self.stderr0 = None
    self.success_count = 0
    self.failure_count = 0
    self.error_count = 0
    self.verbosity = verbosity
    # Timing bookkeeping (start, end, elapsed, per-test times).
    self.s_time = 0
    self.e_time = 0
    self.spend_time = 0
    self.times = []
    # result is a list of result in 4 tuple
    # (
    #   result code (0: success; 1: fail; 2: error),
    #   TestCase object,
    #   Test output (byte string),
    #   stack trace,
    # )
    self.result = []
def test_deprecated_output_verbose(self):
    """Verbose mode shows ' ... DEPRECATED' plus the reason message."""
    class TC(unittest.TestCase):
        def test(self):
            raise DeprecatedTest('deprecated me too')
    stream = _WritelnDecorator(StringIO())
    result = _TextTestResult(stream, 0, verbosity=2)
    Deprecated().prepareTestResult(result)
    TC('test')(result)
    assert not result.errors, \
        "Deprecated was not caught: %s" % result.errors
    assert result.deprecated
    result.printErrors()
    output = stream.getvalue()
    print(output)
    assert output
    assert ' ... DEPRECATED' in output
    assert 'deprecated me too' in output
def __init__(self, title, verbosity, stop_on_success, readable_tid):
    """Initialise the runner; optionally patch TID generation for readability.

    When *readable_tid* is true, transaction IDs are monkey-patched into a
    human-readable 'T<nnn>...' form (via the Patch objects stored in
    self._readable_tid) so that test logs are easier to follow.
    """
    super(NeoTestRunner, self).__init__(_WritelnDecorator(sys.stderr),
                                        False, verbosity)
    self._title = title
    self.stop_on_success = stop_on_success
    if readable_tid:
        from neo.lib import util
        from neo.lib.util import dump, p64, u64
        from neo.master.transactions import TransactionManager
        def _nextTID(orig, tm, ttid=None, divisor=None):
            # Replacement for TransactionManager._nextTID: allocate a
            # monotonically increasing counter and embed it in the TID.
            n = self._next_tid
            self._next_tid = n + 1
            n = str(n).rjust(3, '-')
            if ttid:
                # Keep the low bits congruent with the requested ttid
                # modulo divisor, as the original algorithm does.
                t = u64('T%s%s-' % (n, ttid[1:4]))
                m = (u64(ttid) - t) % divisor
                assert m < 211, (p64(t), divisor)
                t = p64(t + m)
            else:
                t = 'T%s----' % n
            # TIDs must remain strictly increasing.
            assert tm._last_tid < t, (tm._last_tid, t)
            tm._last_tid = t
            return t
        self._readable_tid = (
            Patch(self, 1, _next_tid=0),
            Patch(TransactionManager, _nextTID=_nextTID),
            # Keep a pristine copy of dump() so the patched version below
            # can delegate for non-readable values.
            Patch(util, 1, orig_dump=type(dump)(
                dump.__code__, dump.__globals__)),
            # Patched dump(): pass readable 'T...' strings through verbatim.
            Patch(dump, __code__=(lambda s: s if type(s) is str and
                  s.startswith('T') else orig_dump(s)).__code__),
        )
    self.modulesStats = {}
    self.failedImports = {}
    self.run_dict = defaultdict(int)
    self.time_dict = defaultdict(int)
    self.temp_directory = getTempDirectory()
def setup_log(self):
    """Open the log file, record the command line, and attach a result."""
    # six.PY3: text mode on Python 3, binary on Python 2.
    log_mode = "w" if six.PY3 else "wb"
    self.log_file = _WritelnDecorator(open(self.log_path, log_mode))
    self.log_file.writeln(" ".join(quote(arg) for arg in self.argv))
    self.log_file.writeln(str(self.start))
    self.result = TextTestResult(self.log_file, True, 0)
def __init__(self):
    """Configure a verbose runner that writes to stdout."""
    out = _WritelnDecorator(sys.stdout)
    TextTestRunner.__init__(self, stream=out, verbosity=2)
def prepareTestResult(self, result):
    """Silence *result* by pointing its stream at os.devnull."""
    try:
        # Python 2.7+ / Python 3: the decorator lives in unittest.runner.
        from unittest.runner import _WritelnDecorator
    except ImportError:
        # Older Pythons exposed it directly on the unittest package.
        from unittest import _WritelnDecorator
    result.stream = _WritelnDecorator(open(os.devnull, 'w'))
def addFailure(self, test, err):
    """Record the failure while temporarily echoing output to stderr."""
    saved_stream, self.stream = self.stream, _WritelnDecorator(sys.stderr)
    TextTestResult.addFailure(self, test, err)
    self.stream = saved_stream
def printErrors(self):
    """Print error/failure lists to stderr regardless of configured stream."""
    saved_stream, self.stream = self.stream, _WritelnDecorator(sys.stderr)
    TextTestResult.printErrors(self)
    self.stream = saved_stream
def setup_log(self):
    """Open the log file, write the invocation header, attach a result."""
    self.log_file = _WritelnDecorator(open(self.log_path, "w"))
    log = self.log_file
    # Header: the shell-quoted command line, then the start timestamp.
    log.writeln(" ".join(quote(arg) for arg in self.argv))
    log.writeln(str(self.start))
    self.result = TextTestResult(log, True, 0)
def flush(self):
    """Discard buffered output by rebinding the stream to a fresh buffer."""
    # Drop the old buffer before installing the replacement.
    del self.stream
    self.stream = runner._WritelnDecorator(cStringIO.StringIO())