def addFailure(self, test, error):  # pylint: disable-msg=W0221
    self._wassuccess = False
    # TestProtocolClient does not call TestResult.addFailure
    test.passed = False
    fixTestCase(test)
    error, details = self._getArgs(test, error)
    TestProtocolClient.addFailure(self, test, error, details=details)

def test_warnings(self):
    stream = io.BytesIO()
    client = TestProtocolClient(stream)
    test1 = FakeTest(id='test1')
    test2 = FakeTest(id='test2')
    client.startTest(test1)
    client.stopTest(test1)
    client.addError(test2, create_error('error2'))
    client.stopTest(test2)

    self.setupStep(subunit.SubunitShellCommand(command='test'))
    self.expectCommands(
        ExpectShell(workdir='wkdir', command="test")
        + Expect.log('stdio', stdout=stream.getvalue())
        + 0)
    self.expectOutcome(
        result=SUCCESS,  # N.B. not WARNINGS
        state_string="shell 1 test passed")
    # note that the warnings list is ignored..
    self.expectLogfile('warnings', '''\
error: test2 [
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '_error2'
]
''')
    return self.runStep()

def addError(self, test, error):  # pylint: disable-msg=W0221
    """Overrides normal addError to add support for errorClasses.

    If the exception is a registered class, the error will be added
    to the list for that class, not errors.
    """
    fixTestCase(test)
    # manually call startTest if it's not already called
    if not getattr(test, '_subunit_started', False):
        self.startTest(test)
    ecls, evt, tbk = error  # pylint: disable-msg=W0612
    for cls, (storage, label, isfail) in self.errorClasses.items():  # pylint: disable-msg=W0612
        if isclass(ecls) and issubclass(ecls, cls):
            if not isfail:
                reason = _exception_detail(evt)
                if reason and self.useDetails:
                    details = {"reason": TextContent(reason)}
                    reason = None
                else:
                    details = None
                self._addNonFailOutcome(label.lower(), test,
                                        reason=reason, details=details)
                return
    self._wassuccess = False
    error, details = self._getArgs(test, error)
    test.passed = False
    TestProtocolClient.addError(self, test, error, details=details)

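# Illustrative sketch (not part of the plugin above): addError unpacks
# self.errorClasses as {exception type: (storage, label, isfail)}, following
# nose's ErrorClassPlugin convention. Registering a non-failure class means
# it is reported via _addNonFailOutcome(label.lower(), ...) rather than as an
# error. The DeprecatedTest name below is hypothetical.
class DeprecatedTest(Exception):
    pass


example_error_classes = {
    # exception type: (storage list, label, counts as failure?)
    DeprecatedTest: ([], 'DEPRECATED', False),
}
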
def run(self, test):
    "Run the given test case or test suite."
    result = TestProtocolClient(self.stream)
    result = AutoTimingTestResultDecorator(result)
    if self.failfast is not None:
        result.failfast = self.failfast
    test(result)
    return result

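# A minimal round-trip sketch (not part of the runner above): serialise a
# unittest run through TestProtocolClient into a BytesIO, then replay the
# captured subunit stream with ProtocolTestCase into a plain TestResult.
# Assumes python-subunit and testtools are installed; _Example is made up.
import io
import unittest

from subunit import ProtocolTestCase, TestProtocolClient
from subunit.test_results import AutoTimingTestResultDecorator


class _Example(unittest.TestCase):
    def test_ok(self):
        self.assertTrue(True)


def roundtrip():
    stream = io.BytesIO()
    # Timestamp every outcome, as the run() wrapper above does.
    client = AutoTimingTestResultDecorator(TestProtocolClient(stream))
    unittest.TestLoader().loadTestsFromTestCase(_Example).run(client)

    # Replay the captured stream into an ordinary TestResult.
    stream.seek(0)
    replayed = unittest.TestResult()
    ProtocolTestCase(stream).run(replayed)
    return replayed.testsRun  # expect 1
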
def _addOutcome(self, outcome, test, *args, **kwargs):
    if self.descriptions:
        if not hasattr(test, '_id_') and test.shortDescription():
            test._id_ = test.id

            def fakeid():
                return test.shortDescription() or test.id()

            test.id = fakeid
    TestProtocolClient._addOutcome(self, outcome, test, *args, **kwargs)
    if self.descriptions and hasattr(test, '_id_'):
        test.id = test._id_

def __init__(self, stream, descriptions, config=None, errorClasses=None,
             # kwargs capture all other arguments, including unused
             # ones: verbosity
             **kwargs):
    if errorClasses is None:
        errorClasses = {}
    self.errorClasses = errorClasses
    # if config is None:
    #     config = Config()
    self.config = config
    self.descriptions = descriptions
    self.stream = stream
    # this is to make multiprocess plugin happy
    self.useDetails = kwargs.get("useDetails", False)
    self._wassuccess = True
    TestProtocolClient.__init__(self, stream)

def __init__(self, stream=sys.stdout, tbformat='default',
             realtime=False, publisher=None):
    """
    Construct a L{SubunitReporter}.

    @param stream: A file-like object representing the stream to print
        output to. Defaults to stdout.
    @param tbformat: The format for tracebacks. Ignored, since subunit
        always uses Python's standard format.
    @param realtime: Whether or not to print exceptions in the middle
        of the test results. Ignored, since subunit always does this.
    @param publisher: The log publisher which will be preserved for
        reporting events. Ignored, as it's not relevant to subunit.
    """
    if TestProtocolClient is None:
        raise Exception("Subunit not available")
    self._subunit = TestProtocolClient(stream)
    self._successful = True

def test_warnings(self):
    stream = io.BytesIO()
    client = TestProtocolClient(stream)
    test1 = FakeTest(id='test1')
    test2 = FakeTest(id='test2')
    client.startTest(test1)
    client.stopTest(test1)
    client.addError(test2, create_error('error2'))
    client.stopTest(test2)

    self.setup_step(subunit.SubunitShellCommand(command='test'))
    self.expect_commands(
        ExpectShell(workdir='wkdir', command="test")
        .stdout(stream.getvalue())
        .exit(0))
    self.expect_outcome(
        result=SUCCESS,  # N.B. not WARNINGS
        state_string="shell 1 test passed")
    # note that the warnings list is ignored..
    self.expect_log_file('warnings', re.compile(r'''error: test2 \[.*
ValueError: invalid literal for int\(\) with base 10: '_error2'
\]
''', re.MULTILINE | re.DOTALL))  # noqa pylint: disable=line-too-long
    return self.run_step()

def do_fork(suite):
    """Take suite and start up multiple runners by forking (Unix only).

    :param suite: TestSuite object.

    :return: An iterable of TestCase-like objects which can each have
        run(result) called on them to feed tests to result.
    """
    result = []
    test_blocks = partition_tests(suite, concurrency_num)
    # Clear the tests from the original suite so it doesn't keep them alive
    suite._tests[:] = []
    for process_tests in test_blocks:
        process_suite = unittest.TestSuite(process_tests)
        # Also clear each split list so new suite has only reference
        process_tests[:] = []
        c2pread, c2pwrite = os.pipe()
        pid = os.fork()
        if pid == 0:
            try:
                stream = os.fdopen(c2pwrite, 'wb', 1)
                os.close(c2pread)
                # Leave stderr and stdout open so we can see test noise
                # Close stdin so that the child goes away if it decides to
                # read from stdin (otherwise it's a roulette to see what
                # child actually gets keystrokes for pdb etc).
                sys.stdin.close()
                subunit_result = AutoTimingTestResultDecorator(
                    TestProtocolClient(stream))
                process_suite.run(subunit_result)
            except:
                # Try and report traceback on stream, but exit with error
                # even if stream couldn't be created or something else
                # goes wrong. The traceback is formatted to a string and
                # written in one go to avoid interleaving lines from
                # multiple failing children.
                try:
                    stream.write(traceback.format_exc())
                finally:
                    os._exit(1)
            os._exit(0)
        else:
            os.close(c2pwrite)
            stream = os.fdopen(c2pread, 'rb', 1)
            test = ProtocolTestCase(stream)
            result.append(test)
    return result

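# Hedged usage sketch: do_fork matches the make_tests callback shape expected
# by testtools.ConcurrentTestSuite, which runs each returned ProtocolTestCase
# in its own thread and merges the results into one TestResult. It assumes
# do_fork is bound with its concurrency_num closure (e.g. returned by an
# enclosing factory); the run_forked name is illustrative.
import unittest

from testtools import ConcurrentTestSuite


def run_forked(suite):
    concurrent_suite = ConcurrentTestSuite(suite, do_fork)
    result = unittest.TestResult()
    concurrent_suite.run(result)
    return result
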
def __init__(self, repository, partial=False):
    # XXX: Perhaps should factor into a decorator and use an unaltered
    # TestProtocolClient.
    self._repository = repository
    fd, name = tempfile.mkstemp(dir=self._repository.base)
    self.fname = name
    stream = os.fdopen(fd, 'wb')
    self.partial = partial
    # The time taken by each test, flushed at the end.
    self._times = {}
    self._test_start = None
    self._time = None
    subunit_client = testtools.StreamToExtendedDecorator(
        TestProtocolClient(stream))
    self.hook = testtools.CopyStreamResult([
        subunit_client,
        testtools.StreamToDict(self._handle_test)])
    self._stream = stream

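# Rough fan-out sketch (assumes testtools and python-subunit are installed),
# mirroring the CopyStreamResult wiring above: a single StreamResult event
# sequence is copied both to a subunit writer and to a dict-building
# callback. demo_fanout and the 'demo.test_ok' id are made up.
import io

import testtools
from subunit import TestProtocolClient


def demo_fanout():
    seen = []
    stream = io.BytesIO()
    hook = testtools.CopyStreamResult([
        testtools.StreamToExtendedDecorator(TestProtocolClient(stream)),
        testtools.StreamToDict(seen.append)])
    hook.startTestRun()
    hook.status(test_id='demo.test_ok', test_status='inprogress')
    hook.status(test_id='demo.test_ok', test_status='success')
    hook.stopTestRun()
    # After stopTestRun, `seen` holds one dict describing demo.test_ok and
    # `stream` holds the equivalent subunit bytes.
    return seen, stream.getvalue()
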
def test_error(self):
    stream = io.BytesIO()
    client = TestProtocolClient(stream)
    test = FakeTest(id='test1')
    client.startTest(test)
    client.addError(test, create_error('error1'))
    client.stopTest(test)

    self.setup_step(subunit.SubunitShellCommand(command='test'))
    self.expect_commands(
        ExpectShell(workdir='wkdir', command="test")
        .stdout(stream.getvalue())
        .exit(0))
    self.expect_outcome(
        result=FAILURE,
        state_string="shell Total 1 test(s) 1 error (failure)")
    self.expect_log_file('problems', re.compile(r'''test1
testtools.testresult.real._StringException:.*ValueError: invalid literal for int\(\) with base 10: '_error1'
.*''', re.MULTILINE | re.DOTALL))  # noqa pylint: disable=line-too-long
    return self.run_step()

def test_success(self):
    stream = io.BytesIO()
    client = TestProtocolClient(stream)
    test = FakeTest(id='test1')
    client.startTest(test)
    client.stopTest(test)

    self.setup_step(subunit.SubunitShellCommand(command='test'))
    self.expect_commands(
        ExpectShell(workdir='wkdir', command="test")
        .stdout(stream.getvalue())
        .exit(0))
    self.expect_outcome(result=SUCCESS, state_string="shell 1 test passed")
    return self.run_step()

def test_error(self):
    stream = io.BytesIO()
    client = TestProtocolClient(stream)
    test = FakeTest(id='test1')
    client.startTest(test)
    client.addError(test, create_error('error1'))
    client.stopTest(test)

    self.setupStep(subunit.SubunitShellCommand(command='test'))
    self.expectCommands(
        ExpectShell(workdir='wkdir', command="test")
        + Expect.log('stdio', stdout=stream.getvalue())
        + 0)
    self.expectOutcome(
        result=FAILURE,
        state_string="shell Total 1 test(s) 1 error (failure)")
    self.expectLogfile('problems', '''\
test1
testtools.testresult.real._StringException: Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '_error1'
''')
    return self.runStep()

def fork_for_tests(concurrency_num, suite):
    result = []
    if 'BUILDDIR' in os.environ:
        selftestdir = get_test_layer()

    test_blocks = partition_tests(suite, concurrency_num)
    # Clear the tests from the original suite so it doesn't keep them alive
    suite._tests[:] = []
    totaltests = sum(len(x) for x in test_blocks)
    for process_tests in test_blocks:
        numtests = len(process_tests)
        process_suite = unittest.TestSuite(process_tests)
        # Also clear each split list so new suite has only reference
        process_tests[:] = []
        c2pread, c2pwrite = os.pipe()
        # Clear buffers before fork to avoid duplicate output
        sys.stdout.flush()
        sys.stderr.flush()
        pid = os.fork()
        if pid == 0:
            ourpid = os.getpid()
            try:
                newbuilddir = None
                stream = os.fdopen(c2pwrite, 'wb', 1)
                os.close(c2pread)

                (builddir, newbuilddir) = suite.setupfunc("-st-" + str(ourpid), selftestdir, process_suite)

                # Leave stderr and stdout open so we can see test noise
                # Close stdin so that the child goes away if it decides to
                # read from stdin (otherwise it's a roulette to see what
                # child actually gets keystrokes for pdb etc).
                newsi = os.open(os.devnull, os.O_RDWR)
                os.dup2(newsi, sys.stdin.fileno())

                subunit_client = TestProtocolClient(stream)
                # Force buffering of stdout/stderr so the console doesn't get corrupted by test output
                # as per default in parent code
                subunit_client.buffer = True
                subunit_result = AutoTimingTestResultDecorator(subunit_client)
                process_suite.run(
                    ExtraResultsEncoderTestResult(subunit_result))
                if ourpid != os.getpid():
                    os._exit(0)
                if newbuilddir:
                    removebuilddir(newbuilddir)
            except:
                # Don't do anything with process children
                if ourpid != os.getpid():
                    os._exit(1)
                # Try and report traceback on stream, but exit with error
                # even if stream couldn't be created or something else
                # goes wrong. The traceback is formatted to a string and
                # written in one go to avoid interleaving lines from
                # multiple failing children.
                try:
                    stream.write(traceback.format_exc().encode('utf-8'))
                except:
                    sys.stderr.write(traceback.format_exc())
                finally:
                    if newbuilddir:
                        removebuilddir(newbuilddir)
                    stream.flush()
                    os._exit(1)
            stream.flush()
            os._exit(0)
        else:
            os.close(c2pwrite)
            stream = os.fdopen(c2pread, 'rb', 1)
            test = ProtocolTestCase(stream)
            result.append((test, numtests))
    return result, totaltests

class SubunitReporter:
    """
    Reports test output via Subunit.

    @ivar _subunit: The subunit protocol client that we are wrapping.

    @ivar _successful: An internal variable, used to track whether we have
        received only successful results.

    @since: 10.0
    """

    testsRun = None

    def __init__(self, stream=sys.stdout, tbformat="default",
                 realtime=False, publisher=None):
        """
        Construct a L{SubunitReporter}.

        @param stream: A file-like object representing the stream to print
            output to. Defaults to stdout.
        @param tbformat: The format for tracebacks. Ignored, since subunit
            always uses Python's standard format.
        @param realtime: Whether or not to print exceptions in the middle
            of the test results. Ignored, since subunit always does this.
        @param publisher: The log publisher which will be preserved for
            reporting events. Ignored, as it's not relevant to subunit.
        """
        if TestProtocolClient is None:
            raise Exception("Subunit not available")
        self._subunit = TestProtocolClient(stream)
        self._successful = True

    def done(self):
        """
        Record that the entire test suite run is finished.

        We do nothing, since a summary clause is irrelevant to the subunit
        protocol.
        """
        pass

    @property
    def shouldStop(self):
        """
        Whether or not the test runner should stop running tests.
        """
        return self._subunit.shouldStop

    def stop(self):
        """
        Signal that the test runner should stop running tests.
        """
        return self._subunit.stop()

    def wasSuccessful(self):
        """
        Has the test run been successful so far?

        @return: C{True} if we have received no reports of errors or
            failures, C{False} otherwise.
        """
        # Subunit has a bug in its implementation of wasSuccessful, see
        # https://bugs.edge.launchpad.net/subunit/+bug/491090, so we can't
        # simply forward it on.
        return self._successful

    def startTest(self, test):
        """
        Record that C{test} has started.
        """
        return self._subunit.startTest(test)

    def stopTest(self, test):
        """
        Record that C{test} has completed.
        """
        return self._subunit.stopTest(test)

    def addSuccess(self, test):
        """
        Record that C{test} was successful.
        """
        return self._subunit.addSuccess(test)

    def addSkip(self, test, reason):
        """
        Record that C{test} was skipped for C{reason}.

        Some versions of subunit don't have support for addSkip. In those
        cases, the skip is reported as a success.

        @param test: A unittest-compatible C{TestCase}.
        @param reason: The reason for it being skipped. The C{str()} of this
            object will be included in the subunit output stream.
        """
        addSkip = getattr(self._subunit, "addSkip", None)
        if addSkip is None:
            self.addSuccess(test)
        else:
            self._subunit.addSkip(test, reason)

    def addError(self, test, err):
        """
        Record that C{test} failed with an unexpected error C{err}.

        Also marks the run as being unsuccessful, causing
        L{SubunitReporter.wasSuccessful} to return C{False}.
        """
        self._successful = False
        return self._subunit.addError(test, util.excInfoOrFailureToExcInfo(err))

    def addFailure(self, test, err):
        """
        Record that C{test} failed an assertion with the error C{err}.

        Also marks the run as being unsuccessful, causing
        L{SubunitReporter.wasSuccessful} to return C{False}.
        """
        self._successful = False
        return self._subunit.addFailure(test, util.excInfoOrFailureToExcInfo(err))

    def addExpectedFailure(self, test, failure, todo=None):
        """
        Record an expected failure from a test.

        Some versions of subunit do not implement this. For those versions,
        we record a success.
        """
        failure = util.excInfoOrFailureToExcInfo(failure)
        addExpectedFailure = getattr(self._subunit, "addExpectedFailure", None)
        if addExpectedFailure is None:
            self.addSuccess(test)
        else:
            addExpectedFailure(test, failure)

    def addUnexpectedSuccess(self, test, todo=None):
        """
        Record an unexpected success.

        Since subunit has no way of expressing this concept, we record a
        success on the subunit stream.
        """
        # Not represented in pyunit/subunit.
        self.addSuccess(test)

class SubunitReporter(object):
    """
    Reports test output via Subunit.

    @ivar _subunit: The subunit protocol client that we are wrapping.

    @ivar _successful: An internal variable, used to track whether we have
        received only successful results.

    @since: 10.0
    """

    def __init__(self, stream=sys.stdout, tbformat='default',
                 realtime=False, publisher=None):
        """
        Construct a L{SubunitReporter}.

        @param stream: A file-like object representing the stream to print
            output to. Defaults to stdout.
        @param tbformat: The format for tracebacks. Ignored, since subunit
            always uses Python's standard format.
        @param realtime: Whether or not to print exceptions in the middle
            of the test results. Ignored, since subunit always does this.
        @param publisher: The log publisher which will be preserved for
            reporting events. Ignored, as it's not relevant to subunit.
        """
        if TestProtocolClient is None:
            raise Exception("Subunit not available")
        self._subunit = TestProtocolClient(stream)
        self._successful = True

    def done(self):
        """
        Record that the entire test suite run is finished.

        We do nothing, since a summary clause is irrelevant to the subunit
        protocol.
        """
        pass

    def shouldStop(self):
        """
        Whether or not the test runner should stop running tests.
        """
        return self._subunit.shouldStop
    shouldStop = property(shouldStop)

    def stop(self):
        """
        Signal that the test runner should stop running tests.
        """
        return self._subunit.stop()

    def wasSuccessful(self):
        """
        Has the test run been successful so far?

        @return: C{True} if we have received no reports of errors or
            failures, C{False} otherwise.
        """
        # Subunit has a bug in its implementation of wasSuccessful, see
        # https://bugs.edge.launchpad.net/subunit/+bug/491090, so we can't
        # simply forward it on.
        return self._successful

    def startTest(self, test):
        """
        Record that C{test} has started.
        """
        return self._subunit.startTest(test)

    def stopTest(self, test):
        """
        Record that C{test} has completed.
        """
        return self._subunit.stopTest(test)

    def addSuccess(self, test):
        """
        Record that C{test} was successful.
        """
        return self._subunit.addSuccess(test)

    def addSkip(self, test, reason):
        """
        Record that C{test} was skipped for C{reason}.

        Some versions of subunit don't have support for addSkip. In those
        cases, the skip is reported as a success.

        @param test: A unittest-compatible C{TestCase}.
        @param reason: The reason for it being skipped. The C{str()} of this
            object will be included in the subunit output stream.
        """
        addSkip = getattr(self._subunit, 'addSkip', None)
        if addSkip is None:
            self.addSuccess(test)
        else:
            self._subunit.addSkip(test, reason)

    def addError(self, test, err):
        """
        Record that C{test} failed with an unexpected error C{err}.

        Also marks the run as being unsuccessful, causing
        L{SubunitReporter.wasSuccessful} to return C{False}.
        """
        self._successful = False
        return self._subunit.addError(
            test, util.excInfoOrFailureToExcInfo(err))

    def addFailure(self, test, err):
        """
        Record that C{test} failed an assertion with the error C{err}.

        Also marks the run as being unsuccessful, causing
        L{SubunitReporter.wasSuccessful} to return C{False}.
        """
        self._successful = False
        return self._subunit.addFailure(
            test, util.excInfoOrFailureToExcInfo(err))

    def addExpectedFailure(self, test, failure, todo):
        """
        Record an expected failure from a test.

        Some versions of subunit do not implement this. For those versions,
        we record a success.
        """
        failure = util.excInfoOrFailureToExcInfo(failure)
        addExpectedFailure = getattr(self._subunit, 'addExpectedFailure', None)
        if addExpectedFailure is None:
            self.addSuccess(test)
        else:
            addExpectedFailure(test, failure)

    def addUnexpectedSuccess(self, test, todo):
        """
        Record an unexpected success.

        Since subunit has no way of expressing this concept, we record a
        success on the subunit stream.
        """
        # Not represented in pyunit/subunit.
        self.addSuccess(test)

def run(self, test):
    "Run the given test case or test suite."
    result = TestProtocolClient(self.stream)
    result = AutoTimingTestResultDecorator(result)
    test(result)
    return result

def fork_for_tests(concurrency_num, suite):
    result = []
    test_blocks = partition_tests(suite, concurrency_num)
    # Clear the tests from the original suite so it doesn't keep them alive
    suite._tests[:] = []
    totaltests = sum(len(x) for x in test_blocks)
    for process_tests in test_blocks:
        numtests = len(process_tests)
        process_suite = unittest.TestSuite(process_tests)
        # Also clear each split list so new suite has only reference
        process_tests[:] = []
        c2pread, c2pwrite = os.pipe()
        # Clear buffers before fork to avoid duplicate output
        sys.stdout.flush()
        sys.stderr.flush()
        pid = os.fork()
        if pid == 0:
            ourpid = os.getpid()
            try:
                newbuilddir = None
                stream = os.fdopen(c2pwrite, 'wb', 1)
                os.close(c2pread)

                # Create a new separate BUILDDIR for each group of tests
                if 'BUILDDIR' in os.environ:
                    builddir = os.environ['BUILDDIR']
                    newbuilddir = builddir + "-st-" + str(ourpid)
                    selftestdir = os.path.abspath(builddir + "/../meta-selftest")
                    newselftestdir = newbuilddir + "/meta-selftest"

                    bb.utils.mkdirhier(newbuilddir)
                    oe.path.copytree(builddir + "/conf", newbuilddir + "/conf")
                    oe.path.copytree(builddir + "/cache", newbuilddir + "/cache")
                    oe.path.copytree(selftestdir, newselftestdir)

                    for e in os.environ:
                        if builddir in os.environ[e]:
                            os.environ[e] = os.environ[e].replace(builddir, newbuilddir)

                    subprocess.check_output("git init; git add *; git commit -a -m 'initial'", cwd=newselftestdir, shell=True)

                    # Tried to use bitbake-layers add/remove but it requires recipe parsing and hence is too slow
                    subprocess.check_output("sed %s/conf/bblayers.conf -i -e 's#%s#%s#g'" % (newbuilddir, selftestdir, newselftestdir), cwd=newbuilddir, shell=True)

                    os.chdir(newbuilddir)

                    for t in process_suite:
                        if not hasattr(t, "tc"):
                            continue
                        cp = t.tc.config_paths
                        for p in cp:
                            if selftestdir in cp[p] and newselftestdir not in cp[p]:
                                cp[p] = cp[p].replace(selftestdir, newselftestdir)
                            if builddir in cp[p] and newbuilddir not in cp[p]:
                                cp[p] = cp[p].replace(builddir, newbuilddir)

                # Leave stderr and stdout open so we can see test noise
                # Close stdin so that the child goes away if it decides to
                # read from stdin (otherwise it's a roulette to see what
                # child actually gets keystrokes for pdb etc).
                newsi = os.open(os.devnull, os.O_RDWR)
                os.dup2(newsi, sys.stdin.fileno())

                subunit_client = TestProtocolClient(stream)
                # Force buffering of stdout/stderr so the console doesn't get corrupted by test output
                # as per default in parent code
                subunit_client.buffer = True
                subunit_result = AutoTimingTestResultDecorator(subunit_client)
                process_suite.run(subunit_result)
                if ourpid != os.getpid():
                    os._exit(0)
                if newbuilddir:
                    removebuilddir(newbuilddir)
            except:
                # Don't do anything with process children
                if ourpid != os.getpid():
                    os._exit(1)
                # Try and report traceback on stream, but exit with error
                # even if stream couldn't be created or something else
                # goes wrong. The traceback is formatted to a string and
                # written in one go to avoid interleaving lines from
                # multiple failing children.
                try:
                    stream.write(traceback.format_exc().encode('utf-8'))
                except:
                    sys.stderr.write(traceback.format_exc())
                finally:
                    if newbuilddir:
                        removebuilddir(newbuilddir)
                    stream.flush()
                    os._exit(1)
            stream.flush()
            os._exit(0)
        else:
            os.close(c2pwrite)
            stream = os.fdopen(c2pread, 'rb', 1)
            test = ProtocolTestCase(stream)
            result.append((test, numtests))
    return result, totaltests

def fork_for_tests(concurrency_num, suite):
    result = []
    if 'BUILDDIR' in os.environ:
        selftestdir = get_test_layer()

    test_blocks = partition_tests(suite, concurrency_num)
    # Clear the tests from the original suite so it doesn't keep them alive
    suite._tests[:] = []
    totaltests = sum(len(x) for x in test_blocks)
    for process_tests in test_blocks:
        numtests = len(process_tests)
        process_suite = unittest.TestSuite(process_tests)
        # Also clear each split list so new suite has only reference
        process_tests[:] = []
        c2pread, c2pwrite = os.pipe()
        # Clear buffers before fork to avoid duplicate output
        sys.stdout.flush()
        sys.stderr.flush()
        pid = os.fork()
        if pid == 0:
            ourpid = os.getpid()
            try:
                newbuilddir = None
                stream = os.fdopen(c2pwrite, 'wb', 1)
                os.close(c2pread)

                # Create a new separate BUILDDIR for each group of tests
                if 'BUILDDIR' in os.environ:
                    builddir = os.environ['BUILDDIR']
                    newbuilddir = builddir + "-st-" + str(ourpid)
                    newselftestdir = newbuilddir + "/meta-selftest"

                    bb.utils.mkdirhier(newbuilddir)
                    oe.path.copytree(builddir + "/conf", newbuilddir + "/conf")
                    oe.path.copytree(builddir + "/cache", newbuilddir + "/cache")
                    oe.path.copytree(selftestdir, newselftestdir)

                    for e in os.environ:
                        if builddir in os.environ[e]:
                            os.environ[e] = os.environ[e].replace(builddir, newbuilddir)

                    subprocess.check_output("git init; git add *; git commit -a -m 'initial'", cwd=newselftestdir, shell=True)

                    # Tried to use bitbake-layers add/remove but it requires recipe parsing and hence is too slow
                    subprocess.check_output("sed %s/conf/bblayers.conf -i -e 's#%s#%s#g'" % (newbuilddir, selftestdir, newselftestdir), cwd=newbuilddir, shell=True)

                    os.chdir(newbuilddir)

                    for t in process_suite:
                        if not hasattr(t, "tc"):
                            continue
                        cp = t.tc.config_paths
                        for p in cp:
                            if selftestdir in cp[p] and newselftestdir not in cp[p]:
                                cp[p] = cp[p].replace(selftestdir, newselftestdir)
                            if builddir in cp[p] and newbuilddir not in cp[p]:
                                cp[p] = cp[p].replace(builddir, newbuilddir)

                # Leave stderr and stdout open so we can see test noise
                # Close stdin so that the child goes away if it decides to
                # read from stdin (otherwise it's a roulette to see what
                # child actually gets keystrokes for pdb etc).
                newsi = os.open(os.devnull, os.O_RDWR)
                os.dup2(newsi, sys.stdin.fileno())

                subunit_client = TestProtocolClient(stream)
                # Force buffering of stdout/stderr so the console doesn't get corrupted by test output
                # as per default in parent code
                subunit_client.buffer = True
                subunit_result = AutoTimingTestResultDecorator(subunit_client)
                process_suite.run(subunit_result)
                if ourpid != os.getpid():
                    os._exit(0)
                if newbuilddir:
                    removebuilddir(newbuilddir)
            except:
                # Don't do anything with process children
                if ourpid != os.getpid():
                    os._exit(1)
                # Try and report traceback on stream, but exit with error
                # even if stream couldn't be created or something else
                # goes wrong. The traceback is formatted to a string and
                # written in one go to avoid interleaving lines from
                # multiple failing children.
                try:
                    stream.write(traceback.format_exc().encode('utf-8'))
                except:
                    sys.stderr.write(traceback.format_exc())
                finally:
                    if newbuilddir:
                        removebuilddir(newbuilddir)
                    stream.flush()
                    os._exit(1)
            stream.flush()
            os._exit(0)
        else:
            os.close(c2pwrite)
            stream = os.fdopen(c2pread, 'rb', 1)
            test = ProtocolTestCase(stream)
            result.append((test, numtests))
    return result, totaltests

def run(self, test):
    "Run the given test case or test suite."
    result = TestProtocolClient(self.stream)
    test(result)
    return result

def prepareTestResult(self, result):
    from subunit import TestProtocolClient
    return TestProtocolClient(self.stream)

def test_multiple_errors(self):
    stream = io.BytesIO()
    client = TestProtocolClient(stream)
    test1 = FakeTest(id='test1')
    test2 = FakeTest(id='test2')
    client.startTest(test1)
    client.addError(test1, create_error('error1'))
    client.stopTest(test1)
    client.startTest(test2)
    client.addError(test2, create_error('error2'))
    client.stopTest(test2)

    self.setupStep(subunit.SubunitShellCommand(command='test'))
    self.expectCommands(
        ExpectShell(workdir='wkdir', command="test")
        + Expect.log('stdio', stdout=stream.getvalue())
        + 0)
    self.expectOutcome(
        result=FAILURE,
        state_string="shell Total 2 test(s) 2 errors (failure)")
    self.expectLogfile('problems', re.compile(r'''test1
testtools.testresult.real._StringException:.*ValueError: invalid literal for int\(\) with base 10: '_error1'
test2
testtools.testresult.real._StringException:.*ValueError: invalid literal for int\(\) with base 10: '_error2'
.*''', re.MULTILINE | re.DOTALL))  # noqa pylint: disable=line-too-long
    return self.runStep()