def test_multiple_errors(self):
    """Two errored tests must produce FAILURE and a 'problems' log entry per test."""
    out = io.BytesIO()
    proto = TestProtocolClient(out)
    first = FakeTest(id='test1')
    second = FakeTest(id='test2')
    # Emit a subunit stream in which both tests error out.
    proto.startTest(first)
    proto.addError(first, create_error('error1'))
    proto.stopTest(first)
    proto.startTest(second)
    proto.addError(second, create_error('error2'))
    proto.stopTest(second)
    self.setup_step(subunit.SubunitShellCommand(command='test'))
    self.expect_commands(
        ExpectShell(workdir='wkdir', command="test")
        .stdout(out.getvalue())
        .exit(0))
    self.expect_outcome(
        result=FAILURE,
        state_string="shell Total 2 test(s) 2 errors (failure)")
    self.expect_log_file('problems', re.compile(r'''test1
testtools.testresult.real._StringException:.*ValueError: invalid literal for int\(\) with base 10: '_error1'

test2
testtools.testresult.real._StringException:.*ValueError: invalid literal for int\(\) with base 10: '_error2'
.*''', re.MULTILINE | re.DOTALL))  # noqa pylint: disable=line-too-long
    return self.run_step()
def test_warnings(self):
    """An error reported outside startTest/stopTest still yields SUCCESS; it only lands in the 'warnings' log."""
    out = io.BytesIO()
    proto = TestProtocolClient(out)
    passed = FakeTest(id='test1')
    stray = FakeTest(id='test2')
    proto.startTest(passed)
    proto.stopTest(passed)
    # test2 was never started, so its error becomes a warning, not a failure.
    proto.addError(stray, create_error('error2'))
    proto.stopTest(stray)
    self.setupStep(subunit.SubunitShellCommand(command='test'))
    self.expectCommands(
        ExpectShell(workdir='wkdir', command="test")
        + Expect.log('stdio', stdout=out.getvalue())
        + 0)
    self.expectOutcome(
        result=SUCCESS,  # N.B. not WARNINGS
        state_string="shell 1 test passed")
    # note that the warnings list is ignored..
    self.expectLogfile('warnings', '''\
error: test2 [
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '_error2'
]
''')
    return self.runStep()
def test_multiple_errors(self):
    """Two errored tests must produce FAILURE and a 'problems' log entry per test."""
    out = io.BytesIO()
    proto = TestProtocolClient(out)
    first = FakeTest(id='test1')
    second = FakeTest(id='test2')
    # Emit a subunit stream in which both tests error out.
    proto.startTest(first)
    proto.addError(first, create_error('error1'))
    proto.stopTest(first)
    proto.startTest(second)
    proto.addError(second, create_error('error2'))
    proto.stopTest(second)
    self.setupStep(subunit.SubunitShellCommand(command='test'))
    self.expectCommands(
        ExpectShell(workdir='wkdir', command="test")
        + Expect.log('stdio', stdout=out.getvalue())
        + 0)
    self.expectOutcome(
        result=FAILURE,
        state_string="shell Total 2 test(s) 2 errors (failure)")
    self.expectLogfile('problems', '''\
test1
testtools.testresult.real._StringException: Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '_error1'

test2
testtools.testresult.real._StringException: Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '_error2'
''')
    return self.runStep()
def test_success(self):
    """A single passing test yields SUCCESS and the '1 test passed' summary."""
    out = io.BytesIO()
    proto = TestProtocolClient(out)
    case = FakeTest(id='test1')
    proto.startTest(case)
    proto.stopTest(case)
    self.setup_step(subunit.SubunitShellCommand(command='test'))
    self.expect_commands(
        ExpectShell(workdir='wkdir', command="test")
        .stdout(out.getvalue())
        .exit(0))
    self.expect_outcome(result=SUCCESS, state_string="shell 1 test passed")
    return self.run_step()
def test_error(self):
    """One errored test yields FAILURE with the traceback in the 'problems' log."""
    out = io.BytesIO()
    proto = TestProtocolClient(out)
    case = FakeTest(id='test1')
    proto.startTest(case)
    proto.addError(case, create_error('error1'))
    proto.stopTest(case)
    self.setupStep(subunit.SubunitShellCommand(command='test'))
    self.expectCommands(
        ExpectShell(workdir='wkdir', command="test")
        + Expect.log('stdio', stdout=out.getvalue())
        + 0)
    self.expectOutcome(
        result=FAILURE,
        state_string="shell Total 1 test(s) 1 error (failure)")
    self.expectLogfile('problems', re.compile(r'''test1
testtools.testresult.real._StringException:.*ValueError: invalid literal for int\(\) with base 10: '_error1'
.*''', re.MULTILINE | re.DOTALL))  # noqa pylint: disable=line-too-long
    return self.runStep()
def test_warnings(self):
    """An error reported outside startTest/stopTest still yields SUCCESS; it only lands in the 'warnings' log."""
    out = io.BytesIO()
    proto = TestProtocolClient(out)
    passed = FakeTest(id='test1')
    stray = FakeTest(id='test2')
    proto.startTest(passed)
    proto.stopTest(passed)
    # test2 was never started, so its error becomes a warning, not a failure.
    proto.addError(stray, create_error('error2'))
    proto.stopTest(stray)
    self.setup_step(subunit.SubunitShellCommand(command='test'))
    self.expect_commands(
        ExpectShell(workdir='wkdir', command="test")
        .stdout(out.getvalue())
        .exit(0))
    self.expect_outcome(
        result=SUCCESS,  # N.B. not WARNINGS
        state_string="shell 1 test passed")
    # note that the warnings list is ignored..
    self.expect_log_file('warnings', re.compile(r'''error: test2 \[.*
ValueError: invalid literal for int\(\) with base 10: '_error2'
\]
''', re.MULTILINE | re.DOTALL))  # noqa pylint: disable=line-too-long
    return self.run_step()
class SubunitReporter:
    """
    Test reporter that emits results as a Subunit stream.

    @ivar _subunit: The wrapped subunit protocol client that actually
        serializes the results.
    @ivar _successful: Internal flag tracking whether every result seen so
        far was a success.

    @since: 10.0
    """

    testsRun = None

    def __init__(self, stream=sys.stdout, tbformat="default", realtime=False, publisher=None):
        """
        Create a L{SubunitReporter} wrapping a subunit protocol client.

        @param stream: File-like object the subunit data is written to;
            stdout by default.
        @param tbformat: Traceback format. Ignored — subunit always emits
            Python's standard traceback format.
        @param realtime: Whether exceptions should be printed as they
            happen. Ignored — subunit always behaves this way.
        @param publisher: Log publisher to preserve for event reporting.
            Ignored, as it is irrelevant to subunit.
        """
        if TestProtocolClient is None:
            raise Exception("Subunit not available")
        self._subunit = TestProtocolClient(stream)
        self._successful = True

    def done(self):
        """
        Record that the whole suite has finished running.

        Intentionally a no-op: the subunit protocol has no summary clause.
        """

    @property
    def shouldStop(self):
        """
        Whether the test runner should stop running tests.
        """
        return self._subunit.shouldStop

    def stop(self):
        """
        Ask the test runner to stop running tests.
        """
        return self._subunit.stop()

    def wasSuccessful(self):
        """
        Report whether the run has been free of errors and failures.

        @return: C{True} until an error or failure is recorded, C{False}
            afterwards.
        """
        # Subunit's own wasSuccessful is buggy
        # (https://bugs.edge.launchpad.net/subunit/+bug/491090), so track
        # success ourselves instead of forwarding the call.
        return self._successful

    def startTest(self, test):
        """
        Record that C{test} has started.
        """
        return self._subunit.startTest(test)

    def stopTest(self, test):
        """
        Record that C{test} has completed.
        """
        return self._subunit.stopTest(test)

    def addSuccess(self, test):
        """
        Record that C{test} passed.
        """
        return self._subunit.addSuccess(test)

    def addSkip(self, test, reason):
        """
        Record that C{test} was skipped for C{reason}.

        Older subunit releases lack addSkip support; for those the skip is
        reported as a success instead.

        @param test: A unittest-compatible C{TestCase}.
        @param reason: Why the test was skipped; its C{str()} is included
            in the subunit output stream.
        """
        skip = getattr(self._subunit, "addSkip", None)
        if skip is not None:
            skip(test, reason)
        else:
            self.addSuccess(test)

    def addError(self, test, err):
        """
        Record that C{test} raised the unexpected error C{err}.

        Also marks the run as unsuccessful, so
        L{SubunitReporter.wasSuccessful} returns C{False} from now on.
        """
        self._successful = False
        info = util.excInfoOrFailureToExcInfo(err)
        return self._subunit.addError(test, info)

    def addFailure(self, test, err):
        """
        Record that C{test} failed an assertion with the error C{err}.

        Also marks the run as unsuccessful, so
        L{SubunitReporter.wasSuccessful} returns C{False} from now on.
        """
        self._successful = False
        info = util.excInfoOrFailureToExcInfo(err)
        return self._subunit.addFailure(test, info)

    def addExpectedFailure(self, test, failure, todo=None):
        """
        Record an expected failure from a test.

        Older subunit releases do not implement this; for those a success
        is recorded instead.
        """
        info = util.excInfoOrFailureToExcInfo(failure)
        report = getattr(self._subunit, "addExpectedFailure", None)
        if report is not None:
            report(test, info)
        else:
            self.addSuccess(test)

    def addUnexpectedSuccess(self, test, todo=None):
        """
        Record an unexpected success.

        Subunit cannot express this concept, so a plain success is written
        to the stream instead.
        """
        # Not represented in pyunit/subunit.
        self.addSuccess(test)
class SubunitReporter(object):
    """
    Reports test output via Subunit.

    @ivar _subunit: The subunit protocol client that we are wrapping.
    @ivar _successful: An internal variable, used to track whether we have
        received only successful results.

    @since: 10.0
    """

    def __init__(self, stream=sys.stdout, tbformat='default', realtime=False,
                 publisher=None):
        """
        Construct a L{SubunitReporter}.

        @param stream: A file-like object representing the stream to print
            output to. Defaults to stdout.
        @param tbformat: The format for tracebacks. Ignored, since subunit
            always uses Python's standard format.
        @param realtime: Whether or not to print exceptions in the middle of
            the test results. Ignored, since subunit always does this.
        @param publisher: The log publisher which will be preserved for
            reporting events. Ignored, as it's not relevant to subunit.
        """
        if TestProtocolClient is None:
            raise Exception("Subunit not available")
        self._subunit = TestProtocolClient(stream)
        self._successful = True

    def done(self):
        """
        Record that the entire test suite run is finished.

        We do nothing, since a summary clause is irrelevant to the subunit
        protocol.
        """
        pass

    @property
    def shouldStop(self):
        """
        Whether or not the test runner should stop running tests.
        """
        return self._subunit.shouldStop

    def stop(self):
        """
        Signal that the test runner should stop running tests.
        """
        return self._subunit.stop()

    def wasSuccessful(self):
        """
        Has the test run been successful so far?

        @return: C{True} if we have received no reports of errors or
            failures, C{False} otherwise.
        """
        # Subunit has a bug in its implementation of wasSuccessful, see
        # https://bugs.edge.launchpad.net/subunit/+bug/491090, so we can't
        # simply forward it on.
        return self._successful

    def startTest(self, test):
        """
        Record that C{test} has started.
        """
        return self._subunit.startTest(test)

    def stopTest(self, test):
        """
        Record that C{test} has completed.
        """
        return self._subunit.stopTest(test)

    def addSuccess(self, test):
        """
        Record that C{test} was successful.
        """
        return self._subunit.addSuccess(test)

    def addSkip(self, test, reason):
        """
        Record that C{test} was skipped for C{reason}.

        Some versions of subunit don't have support for addSkip. In those
        cases, the skip is reported as a success.

        @param test: A unittest-compatible C{TestCase}.
        @param reason: The reason for it being skipped. The C{str()} of this
            object will be included in the subunit output stream.
        """
        addSkip = getattr(self._subunit, 'addSkip', None)
        if addSkip is None:
            self.addSuccess(test)
        else:
            self._subunit.addSkip(test, reason)

    def addError(self, test, err):
        """
        Record that C{test} failed with an unexpected error C{err}.

        Also marks the run as being unsuccessful, causing
        L{SubunitReporter.wasSuccessful} to return C{False}.
        """
        self._successful = False
        return self._subunit.addError(
            test, util.excInfoOrFailureToExcInfo(err))

    def addFailure(self, test, err):
        """
        Record that C{test} failed an assertion with the error C{err}.

        Also marks the run as being unsuccessful, causing
        L{SubunitReporter.wasSuccessful} to return C{False}.
        """
        self._successful = False
        return self._subunit.addFailure(
            test, util.excInfoOrFailureToExcInfo(err))

    def addExpectedFailure(self, test, failure, todo=None):
        """
        Record an expected failure from a test.

        Some versions of subunit do not implement this. For those versions,
        we record a success.

        @param todo: The marker describing why the failure was expected.
            Optional, so that pyunit-style callers, which pass only C{test}
            and C{failure}, keep working.
        """
        failure = util.excInfoOrFailureToExcInfo(failure)
        addExpectedFailure = getattr(self._subunit, 'addExpectedFailure',
                                     None)
        if addExpectedFailure is None:
            self.addSuccess(test)
        else:
            addExpectedFailure(test, failure)

    def addUnexpectedSuccess(self, test, todo=None):
        """
        Record an unexpected success.

        Since subunit has no way of expressing this concept, we record a
        success on the subunit stream.

        @param todo: The marker describing why success was unexpected.
            Optional, so that pyunit-style callers, which pass only C{test},
            keep working.
        """
        # Not represented in pyunit/subunit.
        self.addSuccess(test)