Example #1
# Imports assumed by this recipe (Python 2: the csv module there cannot
# handle unicode, so rows are round-tripped through UTF-8).
import codecs
import csv
from cStringIO import StringIO

import six


class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        self.writer.writerow([six.text_type(s).encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue (seek as well: on an io.StringIO port, truncate()
        # does not move the file position)
        self.queue.seek(0)
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
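
A possible usage sketch (hypothetical file name). The recipe targets Python 2, where the csv module cannot write unicode directly, so the target file is opened in binary mode:

# Hypothetical usage of the writer above (Python 2).
with open('out.csv', 'wb') as f:
    writer = UnicodeWriter(f, encoding='utf-8')
    writer.writerow([u'name', u'city'])
    writer.writerows([
        [u'J\u00fcrgen', u'M\u00fcnchen'],
        [u'Ad\u00e8le', u'Montr\u00e9al'],
    ])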
Example #2
class ResultCollector(object):
    """Collecter for test results.  This handles creating
    :class:`~.TestResult` instances and handing them off the registered
    result output handlers.

    """

    # Temporary compatibility with unittest's runner
    separator2 = separator2

    def __init__(self, buffer=False, failfast=False):
        self.buffer = buffer
        self.failfast = failfast
        self._result_handlers = []
        self._sorted_handlers = None
        self.testsRun = 0
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.skipped = []
        self.failures = []
        self.errors = []
        self.shouldStop = False
        self._successful = True
        self._mirror_output = False
        self._stderr_buffer = None
        self._stdout_buffer = None
        self._original_stderr = sys.stderr
        self._original_stdout = sys.stdout
        self._test_timing = {}

    @property
    def _handlers(self):
        if self._sorted_handlers is None:
            from .plugins.result_handler import sort_result_handlers
            self._sorted_handlers = sort_result_handlers(self._result_handlers)
        return self._sorted_handlers

    @staticmethod
    def _testcase_to_key(test):
        return (type(test), test._testMethodName)

    def _setup_stdout(self):
        """Hook stdout and stderr if buffering is enabled.

        """
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = StringIO()
                self._stdout_buffer = StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def _restore_stdout(self):
        """Unhook stdout and stderr if buffering is enabled.

        """
        if self.buffer:
            if self._mirror_output:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)

            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()

    def printErrors(self):  # pragma: no cover
        # FIXME: Remove
        pass

    def add_result_handler(self, handler):
        """Register a new result handler.

        """
        self._result_handlers.append(handler)
        # Invalidate the sorted-handler cache so the new handler is included
        self._sorted_handlers = None

    def startTest(self, test, start_time=None):
        """Indicate that an individual test is starting.

        Parameters
        ----------
        test : unittest.TestCase
            The test that is starting.
        start_time : datetime
            An internal parameter to allow the parallel test runner to
            set the actual start time of a test run in a subprocess.

        """
        if start_time is None:
            start_time = datetime.utcnow()
        self._test_timing[self._testcase_to_key(test)] = start_time
        self._mirror_output = False
        self._setup_stdout()
        self.testsRun += 1
        for handler in self._handlers:
            handler.start_test(test)

    def stopTest(self, test):
        """Indicate that an individual test has completed.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.

        """
        for handler in self._handlers:
            handler.stop_test(test)
        self._restore_stdout()
        self._mirror_output = False

    def startTestRun(self):
        """Indicate that the test run is starting.

        """
        for handler in self._handlers:
            handler.start_test_run()

    def stopTestRun(self):
        """Indicate that the test run has completed.

        """
        for handler in self._handlers:
            handler.stop_test_run()

    def add_result(self, result):
        """Add an already-constructed :class:`~.TestResult` to this
        :class:`~.ResultCollector`.

        This may be used when collecting results created by other
        ResultCollectors (e.g. in subprocesses).

        """
        for handler in self._handlers:
            handler(result)
        if self._successful and result.status not in _successful_results:
            self._successful = False

    def _handle_result(self, test, status, exception=None, message=None):
        """Create a :class:`~.TestResult` and add it to this
        :class:`~ResultCollector`.

        Parameters
        ----------
        test : unittest.TestCase
            The test that this result will represent.
        status : haas.result.TestCompletionStatus
            The status of the test.
        exception : tuple
            ``exc_info`` tuple ``(type, value, traceback)``.
        message : str
            Optional message associated with the result (e.g. skip
            reason).

        """
        if self.buffer:
            stderr = self._stderr_buffer.getvalue()
            stdout = self._stdout_buffer.getvalue()
        else:
            stderr = stdout = None

        started_time = self._test_timing.get(self._testcase_to_key(test))
        if started_time is None and isinstance(test, ErrorHolder):
            started_time = datetime.utcnow()
        elif started_time is None:
            raise RuntimeError(
                'Missing test start! Please report this error as a bug in '
                'haas.')

        completion_time = datetime.utcnow()
        duration = TestDuration(started_time, completion_time)
        result = TestResult.from_test_case(
            test,
            status,
            duration=duration,
            exception=exception,
            message=message,
            stdout=stdout,
            stderr=stderr,
        )
        self.add_result(result)
        return result

    @failfast
    def addError(self, test, exception):
        """Register that a test ended in an error.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.
        exception : tuple
            ``exc_info`` tuple ``(type, value, traceback)``.

        """
        result = self._handle_result(
            test, TestCompletionStatus.error, exception=exception)
        self.errors.append(result)
        self._mirror_output = True

    @failfast
    def addFailure(self, test, exception):
        """Register that a test ended with a failure.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.
        exception : tuple
            ``exc_info`` tuple ``(type, value, traceback)``.

        """
        result = self._handle_result(
            test, TestCompletionStatus.failure, exception=exception)
        self.failures.append(result)
        self._mirror_output = True

    def addSuccess(self, test):
        """Register that a test ended in success.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.

        """
        self._handle_result(test, TestCompletionStatus.success)

    def addSkip(self, test, reason):
        """Register that a test that was skipped.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.
        reason : str
            The reason the test was skipped.

        """
        result = self._handle_result(
            test, TestCompletionStatus.skipped, message=reason)
        self.skipped.append(result)

    def addExpectedFailure(self, test, exception):
        """Register that a test that failed and was expected to fail.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.
        exception : tuple
            ``exc_info`` tuple ``(type, value, traceback)``.

        """
        result = self._handle_result(
            test, TestCompletionStatus.expected_failure, exception=exception)
        self.expectedFailures.append(result)

    @failfast
    def addUnexpectedSuccess(self, test):
        """Register a test that passed unexpectedly.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.

        """
        result = self._handle_result(
            test, TestCompletionStatus.unexpected_success)
        self.unexpectedSuccesses.append(result)

    def wasSuccessful(self):
        """Return ``True`` if the run was successful.

        """
        return self._successful

    def stop(self):
        """Set the ``shouldStop`` flag, used by the test cases to determine if
        they should terminate early.

        """
        self.shouldStop = True
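
A sketch of driving the collector with a plain unittest suite. The handler below is hypothetical (real handlers come from haas's plugins), and sort_result_handlers is assumed to accept any list of handler-like objects:

import unittest


class PrintingHandler(object):
    # Hypothetical minimal handler: the collector notifies it via the
    # start/stop hooks and calls it with each completed TestResult.
    def start_test_run(self):
        pass

    def stop_test_run(self):
        pass

    def start_test(self, test):
        pass

    def stop_test(self, test):
        pass

    def __call__(self, result):
        print(result)


class ExampleTest(unittest.TestCase):
    def test_pass(self):
        self.assertTrue(True)


collector = ResultCollector(buffer=True)
collector.add_result_handler(PrintingHandler())
collector.startTestRun()
unittest.defaultTestLoader.loadTestsFromTestCase(ExampleTest).run(collector)
collector.stopTestRun()
print(collector.wasSuccessful())  # True when nothing failed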
Example #3
class TestResult(unittest.TestResult):
    """Holder for test result information.

    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.

    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    _previousTestClass = None
    _moduleSetUpFailed = False

    def __init__(self, stream=None, descriptions=None, verbosity=None):
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        self.buffer = False
        self.tb_locals = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        self._mirrorOutput = False

    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = StringIO()
                self._stdout_buffer = StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def startTestRun(self):
        """Called once before any tests are executed.

        See startTest for a method called before each test.
        """

    def stopTest(self, test):
        """Called when the given test has been run"""
        if self.buffer:
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)

            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()
        self._mirrorOutput = False

    def stopTestRun(self):
        """Called once after all tests are executed.

        See stopTest for a method called after each test.
        """

    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    @failfast
    def addFailure(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    def addSubTest(self, test, subtest, err):
        """Called at the end of a subtest.
        'err' is None if the subtest ended successfully, otherwise it's a
        tuple of values as returned by sys.exc_info().
        """
        # By default, we don't do anything with successful subtests, but
        # more sophisticated test results might want to record them.
        if err is not None:
            if getattr(self, 'failfast', False):
                self.stop()
            if issubclass(err[0], test.failureException):
                errors = self.failures
            else:
                errors = self.errors
            errors.append((subtest, self._exc_info_to_string(err, test)))
            self._mirrorOutput = True

    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))

    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occured."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))

    @failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)

    def wasSuccessful(self):
        """Tells whether or not this result was a success."""
        # The hasattr check is for test_result's OldResult test.  That
        # way this method works on objects that lack the attribute.
        # (where would such result instances come from? old stored pickles?)
        return ((len(self.failures) == len(self.errors) == 0)
                and (not hasattr(self, 'unexpectedSuccesses')
                     or len(self.unexpectedSuccesses) == 0))

    def stop(self):
        """Indicates that the tests should be aborted."""
        self.shouldStop = True

    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
        else:
            length = None
        tb_e = traceback.TracebackException(exctype,
                                            value,
                                            tb,
                                            limit=length,
                                            capture_locals=self.tb_locals)
        msgLines = list(tb_e.format())

        if self.buffer:
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)

    def _is_relevant_tb_level(self, tb):
        return '__unittest' in tb.tb_frame.f_globals

    def _count_relevant_tb_levels(self, tb):
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length

    def __repr__(self):
        return "<%s run=%i errors=%i failures=%i>" % \
               (util.strclass(self.__class__), self.testsRun, len(self.errors),
                len(self.failures))
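
Because the class mirrors the standard unittest result API, a TestCase can drive it directly. A minimal sketch, assuming the module-level helpers the class relies on (failfast, STDOUT_LINE, STDERR_LINE) are defined:

import unittest


class ExampleTest(unittest.TestCase):
    def test_fails(self):
        self.assertEqual(1, 2)


result = TestResult()
ExampleTest('test_fails').run(result)
print(result.testsRun)         # 1
print(result.wasSuccessful())  # False
print(result.failures[0][1])   # formatted traceback string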
Example #4
class JsonPrinter(resource_printer_base.ResourcePrinter):
    """Prints resource records as a JSON list.

  [JSON](http://www.json.org), JavaScript Object Notation.

  Printer attributes:
    no-undefined: Does not display resource data items with null values.

  Attributes:
    _buffer: Buffer stream for record item indentation.
    _delimiter: Delimiter string before the next record.
    _empty: True if no records were output.
    _indent: Resource item indentation.
  """

    # json.dump() does not have a streaming mode. In order to print a resource
    # list it requires the complete list contents. To get around that
    # limitation and print each resource list item, _AddRecord() prints the
    # initial "[", the intervening ",", and the final "]", captures the
    # json.dump() output for each resource list item, and prints it indented
    # by STRUCTURED_INDENTATION spaces.

    _BEGIN_DELIMITER = '[\n'

    def __init__(self, *args, **kwargs):
        super(JsonPrinter, self).__init__(*args,
                                          retain_none_values=True,
                                          **kwargs)
        self._buffer = StringIO()
        self._empty = True
        self._delimiter = self._BEGIN_DELIMITER
        self._indent = ' ' * resource_printer_base.STRUCTURED_INDENTATION

    def __Dump(self, resource, out=None):
        json.dump(resource,
                  fp=out or self._out,
                  indent=resource_printer_base.STRUCTURED_INDENTATION,
                  sort_keys=True,
                  separators=(',', ': '))

    def _AddRecord(self, record, delimit=True):
        """Prints one element of a JSON-serializable Python object resource list.

    Allows intermingled delimit=True and delimit=False.

    Args:
      record: A JSON-serializable object.
      delimit: Dump one record if False, used by PrintSingleRecord().
    """
        self._empty = False
        if delimit:
            delimiter = self._delimiter + self._indent
            self._delimiter = ',\n'
            self.__Dump(record, self._buffer)
            output = self._buffer.getvalue()
            self._buffer.truncate(0)
            self._buffer.seek(0)  # truncate(0) alone does not reset the position
            for line in output.split('\n'):
                self._out.write(delimiter + line)
                delimiter = '\n' + self._indent
        else:
            if self._delimiter != self._BEGIN_DELIMITER:
                self._out.write('\n]\n')
                self._delimiter = self._BEGIN_DELIMITER
            self.__Dump(record)
            self._out.write('\n')

    def Finish(self):
        """Prints the final delimiter and preps for the next resource list."""
        if self._empty:
            self._out.write('[]\n')
        elif self._delimiter != self._BEGIN_DELIMITER:
            self._out.write('\n]\n')
            self._delimiter = self._BEGIN_DELIMITER
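
The streaming trick used by _AddRecord() can be shown standalone: json.dump() each record into a buffer, then re-emit its lines with the list punctuation and indentation added by hand. A self-contained sketch, independent of the resource_printer machinery:

import json
import sys
from io import StringIO


def dump_json_list(records, out=sys.stdout, indent=2):
    # Emit a JSON list one record at a time: '[' before the first
    # record, ',' between records, and ']' after the last one.
    pad = ' ' * indent
    delimiter = '[\n'
    empty = True
    for record in records:
        empty = False
        buf = StringIO()
        json.dump(record, buf, indent=indent, sort_keys=True)
        prefix = delimiter + pad
        delimiter = ',\n'
        for line in buf.getvalue().split('\n'):
            out.write(prefix + line)
            prefix = '\n' + pad
    out.write('[]\n' if empty else '\n]\n')


dump_json_list([{'a': 1}, {'b': [2, 3]}])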
Example #5
class _XMLTestResult(_TextTestResult):
    """
    A test result class that can express test results in an XML report.

    Used by XMLTestRunner.
    """
    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
                 elapsed_times=True, properties=None, infoclass=None):
        _TextTestResult.__init__(self, stream, descriptions, verbosity)
        self._stdout_data = None
        self._stderr_data = None
        self._stdout_capture = StringIO()
        self.__stdout_saved = None
        self._stderr_capture = StringIO()
        self.__stderr_saved = None
        self.successes = []
        self.callback = None
        self.elapsed_times = elapsed_times
        self.properties = properties  # junit testsuite properties
        if infoclass is None:
            self.infoclass = _TestInfo
        else:
            self.infoclass = infoclass

    def _prepare_callback(self, test_info, target_list, verbose_str,
                          short_str):
        """
        Appends an `infoclass` instance to the given target list and sets a
        callback method to be called by the stopTest method.
        """
        target_list.append(test_info)

        def callback():
            """Prints the test method outcome to the stream, as well as
            the elapsed time.
            """

            test_info.test_finished()

            # Ignore the elapsed times for more reliable unit testing
            if not self.elapsed_times:
                self.start_time = self.stop_time = 0

            if self.showAll:
                self.stream.writeln(
                    '%s (%.3fs)' % (verbose_str, test_info.elapsed_time)
                )
            elif self.dots:
                self.stream.write(short_str)

            self.stream.flush()

        self.callback = callback

    def startTest(self, test):
        """
        Called before each test method is executed.
        """
        self.start_time = time.time()
        TestResult.startTest(self, test)

        if self.showAll:
            self.stream.write('  ' + self.getDescription(test))
            self.stream.write(" ... ")
            self.stream.flush()

    def _setupStdout(self):
        """
        Capture stdout / stderr by replacing sys.stdout / sys.stderr
        """
        super(_XMLTestResult, self)._setupStdout()
        self.__stdout_saved = sys.stdout
        sys.stdout = _DuplicateWriter(sys.stdout, self._stdout_capture)
        self.__stderr_saved = sys.stderr
        sys.stderr = _DuplicateWriter(sys.stderr, self._stderr_capture)

    def _restoreStdout(self):
        """
        Stop capturing stdout / stderr and recover sys.stdout / sys.stderr
        """
        if self.__stdout_saved:
            sys.stdout = self.__stdout_saved
            self.__stdout_saved = None
        if self.__stderr_saved:
            sys.stderr = self.__stderr_saved
            self.__stderr_saved = None
        self._stdout_capture.seek(0)
        self._stdout_capture.truncate()
        self._stderr_capture.seek(0)
        self._stderr_capture.truncate()
        super(_XMLTestResult, self)._restoreStdout()

    def _save_output_data(self):
        self._stdout_data = self._stdout_capture.getvalue()
        self._stderr_data = self._stderr_capture.getvalue()

    def stopTest(self, test):
        """
        Called after each test method is executed.
        """
        self._save_output_data()
        # self._stdout_data = sys.stdout.getvalue()
        # self._stderr_data = sys.stderr.getvalue()

        _TextTestResult.stopTest(self, test)
        self.stop_time = time.time()

        if self.callback and callable(self.callback):
            self.callback()
            self.callback = None

    def addSuccess(self, test):
        """
        Called when a test executes successfully.
        """
        self._save_output_data()
        self._prepare_callback(
            self.infoclass(self, test), self.successes, 'OK', '.'
        )

    @failfast
    def addFailure(self, test, err):
        """
        Called when a test method fails.
        """
        self._save_output_data()
        testinfo = self.infoclass(
            self, test, self.infoclass.FAILURE, err)
        self.failures.append((
            testinfo,
            self._exc_info_to_string(err, test)
        ))
        self._prepare_callback(testinfo, [], 'FAIL', 'F')

    @failfast
    def addError(self, test, err):
        """
        Called when a test method raises an error.
        """
        self._save_output_data()
        testinfo = self.infoclass(
            self, test, self.infoclass.ERROR, err)
        self.errors.append((
            testinfo,
            self._exc_info_to_string(err, test)
        ))
        self._prepare_callback(testinfo, [], 'ERROR', 'E')

    def addSubTest(self, testcase, test, err):
        """
        Called when a subTest method raises an error.
        """
        if err is not None:

            errorText = None
            errorValue = None
            errorList = None
            if issubclass(err[0], test.failureException):
                errorText = 'FAIL'
                errorValue = self.infoclass.FAILURE
                errorList = self.failures

            else:
                errorText = 'ERROR'
                errorValue = self.infoclass.ERROR
                errorList = self.errors

            self._save_output_data()

            testinfo = self.infoclass(
                self, testcase, errorValue, err, subTest=test)
            errorList.append((
                testinfo,
                self._exc_info_to_string(err, testcase)
            ))
            self._prepare_callback(testinfo, [], errorText, errorText[0])

    def addSkip(self, test, reason):
        """
        Called when a test method was skipped.
        """
        self._save_output_data()
        testinfo = self.infoclass(
            self, test, self.infoclass.SKIP, reason)
        self.skipped.append((testinfo, reason))
        self._prepare_callback(testinfo, [], 'SKIP', 'S')

    def addExpectedFailure(self, test, err):
        """
        Missing in xmlrunner, copy-pasted from xmlrunner addError.
        """
        self._save_output_data()

        testinfo = self.infoclass(self, test, self.infoclass.ERROR, err)
        testinfo.test_exception_name = 'ExpectedFailure'
        testinfo.test_exception_message = 'EXPECTED FAILURE: {}'.format(testinfo.test_exception_message)

        self.expectedFailures.append((testinfo, self._exc_info_to_string(err, test)))
        self._prepare_callback(testinfo, [], 'EXPECTED FAILURE', 'X')

    @failfast
    def addUnexpectedSuccess(self, test):
        """
        Missing in xmlrunner, copy-pasted from xmlrunner addSuccess.
        """
        self._save_output_data()

        testinfo = self.infoclass(self, test)  # the outcome is not set here because it would require an exception
        testinfo.outcome = self.infoclass.ERROR
        # Since we want an error outcome, we need to provide additional fields:
        testinfo.test_exception_name = 'UnexpectedSuccess'
        testinfo.test_exception_message = ('UNEXPECTED SUCCESS: This test was marked as expected failure but passed, '
                                           'please review it')

        self.unexpectedSuccesses.append(testinfo)
        self._prepare_callback(testinfo, [], 'UNEXPECTED SUCCESS', 'U')

    def printErrorList(self, flavour, errors):
        """
        Writes information about the FAIL or ERROR to the stream.
        """
        for test_info, dummy in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln(
                '%s [%.3fs]: %s' % (flavour, test_info.elapsed_time,
                                    test_info.get_description())
            )
            self.stream.writeln(self.separator2)
            self.stream.writeln('%s' % test_info.get_error_info())
            self.stream.flush()

    def _get_info_by_testcase(self):
        """
        Organizes test results by TestCase module. This information is
        used during the report generation, where an XML report will be
        created for each TestCase.
        """
        tests_by_testcase = {}

        for tests in (self.successes, self.failures, self.errors,
                      self.skipped, self.expectedFailures,
                      self.unexpectedSuccesses):
            for test_info in tests:
                if isinstance(test_info, tuple):
                    # This is a skipped, error or a failure test case
                    test_info = test_info[0]
                testcase_name = test_info.test_name
                if testcase_name not in tests_by_testcase:
                    tests_by_testcase[testcase_name] = []
                tests_by_testcase[testcase_name].append(test_info)

        return tests_by_testcase

    def _report_testsuite_properties(xml_testsuite, xml_document, properties):
        if properties:
            xml_properties = xml_document.createElement('properties')
            xml_testsuite.appendChild(xml_properties)
            for key, value in properties.items():
                prop = xml_document.createElement('property')
                prop.setAttribute('name', str(key))
                prop.setAttribute('value', str(value))
                xml_properties.appendChild(prop)

    _report_testsuite_properties = staticmethod(_report_testsuite_properties)

    def _report_testsuite(suite_name, tests, xml_document, parentElement,
                          properties):
        """
        Appends the testsuite section to the XML document.
        """
        testsuite = xml_document.createElement('testsuite')
        parentElement.appendChild(testsuite)

        testsuite.setAttribute('name', suite_name)
        testsuite.setAttribute('tests', str(len(tests)))

        testsuite.setAttribute(
            'time', '%.3f' % sum(map(lambda e: e.elapsed_time, tests))
        )
        if tests:
            testsuite.setAttribute(
                'timestamp', max(map(lambda e: e.timestamp, tests))
            )
        failures = filter(lambda e: e.outcome == e.FAILURE, tests)
        testsuite.setAttribute('failures', str(len(list(failures))))

        errors = filter(lambda e: e.outcome == e.ERROR, tests)
        testsuite.setAttribute('errors', str(len(list(errors))))

        skips = filter(lambda e: e.outcome == _TestInfo.SKIP, tests)
        testsuite.setAttribute('skipped', str(len(list(skips))))

        _XMLTestResult._report_testsuite_properties(
            testsuite, xml_document, properties)

        for test in tests:
            _XMLTestResult._report_testcase(test, testsuite, xml_document)

        systemout = xml_document.createElement('system-out')
        testsuite.appendChild(systemout)

        stdout = StringIO()
        for test in tests:
            # Merge the stdout from the tests in a class
            if test.stdout is not None:
                stdout.write(test.stdout)
        _XMLTestResult._createCDATAsections(
            xml_document, systemout, stdout.getvalue())

        systemerr = xml_document.createElement('system-err')
        testsuite.appendChild(systemerr)

        stderr = StringIO()
        for test in tests:
            # Merge the stderr from the tests in a class
            if test.stderr is not None:
                stderr.write(test.stderr)
        _XMLTestResult._createCDATAsections(
            xml_document, systemerr, stderr.getvalue())

        return testsuite

    _report_testsuite = staticmethod(_report_testsuite)

    def _test_method_name(test_id):
        """
        Returns the test method name.
        """
        return test_id.split('.')[-1]

    _test_method_name = staticmethod(_test_method_name)

    def _createCDATAsections(xmldoc, node, text):
        text = safe_unicode(text)
        pos = text.find(']]>')
        while pos >= 0:
            tmp = text[0:pos+2]
            cdata = xmldoc.createCDATASection(tmp)
            node.appendChild(cdata)
            text = text[pos+2:]
            pos = text.find(']]>')
        cdata = xmldoc.createCDATASection(text)
        node.appendChild(cdata)

    _createCDATAsections = staticmethod(_createCDATAsections)

    def _report_testcase(test_result, xml_testsuite, xml_document):
        """
        Appends a testcase section to the XML document.
        """
        testcase = xml_document.createElement('testcase')
        xml_testsuite.appendChild(testcase)

        class_name = re.sub(r'^__main__.', '', test_result.id())
        class_name = class_name.rpartition('.')[0]
        testcase.setAttribute('classname', class_name)
        testcase.setAttribute(
            'name', _XMLTestResult._test_method_name(test_result.test_id)
        )
        testcase.setAttribute('time', '%.3f' % test_result.elapsed_time)
        testcase.setAttribute('timestamp', test_result.timestamp)

        if (test_result.outcome != test_result.SUCCESS):
            elem_name = ('failure', 'error', 'skipped')[test_result.outcome-1]
            failure = xml_document.createElement(elem_name)
            testcase.appendChild(failure)
            if test_result.outcome != test_result.SKIP:
                failure.setAttribute(
                    'type',
                    test_result.test_exception_name
                )
                failure.setAttribute(
                    'message',
                    test_result.test_exception_message
                )
                error_info = safe_unicode(test_result.get_error_info())
                _XMLTestResult._createCDATAsections(
                    xml_document, failure, error_info)
            else:
                failure.setAttribute('type', 'skip')
                failure.setAttribute('message', test_result.test_exception_message)

    _report_testcase = staticmethod(_report_testcase)

    def generate_reports(self, test_runner):
        """
        Generates the XML reports to a given XMLTestRunner object.
        """
        from xml.dom.minidom import Document
        all_results = self._get_info_by_testcase()

        outputHandledAsString = \
            isinstance(test_runner.output, six.string_types)

        if (outputHandledAsString and not os.path.exists(test_runner.output)):
            os.makedirs(test_runner.output)

        if not outputHandledAsString:
            doc = Document()
            testsuite = doc.createElement('testsuites')
            doc.appendChild(testsuite)
            parentElement = testsuite

        for suite, tests in all_results.items():
            if outputHandledAsString:
                doc = Document()
                parentElement = doc

            suite_name = suite
            if test_runner.outsuffix:
                # not checking with 'is not None', empty means no suffix.
                suite_name = '%s-%s' % (suite, test_runner.outsuffix)

            # Build the XML file
            testsuite = _XMLTestResult._report_testsuite(
                suite_name, tests, doc, parentElement, self.properties
            )
            xml_content = doc.toprettyxml(
                indent='\t',
                encoding=test_runner.encoding
            )

            if outputHandledAsString:
                filename = os.path.join(
                    test_runner.output,
                    'TEST-%s.xml' % suite_name)
                with open(filename, 'wb') as report_file:
                    report_file.write(xml_content)

                self.stream.writeln('Generated XML report: {}'.format(filename))

        if not outputHandledAsString:
            # Assume that test_runner.output is a stream
            test_runner.output.write(xml_content)

    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        if six.PY3:
            # It works fine in python 3
            try:
                return super(_XMLTestResult, self)._exc_info_to_string(
                    err, test)
            except AttributeError:
                # We keep going using the legacy python <= 2 way
                pass

        # This comes directly from python2 unittest
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next

        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)

        if self.buffer:
            output = self._stdout_capture.getvalue()
            error = self._stderr_capture.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        # This is the extra magic to make sure all lines are str
        encoding = getattr(sys.stdout, 'encoding', 'utf-8')
        lines = []
        for line in msgLines:
            if not isinstance(line, str):
                # utf-8 shouldn't be hard-coded as the fallback, but there
                # is no obvious alternative
                line = line.encode(encoding)
            lines.append(line)

        return ''.join(lines)
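
The _createCDATAsections helper exists because the sequence ]]> terminates a CDATA section and therefore cannot appear inside one. A standalone sketch of the same splitting technique:

from xml.dom.minidom import Document


def append_cdata(doc, node, text):
    # Split on ']]>' so no single CDATA section contains the
    # terminator: ']]' ends one section and '>' starts the next.
    pos = text.find(']]>')
    while pos >= 0:
        node.appendChild(doc.createCDATASection(text[:pos + 2]))
        text = text[pos + 2:]
        pos = text.find(']]>')
    node.appendChild(doc.createCDATASection(text))


doc = Document()
out = doc.createElement('system-out')
doc.appendChild(out)
append_cdata(doc, out, 'captured ]]> output')
print(doc.toprettyxml(indent='  '))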
Example #6
class _XMLTestResult(_TextTestResult):
    """
    A test result class that can express test results in an XML report.

    Used by XMLTestRunner.
    """
    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
                 elapsed_times=True, properties=None, infoclass=None):
        _TextTestResult.__init__(self, stream, descriptions, verbosity)
        self._stdout_data = None
        self._stderr_data = None
        self._stdout_capture = StringIO()
        self.__stdout_saved = None
        self._stderr_capture = StringIO()
        self.__stderr_saved = None
        self.successes = []
        self.callback = None
        self.elapsed_times = elapsed_times
        self.properties = properties  # junit testsuite properties
        self.filename = None
        self.lineno = None
        if infoclass is None:
            self.infoclass = _TestInfo
        else:
            self.infoclass = infoclass

    def _prepare_callback(self, test_info, target_list, verbose_str,
                          short_str):
        """
        Appends an `infoclass` instance to the given target list and sets a
        callback method to be called by the stopTest method.
        """
        test_info.filename = self.filename
        test_info.lineno = self.lineno
        target_list.append(test_info)

        def callback():
            """Prints the test method outcome to the stream, as well as
            the elapsed time.
            """

            test_info.test_finished()

            # Ignore the elapsed times for more reliable unit testing
            if not self.elapsed_times:
                self.start_time = self.stop_time = 0

            if self.showAll:
                self.stream.writeln(
                    '%s (%.3fs)' % (verbose_str, test_info.elapsed_time)
                )
            elif self.dots:
                self.stream.write(short_str)

            self.stream.flush()

        self.callback = callback

    def startTest(self, test):
        """
        Called before each test method is executed.
        """
        self.start_time = time()
        TestResult.startTest(self, test)

        try:
            if getattr(test, '_dt_test', None) is not None:
                # doctest.DocTestCase
                self.filename = test._dt_test.filename
                self.lineno = test._dt_test.lineno
            else:
                # regular unittest.TestCase?
                test_method = getattr(test, test._testMethodName)
                test_class = type(test)
                # Note: inspect can get confused with decorators, so use class.
                self.filename = inspect.getsourcefile(test_class)
                # Handle partial and partialmethod objects.
                test_method = getattr(test_method, 'func', test_method)
                _, self.lineno = inspect.getsourcelines(test_method)
        except (AttributeError, TypeError):
            # issue #188, #189, some frameworks can make test method opaque.
            pass

        if self.showAll:
            self.stream.write('  ' + self.getDescription(test))
            self.stream.write(" ... ")
            self.stream.flush()

    def _setupStdout(self):
        """
        Capture stdout / stderr by replacing sys.stdout / sys.stderr
        """
        super(_XMLTestResult, self)._setupStdout()
        self.__stdout_saved = sys.stdout
        sys.stdout = _DuplicateWriter(sys.stdout, self._stdout_capture)
        self.__stderr_saved = sys.stderr
        sys.stderr = _DuplicateWriter(sys.stderr, self._stderr_capture)

    def _restoreStdout(self):
        """
        Stop capturing stdout / stderr and recover sys.stdout / sys.stderr
        """
        if self.__stdout_saved:
            sys.stdout = self.__stdout_saved
            self.__stdout_saved = None
        if self.__stderr_saved:
            sys.stderr = self.__stderr_saved
            self.__stderr_saved = None
        self._stdout_capture.seek(0)
        self._stdout_capture.truncate()
        self._stderr_capture.seek(0)
        self._stderr_capture.truncate()
        super(_XMLTestResult, self)._restoreStdout()

    def _save_output_data(self):
        self._stdout_data = self._stdout_capture.getvalue()
        self._stderr_data = self._stderr_capture.getvalue()

    def stopTest(self, test):
        """
        Called after each test method is executed.
        """
        self._save_output_data()
        # self._stdout_data = sys.stdout.getvalue()
        # self._stderr_data = sys.stderr.getvalue()

        _TextTestResult.stopTest(self, test)
        self.stop_time = time()

        if self.callback and callable(self.callback):
            self.callback()
            self.callback = None

    def addSuccess(self, test):
        """
        Called when a test executes successfully.
        """
        self._save_output_data()
        self._prepare_callback(
            self.infoclass(self, test), self.successes, 'ok', '.'
        )

    @failfast
    def addFailure(self, test, err):
        """
        Called when a test method fails.
        """
        self._save_output_data()
        testinfo = self.infoclass(
            self, test, self.infoclass.FAILURE, err)
        self.failures.append((
            testinfo,
            self._exc_info_to_string(err, test)
        ))
        self._prepare_callback(testinfo, [], 'FAIL', 'F')

    @failfast
    def addError(self, test, err):
        """
        Called when a test method raises an error.
        """
        self._save_output_data()
        testinfo = self.infoclass(
            self, test, self.infoclass.ERROR, err)
        self.errors.append((
            testinfo,
            self._exc_info_to_string(err, test)
        ))
        self._prepare_callback(testinfo, [], 'ERROR', 'E')

    def addSubTest(self, testcase, test, err):
        """
        Called when a subTest method raises an error.
        """
        if err is not None:

            errorText = None
            errorValue = None
            errorList = None
            if issubclass(err[0], test.failureException):
                errorText = 'FAIL'
                errorValue = self.infoclass.FAILURE
                errorList = self.failures

            else:
                errorText = 'ERROR'
                errorValue = self.infoclass.ERROR
                errorList = self.errors

            self._save_output_data()

            testinfo = self.infoclass(
                self, testcase, errorValue, err, subTest=test)
            errorList.append((
                testinfo,
                self._exc_info_to_string(err, testcase)
            ))
            self._prepare_callback(testinfo, [], errorText, errorText[0])

    def addSkip(self, test, reason):
        """
        Called when a test method was skipped.
        """
        self._save_output_data()
        testinfo = self.infoclass(
            self, test, self.infoclass.SKIP, reason)
        testinfo.test_exception_name = 'skip'
        testinfo.test_exception_message = reason
        self.skipped.append((testinfo, reason))
        self._prepare_callback(testinfo, [], 'skip', 's')

    def addExpectedFailure(self, test, err):
        """
        Missing in xmlrunner, copy-pasted from xmlrunner addError.
        """
        self._save_output_data()

        testinfo = self.infoclass(self, test, self.infoclass.SKIP, err)
        testinfo.test_exception_name = 'XFAIL'
        testinfo.test_exception_message = 'expected failure: {}'.format(testinfo.test_exception_message)

        self.expectedFailures.append((testinfo, self._exc_info_to_string(err, test)))
        self._prepare_callback(testinfo, [], 'expected failure', 'x')

    @failfast
    def addUnexpectedSuccess(self, test):
        """
        Missing in xmlrunner, copy-pasted from xmlrunner addSuccess.
        """
        self._save_output_data()

        testinfo = self.infoclass(self, test)  # the outcome is not set here because it would require an exception
        testinfo.outcome = self.infoclass.ERROR
        # Since we want an error outcome, we need to provide additional fields:
        testinfo.test_exception_name = 'UnexpectedSuccess'
        testinfo.test_exception_message = ('Unexpected success: This test was marked as expected failure but passed, '
                                           'please review it')

        self.unexpectedSuccesses.append((testinfo, 'unexpected success'))
        self._prepare_callback(testinfo, [], 'unexpected success', 'u')

    def printErrorList(self, flavour, errors):
        """
        Writes information about the FAIL or ERROR to the stream.
        """
        for test_info, dummy in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln(
                '%s [%.3fs]: %s' % (flavour, test_info.elapsed_time,
                                    test_info.test_description)
            )
            self.stream.writeln(self.separator2)
            self.stream.writeln('%s' % test_info.get_error_info())
            self.stream.flush()

    def _get_info_by_testcase(self):
        """
        Organizes test results by TestCase module. This information is
        used during the report generation, where an XML report will be
        created for each TestCase.
        """
        tests_by_testcase = {}

        for tests in (self.successes, self.failures, self.errors,
                      self.skipped, self.expectedFailures, self.unexpectedSuccesses):
            for test_info in tests:
                if isinstance(test_info, tuple):
                    # This is a skipped, error or a failure test case
                    test_info = test_info[0]
                testcase_name = test_info.test_name
                if testcase_name not in tests_by_testcase:
                    tests_by_testcase[testcase_name] = []
                tests_by_testcase[testcase_name].append(test_info)

        return tests_by_testcase

    def _report_testsuite_properties(xml_testsuite, xml_document, properties):
        if properties:
            xml_properties = xml_document.createElement('properties')
            xml_testsuite.appendChild(xml_properties)
            for key, value in properties.items():
                prop = xml_document.createElement('property')
                prop.setAttribute('name', str(key))
                prop.setAttribute('value', str(value))
                xml_properties.appendChild(prop)

    _report_testsuite_properties = staticmethod(_report_testsuite_properties)
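
    # Illustration (not part of the original class): for
    # properties={'build': '42'} the helper above appends
    #     <properties><property name="build" value="42"/></properties>
    # to the given testsuite element.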

    def _report_testsuite(suite_name, tests, xml_document, parentElement,
                          properties):
        """
        Appends the testsuite section to the XML document.
        """
        testsuite = xml_document.createElement('testsuite')
        parentElement.appendChild(testsuite)
        module_name = suite_name.rpartition('.')[0]
        file_name = module_name.replace('.', '/') + '.py'

        testsuite.setAttribute('name', suite_name)
        testsuite.setAttribute('tests', str(len(tests)))
        testsuite.setAttribute('file', file_name)

        testsuite.setAttribute(
            'time', '%.3f' % sum(map(lambda e: e.elapsed_time, tests))
        )
        if tests:
            testsuite.setAttribute(
                'timestamp', max(map(lambda e: e.timestamp, tests))
            )
        failures = filter(lambda e: e.outcome == e.FAILURE, tests)
        testsuite.setAttribute('failures', str(len(list(failures))))

        errors = filter(lambda e: e.outcome == e.ERROR, tests)
        testsuite.setAttribute('errors', str(len(list(errors))))

        skips = filter(lambda e: e.outcome == _TestInfo.SKIP, tests)
        testsuite.setAttribute('skipped', str(len(list(skips))))

        _XMLTestResult._report_testsuite_properties(
            testsuite, xml_document, properties)

        for test in tests:
            _XMLTestResult._report_testcase(test, testsuite, xml_document)

        return testsuite

    _report_testsuite = staticmethod(_report_testsuite)

    def _test_method_name(test_id):
        """
        Returns the test method name.
        """
        return test_id.split('.')[-1]

    _test_method_name = staticmethod(_test_method_name)

    def _createCDATAsections(xmldoc, node, text):
        text = safe_unicode(text)
        pos = text.find(']]>')
        while pos >= 0:
            tmp = text[0:pos+2]
            cdata = xmldoc.createCDATASection(tmp)
            node.appendChild(cdata)
            text = text[pos+2:]
            pos = text.find(']]>')
        cdata = xmldoc.createCDATASection(text)
        node.appendChild(cdata)

    _createCDATAsections = staticmethod(_createCDATAsections)
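
    # Illustration (not part of the original class): a CDATA section cannot
    # contain the terminator ']]>', so the helper above splits the text at
    # each occurrence and emits one CDATA section per chunk.  For the input
    # 'a]]>b' it produces <![CDATA[a]]]]><![CDATA[>b]]>, which an XML parser
    # reads back as the original string.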

    def _report_testcase(test_result, xml_testsuite, xml_document):
        """
        Appends a testcase section to the XML document.
        """
        testcase = xml_document.createElement('testcase')
        xml_testsuite.appendChild(testcase)

        class_name = re.sub(r'^__main__\.', '', test_result.id())
        class_name = class_name.rpartition('.')[0]
        testcase.setAttribute('classname', class_name)
        testcase.setAttribute(
            'name', _XMLTestResult._test_method_name(test_result.test_id)
        )
        testcase.setAttribute('time', '%.3f' % test_result.elapsed_time)
        testcase.setAttribute('timestamp', test_result.timestamp)

        if test_result.filename is not None:
            # Try to make filename relative to current directory.
            filename = os.path.relpath(test_result.filename)
            filename = test_result.filename if filename.startswith('../') else filename
            testcase.setAttribute('file', filename)

        if test_result.lineno is not None:
            testcase.setAttribute('line', str(test_result.lineno))

        result_elem_name = test_result.OUTCOME_ELEMENTS[test_result.outcome]

        if result_elem_name is not None:
            result_elem = xml_document.createElement(result_elem_name)
            testcase.appendChild(result_elem)

            result_elem.setAttribute(
                'type',
                test_result.test_exception_name
            )
            result_elem.setAttribute(
                'message',
                test_result.test_exception_message
            )
            if test_result.get_error_info():
                error_info = safe_unicode(test_result.get_error_info())
                _XMLTestResult._createCDATAsections(
                    xml_document, result_elem, error_info)

        if test_result.stdout is not None:
            systemout = xml_document.createElement('system-out')
            testcase.appendChild(systemout)
            _XMLTestResult._createCDATAsections(
                xml_document, systemout, test_result.stdout)

        if test_result.stderr is not None:
            systemout = xml_document.createElement('system-err')
            testcase.appendChild(systemout)
            _XMLTestResult._createCDATAsections(
                xml_document, systemout, test_result.stderr)

    _report_testcase = staticmethod(_report_testcase)

    def generate_reports(self, test_runner):
        """
        Generates the XML reports to a given XMLTestRunner object.
        """
        from xml.dom.minidom import Document
        all_results = self._get_info_by_testcase()

        outputHandledAsString = \
            isinstance(test_runner.output, six.string_types)

        if (outputHandledAsString and not os.path.exists(test_runner.output)):
            os.makedirs(test_runner.output)

        if not outputHandledAsString:
            doc = Document()
            testsuite = doc.createElement('testsuites')
            doc.appendChild(testsuite)
            parentElement = testsuite

        for suite, tests in all_results.items():
            if outputHandledAsString:
                doc = Document()
                parentElement = doc

            suite_name = suite
            if test_runner.outsuffix:
                # not checking with 'is not None', empty means no suffix.
                suite_name = '%s-%s' % (suite, test_runner.outsuffix)

            # Build the XML file
            testsuite = _XMLTestResult._report_testsuite(
                suite_name, tests, doc, parentElement, self.properties
            )

            if outputHandledAsString:
                xml_content = doc.toprettyxml(
                    indent='\t',
                    encoding=test_runner.encoding
                )
                filename = os.path.join(
                    test_runner.output,
                    'TEST-%s.xml' % suite_name)
                with open(filename, 'wb') as report_file:
                    report_file.write(xml_content)

                if self.showAll:
                    self.stream.writeln('Generated XML report: {}'.format(filename))

        if not outputHandledAsString:
            # Assume that test_runner.output is a stream
            xml_content = doc.toprettyxml(
                indent='\t',
                encoding=test_runner.encoding
            )
            test_runner.output.write(xml_content)
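
    # Illustration (not part of the original class): generate_reports() has
    # two modes.  When test_runner.output is a string it is treated as a
    # directory name and one TEST-<suite>.xml file is written per test case
    # class; otherwise it is treated as a writable stream and a single
    # <testsuites> document covering every suite is written to it.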

    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        if six.PY3:
            # The base class implementation works fine in Python 3
            try:
                return super(_XMLTestResult, self)._exc_info_to_string(
                    err, test)
            except AttributeError:
                # Fall back to the legacy Python 2 implementation below
                pass

        # This comes directly from python2 unittest
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next

        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)

        if self.buffer:
            output = self._stdout_capture.getvalue()
            error = self._stderr_capture.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        # This is the extra magic to make sure all lines are str
        encoding = getattr(sys.stdout, 'encoding', 'utf-8')
        lines = []
        for line in msgLines:
            if not isinstance(line, str):
                # UTF-8 shouldn't be hard-coded as the fallback, but there is
                # no obvious alternative
                line = line.encode(encoding)
            lines.append(line)

        return ''.join(lines)
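The class above is normally driven through a runner rather than used directly. Below is a minimal usage sketch, assuming this code comes from the unittest-xml-reporting package, whose XMLTestRunner creates the result object and calls generate_reports():

import unittest

import xmlrunner  # assumption: the unittest-xml-reporting package


class DemoTest(unittest.TestCase):
    def test_passes(self):
        self.assertEqual(1 + 1, 2)


if __name__ == '__main__':
    # With a string output, one TEST-<suite>.xml file per test case class
    # is written into the 'test-reports' directory (see generate_reports above)
    unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))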
Example #8
0
class TestResult(unittest.TestResult):
    """Holder for test result information.

    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.

    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    _previousTestClass = None
    _moduleSetUpFailed = False

    def __init__(self, stream=None, descriptions=None, verbosity=None):
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        self._mirrorOutput = False

    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = StringIO()
                self._stdout_buffer = StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def startTestRun(self):
        """Called once before any tests are executed.

        See startTest for a method called before each test.
        """

    def stopTest(self, test):
        """Called when the given test has been run"""
        if self.buffer:
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)

            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()
        self._mirrorOutput = False

    def stopTestRun(self):
        """Called once after all tests are executed.

        See stopTest for a method called after each test.
        """

    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    @failfast
    def addFailure(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    @failfast
    def addSubTest(self, test, subtest, err):
        """Called at the end of a subtest.
        'err' is None if the subtest ended successfully, otherwise it's a
        tuple of values as returned by sys.exc_info().
        """
        # By default, we don't do anything with successful subtests, but
        # more sophisticated test results might want to record them.
        if err is not None:
            if issubclass(err[0], test.failureException):
                errors = self.failures
            else:
                errors = self.errors
            errors.append((subtest, self._exc_info_to_string(err, test)))
            self._mirrorOutput = True

    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))

    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occured."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))

    @failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)

    def wasSuccessful(self):
        """Tells whether or not this result was a success."""
        # The hasattr check is for test_result's OldResult test.  That
        # way this method works on objects that lack the attribute.
        # (Where would such result instances come from? Old stored pickles?)
        return ((len(self.failures) == len(self.errors) == 0) and
                (not hasattr(self, 'unexpectedSuccesses') or
                 len(self.unexpectedSuccesses) == 0))

    def stop(self):
        """Indicates that the tests should be aborted."""
        self.shouldStop = True

    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)

        if self.buffer:
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)

    def _is_relevant_tb_level(self, tb):
        return '__unittest' in tb.tb_frame.f_globals

    def _count_relevant_tb_levels(self, tb):
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length

    def __repr__(self):
        return "<%s run=%i errors=%i failures=%i>" % \
               (util.strclass(self.__class__), self.testsRun, len(self.errors),
                len(self.failures))
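
A minimal sketch of exercising this TestResult directly; DemoTest is a hypothetical test case, and TestCase.run() accepts any result object with this interface:

import unittest


class DemoTest(unittest.TestCase):
    def test_fails(self):
        self.fail('boom')


result = TestResult()
DemoTest('test_fails').run(result)  # TestCase.run() populates the result
print(result.testsRun)              # 1
print(result.wasSuccessful())       # False
print(result.failures[0][1])        # formatted traceback string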