Example #1
    def report(self, source, timeout, return_code, results, output):
        """
        Report on the result of running doctests on a given source.

        This doesn't print the :meth:`report_head`, which is assumed
        to be printed already.

        INPUT:

        - ``source`` -- a source from :mod:`sage.doctest.sources`

        - ``timeout`` -- a boolean, whether doctests timed out

        - ``return_code`` -- an int, the return code of the process
          running doctests on that file.

        - ``results`` -- (irrelevant if ``timeout`` or
          ``return_code`` is set) a tuple consisting of

          - ``ntests`` -- the number of doctests

          - ``timings`` -- a
            :class:`sage.doctest.sources.DictAsObject` instance
            storing timing data.

        - ``output`` -- a string, printed if there was some kind of
          failure

        EXAMPLES::

            sage: from sage.doctest.reporting import DocTestReporter
            sage: from sage.doctest.control import DocTestController, DocTestDefaults
            sage: from sage.doctest.sources import FileDocTestSource, DictAsObject
            sage: from sage.doctest.forker import SageDocTestRunner
            sage: from sage.doctest.parsing import SageOutputChecker
            sage: from sage.doctest.util import Timer
            sage: import os, sys, doctest
            sage: filename = os.path.join(os.environ['SAGE_ROOT'],'devel','sage','sage','doctest','reporting.py')
            sage: FDS = FileDocTestSource(filename,True,False,set(['sage']),None)
            sage: DD = DocTestDefaults()
            sage: DC = DocTestController(DD,[filename])
            sage: DTR = DocTestReporter(DC)

        You can report a timeout::

            sage: DTR.report(FDS, True, 0, None, "Output so far...")
                Time out
            **********************************************************************
            Tests run before process timed out:
            Output so far...
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        Or a process that returned a bad exit code::

            sage: DTR.report(FDS, False, 3, None, "Output before trouble")
                Bad exit: 3
            **********************************************************************
            Tests run before process failed:
            Output before trouble
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        Or a process that segfaulted::

            sage: import signal
            sage: DTR.report(FDS, False, -signal.SIGSEGV, None, "Output before trouble")
                Killed due to segmentation fault
            **********************************************************************
            Tests run before process failed:
            Output before trouble
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        Report a timeout with results and a ``SIGKILL``::

            sage: DTR.report(FDS, True, -signal.SIGKILL, (1,None), "Output before trouble")
                Time out after testing finished (and interrupt failed)
            **********************************************************************
            Tests run before process timed out:
            Output before trouble
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        This is an internal error since ``results`` is ``None``::

            sage: DTR.report(FDS, False, 0, None, "All output")
                Error in doctesting framework (bad result returned)
            **********************************************************************
            Tests run before error:
            All output
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        Or tell the user that everything succeeded::

            sage: doctests, extras = FDS.create_doctests(globals())
            sage: runner = SageDocTestRunner(SageOutputChecker(), verbose=False, sage_options=DD, optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS)
            sage: Timer().start().stop().annotate(runner)
            sage: D = DictAsObject({'err':None})
            sage: runner.update_results(D)
            0
            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Good tests")
                [... tests, 0.0 s]
            sage: DTR.stats
            {'sage.doctest.reporting': {'walltime': ...}}

        Or inform the user that some doctests failed::

            sage: runner.failures = 1
            sage: runner.update_results(D)
            1
            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Doctest output including the failure...")
                [... tests, 1 failure, 0.0 s]
        """
        try:
            log = self.controller.log
            postscript = self.postscript
            stats = self.stats
            basename = source.basename
            cmd = self.report_head(source)
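            # The bit flags ORed into self.error_status below are:
            #   1 = doctest failure, 4 = timeout, 8 = bad exit code,
            #   16 = killed by a signal, 32 = TAB character found,
            #   64 = error in the doctesting framework itself.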

            try:
                ntests, result_dict = results
            except (TypeError, ValueError):
                ntests = 0
                result_dict = DictAsObject(dict(err='badresult'))

            if timeout:
                fail_msg = "Time out"
                if ntests > 0:
                    fail_msg += " after testing finished"
                if return_code > 0:
                    fail_msg += " (with error after interrupt)"
                elif return_code < 0:
                    sig = -return_code
                    if sig == signal.SIGKILL:
                        fail_msg += " (and interrupt failed)"
                    else:
                        fail_msg += " (with %s after interrupt)"%signal_name(-return_code)
                log("    %s\n%s\nTests run before process timed out:"%(fail_msg, "*"*70))
                log(output)
                log("*"*70)
                postscript['lines'].append(cmd + "  # %s"%fail_msg)
                stats[basename] = dict(failed=True, walltime=1e6)
                self.error_status |= 4
            elif return_code:
                if return_code > 0:
                    fail_msg = "Bad exit: %s"%return_code
                else:
                    fail_msg = "Killed due to %s"%signal_name(-return_code)
                if ntests > 0:
                    fail_msg += " after testing finished"
                log("    %s\n%s\nTests run before process failed:"%(fail_msg,"*"*70))
                log(output)
                log("*"*70)
                postscript['lines'].append(cmd + "  # %s" % fail_msg)
                stats[basename] = dict(failed=True, walltime=1e6)
                self.error_status |= (8 if return_code > 0 else 16)
            else:
                if hasattr(result_dict, 'walltime') and hasattr(result_dict.walltime, '__len__') and len(result_dict.walltime) > 0:
                    wall = sum(result_dict.walltime) / len(result_dict.walltime)
                else:
                    wall = 1e6
                if hasattr(result_dict, 'cputime') and hasattr(result_dict.cputime, '__len__') and len(result_dict.cputime) > 0:
                    cpu = sum(result_dict.cputime) / len(result_dict.cputime)
                else:
                    cpu = 1e6
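                # result_dict.err records what went wrong: 'badresult'
                # (malformed results tuple), 'noresult' (no result returned),
                # 'tab' (TAB characters in the source), an exception or True
                # (error inside the doctesting framework), or None (the
                # doctests actually ran).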
                if result_dict.err == 'badresult':
                    log("    Error in doctesting framework (bad result returned)\n%s\nTests run before error:"%("*"*70))
                    log(output)
                    log("*"*70)
                    postscript['lines'].append(cmd + "  # Testing error: bad result")
                    self.error_status |= 64
                elif result_dict.err == 'noresult':
                    log("    Error in doctesting framework (no result returned)\n%s\nTests run before error:"%("*"*70))
                    log(output)
                    log("*"*70)
                    postscript['lines'].append(cmd + "  # Testing error: no result")
                    self.error_status |= 64
                elif result_dict.err == 'tab':
                    if len(result_dict.tab_linenos) > 5:
                        result_dict.tab_linenos[3:-1] = ["..."]
                    tabs = " " + ",".join(result_dict.tab_linenos)
                    if len(result_dict.tab_linenos) > 1:
                        tabs = "s" + tabs
                    log("    Error: TAB character found at line%s"%(tabs))
                    postscript['lines'].append(cmd + "  # Tab character found")
                    self.error_status |= 32
                elif result_dict.err is not None:
                    # This case should not occur
                    if result_dict.err is True:
                        fail_msg = "Error in doctesting framework"
                    else:
                        if hasattr(result_dict.err, '__name__'):
                            err = result_dict.err.__name__
                        else:
                            err = repr(result_dict.err)
                        fail_msg = "%s in doctesting framework"%err

                    log("    %s\n%s"%(fail_msg, "*"*70))
                    if output:
                        log("Tests run before doctest exception:\n" + output)
                        log("*"*70)
                    postscript['lines'].append(cmd + "  # %s"%fail_msg)
                    if hasattr(result_dict, 'tb'):
                        log(result_dict.tb)
                    if hasattr(result_dict, 'walltime'):
                        stats[basename] = dict(failed=True, walltime=wall)
                    else:
                        stats[basename] = dict(failed=True, walltime=1e6)
                    self.error_status |= 64
                if result_dict.err is None or result_dict.err == 'tab':
                    f = result_dict.failures
                    if f:
                        postscript['lines'].append(cmd + "  # %s failed" % (count_noun(f, "doctest")))
                        self.error_status |= 1
                    if f or result_dict.err == 'tab':
                        stats[basename] = dict(failed=True, walltime=wall)
                    else:
                        stats[basename] = dict(walltime=wall)
                    postscript['cputime'] += cpu
                    postscript['walltime'] += wall

                    log("    [%s, %s%.1f s]" % (count_noun(ntests, "test"), "%s, "%(count_noun(f, "failure")) if f else "", wall))
            self.sources_completed += 1

        except StandardError:
            import traceback
            traceback.print_exc()
Example #2
    def report(self, source, timeout, return_code, results, output, pid=None):
        """
        Report on the result of running doctests on a given source.

        This doesn't print the :meth:`report_head`, which is assumed
        to be printed already.

        INPUT:

        - ``source`` -- a source from :mod:`sage.doctest.sources`

        - ``timeout`` -- a boolean, whether doctests timed out

        - ``return_code`` -- an int, the return code of the process
          running doctests on that file.

        - ``results`` -- (irrelevant if ``timeout`` or
          ``return_code`` is set) a tuple consisting of

          - ``ntests`` -- the number of doctests

          - ``timings`` -- a
            :class:`sage.doctest.sources.DictAsObject` instance
            storing timing data.

        - ``output`` -- a string, printed if there was some kind of
          failure

        - ``pid`` -- optional integer (default: ``None``). The pid of
          the worker process.

        EXAMPLES::

            sage: from sage.doctest.reporting import DocTestReporter
            sage: from sage.doctest.control import DocTestController, DocTestDefaults
            sage: from sage.doctest.sources import FileDocTestSource, DictAsObject
            sage: from sage.doctest.forker import SageDocTestRunner
            sage: from sage.doctest.parsing import SageOutputChecker
            sage: from sage.doctest.util import Timer
            sage: from sage.env import SAGE_SRC
            sage: import os, sys, doctest
            sage: filename = os.path.join(SAGE_SRC,'sage','doctest','reporting.py')
            sage: DD = DocTestDefaults()
            sage: FDS = FileDocTestSource(filename,DD)
            sage: DC = DocTestController(DD,[filename])
            sage: DTR = DocTestReporter(DC)

        You can report a timeout::

            sage: DTR.report(FDS, True, 0, None, "Output so far...", pid=1234)
                Timed out
            **********************************************************************
            Tests run before process (pid=1234) timed out:
            Output so far...
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        Or a process that returned a bad exit code::

            sage: DTR.report(FDS, False, 3, None, "Output before trouble")
                Bad exit: 3
            **********************************************************************
            Tests run before process failed:
            Output before trouble
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        Or a process that segfaulted::

            sage: import signal
            sage: DTR.report(FDS, False, -signal.SIGSEGV, None, "Output before trouble")
                Killed due to segmentation fault
            **********************************************************************
            Tests run before process failed:
            Output before trouble
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        Report a timeout with results and a ``SIGKILL``::

            sage: DTR.report(FDS, True, -signal.SIGKILL, (1,None), "Output before trouble")
                Timed out after testing finished (and interrupt failed)
            **********************************************************************
            Tests run before process timed out:
            Output before trouble
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        This is an internal error since ``results`` is ``None``::

            sage: DTR.report(FDS, False, 0, None, "All output")
                Error in doctesting framework (bad result returned)
            **********************************************************************
            Tests run before error:
            All output
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        Or tell the user that everything succeeded::

            sage: doctests, extras = FDS.create_doctests(globals())
            sage: runner = SageDocTestRunner(SageOutputChecker(), verbose=False, sage_options=DD, optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS)
            sage: Timer().start().stop().annotate(runner)
            sage: D = DictAsObject({'err':None})
            sage: runner.update_results(D)
            0
            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Good tests")
                [... tests, ... s]
            sage: DTR.stats
            {'sage.doctest.reporting': {'walltime': ...}}

        Or inform the user that some doctests failed::

            sage: runner.failures = 1
            sage: runner.update_results(D)
            1
            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Doctest output including the failure...")
                [... tests, 1 failure, ... s]

        If the user has requested that we report on skipped doctests,
        we do so::

            sage: DC.options = DocTestDefaults(show_skipped=True)
            sage: import collections
            sage: optionals = collections.defaultdict(int)
            sage: optionals['magma'] = 5; optionals['long time'] = 4; optionals[''] = 1; optionals['not tested'] = 2
            sage: D = DictAsObject(dict(err=None,optionals=optionals))
            sage: runner.failures = 0
            sage: runner.update_results(D)
            0
            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Good tests")
                1 unlabeled test not run
                4 long tests not run
                5 magma tests not run
                2 not tested tests not run
                0 tests not run because we ran out of time
                [... tests, ... s]

        Test an internal error in the reporter::

            sage: DTR.report(None, None, None, None, None)
            Traceback (most recent call last):
            ...
            AttributeError: 'NoneType' object has no attribute 'basename'

        The only-errors mode does not output anything on success::

            sage: DD = DocTestDefaults(only_errors=True)
            sage: FDS = FileDocTestSource(filename, DD)
            sage: DC = DocTestController(DD, [filename])
            sage: DTR = DocTestReporter(DC)
            sage: doctests, extras = FDS.create_doctests(globals())
            sage: runner = SageDocTestRunner(SageOutputChecker(), verbose=False, sage_options=DD, optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS)
            sage: Timer().start().stop().annotate(runner)
            sage: D = DictAsObject({'err':None})
            sage: runner.update_results(D)
            0
            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Good tests")

        However, failures are still output in the errors-only mode::

            sage: runner.failures = 1
            sage: runner.update_results(D)
            1
            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Failed test")
                [... tests, 1 failure, ... s]
        """
        log = self.controller.log
        process_name = 'process (pid={0})'.format(pid) if pid else 'process'
        try:
            postscript = self.postscript
            stats = self.stats
            basename = source.basename
            cmd = self.report_head(source)
            try:
                ntests, result_dict = results
            except (TypeError, ValueError):
                ntests = 0
                result_dict = DictAsObject(dict(err='badresult'))
            if timeout:
                fail_msg = "Timed out"
                if ntests > 0:
                    fail_msg += " after testing finished"
                if return_code > 0:
                    fail_msg += " (with error after interrupt)"
                elif return_code < 0:
                    sig = -return_code
                    if sig == signal.SIGQUIT:
                        pass  # and interrupt succeeded
                    elif sig == signal.SIGKILL:
                        fail_msg += " (and interrupt failed)"
                    else:
                        fail_msg += " (with %s after interrupt)" % signal_name(
                            sig)
                log("    %s\n%s\nTests run before %s timed out:" %
                    (fail_msg, "*" * 70, process_name))
                log(output)
                log("*" * 70)
                postscript['lines'].append(cmd + "  # %s" % fail_msg)
                stats[basename] = dict(failed=True, walltime=1e6)
                self.error_status |= 4
            elif return_code:
                if return_code > 0:
                    fail_msg = "Bad exit: %s" % return_code
                else:
                    fail_msg = "Killed due to %s" % signal_name(-return_code)
                if ntests > 0:
                    fail_msg += " after testing finished"
                log("    %s\n%s\nTests run before %s failed:" %
                    (fail_msg, "*" * 70, process_name))
                log(output)
                log("*" * 70)
                postscript['lines'].append(cmd + "  # %s" % fail_msg)
                stats[basename] = dict(failed=True, walltime=1e6)
                self.error_status |= (8 if return_code > 0 else 16)
            else:
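                # Neither a timeout nor a bad exit code: average the recorded
                # wall/cpu times, falling back to the 1e6 sentinel when no
                # timing data is available.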
                if hasattr(result_dict, 'walltime') and hasattr(
                        result_dict.walltime,
                        '__len__') and len(result_dict.walltime) > 0:
                    wall = sum(result_dict.walltime) / len(
                        result_dict.walltime)
                else:
                    wall = 1e6
                if hasattr(result_dict, 'cputime') and hasattr(
                        result_dict.cputime,
                        '__len__') and len(result_dict.cputime) > 0:
                    cpu = sum(result_dict.cputime) / len(result_dict.cputime)
                else:
                    cpu = 1e6
                if result_dict.err == 'badresult':
                    log("    Error in doctesting framework (bad result returned)\n%s\nTests run before error:"
                        % ("*" * 70))
                    log(output)
                    log("*" * 70)
                    postscript['lines'].append(cmd +
                                               "  # Testing error: bad result")
                    self.error_status |= 64
                elif result_dict.err == 'noresult':
                    log("    Error in doctesting framework (no result returned)\n%s\nTests run before error:"
                        % ("*" * 70))
                    log(output)
                    log("*" * 70)
                    postscript['lines'].append(cmd +
                                               "  # Testing error: no result")
                    self.error_status |= 64
                elif result_dict.err == 'tab':
                    if len(result_dict.tab_linenos) > 5:
                        result_dict.tab_linenos[3:-1] = ["..."]
                    tabs = " " + ",".join(result_dict.tab_linenos)
                    if len(result_dict.tab_linenos) > 1:
                        tabs = "s" + tabs
                    log("    Error: TAB character found at line%s" % (tabs))
                    postscript['lines'].append(cmd + "  # Tab character found")
                    self.error_status |= 32
                elif result_dict.err == 'line_number':
                    log("    Error: Source line number found")
                    postscript['lines'].append(cmd +
                                               "  # Source line number found")
                    self.error_status |= 256
                elif result_dict.err is not None:
                    # This case should not occur
                    if result_dict.err is True:
                        fail_msg = "Error in doctesting framework"
                    else:
                        if hasattr(result_dict.err, '__name__'):
                            err = result_dict.err.__name__
                        else:
                            err = repr(result_dict.err)
                        fail_msg = "%s in doctesting framework" % err

                    log("    %s\n%s" % (fail_msg, "*" * 70))
                    if output:
                        log("Tests run before doctest exception:\n" + output)
                        log("*" * 70)
                    postscript['lines'].append(cmd + "  # %s" % fail_msg)
                    if hasattr(result_dict, 'tb'):
                        log(result_dict.tb)
                    if hasattr(result_dict, 'walltime'):
                        stats[basename] = dict(failed=True, walltime=wall)
                    else:
                        stats[basename] = dict(failed=True, walltime=1e6)
                    self.error_status |= 64
                if result_dict.err is None or result_dict.err == 'tab':
                    f = result_dict.failures
                    if f:
                        postscript['lines'].append(cmd + "  # %s failed" %
                                                   (count_noun(f, "doctest")))
                        self.error_status |= 1
                    if f or result_dict.err == 'tab':
                        stats[basename] = dict(failed=True, walltime=wall)
                    else:
                        stats[basename] = dict(walltime=wall)
                    postscript['cputime'] += cpu
                    postscript['walltime'] += wall

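                    # Summarize doctests skipped because of their tags
                    # (optional packages, "long time", "not tested", ...);
                    # most of these lines are only printed when the
                    # show_skipped option is set.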
                    try:
                        optionals = result_dict.optionals
                    except AttributeError:
                        optionals = dict()
                    for tag in sorted(optionals):
                        nskipped = optionals[tag]
                        if tag == "long time":
                            if not self.controller.options.long:
                                if self.controller.options.show_skipped:
                                    log("    %s not run" %
                                        (count_noun(nskipped, "long test")))
                        elif tag == "high_mem":
                            if self.controller.options.memlimit <= 0:
                                seen_other = True
                                log("    %s not run" %
                                    (count_noun(nskipped, "high mem")))
                        elif tag == "not tested":
                            if self.controller.options.show_skipped:
                                log("    %s not run" %
                                    (count_noun(nskipped, "not tested test")))
                        elif tag == "not implemented":
                            if self.controller.options.show_skipped:
                                log("    %s for not implemented functionality not run"
                                    % (count_noun(nskipped, "test")))
                        else:
                            if not self.have_optional_tag(tag):
                                if tag == "bug":
                                    if self.controller.options.show_skipped:
                                        log("    %s not run due to known bugs"
                                            % (count_noun(nskipped, "test")))
                                elif tag == "":
                                    if self.controller.options.show_skipped:
                                        log("    %s not run" % (count_noun(
                                            nskipped, "unlabeled test")))
                                else:
                                    if self.controller.options.show_skipped:
                                        log("    %s not run" % (count_noun(
                                            nskipped, tag + " test")))

                    nskipped = result_dict.walltime_skips
                    if self.controller.options.show_skipped:
                        log("    %s not run because we ran out of time" %
                            (count_noun(nskipped, "test")))

                    if nskipped != 0:
                        # It would be nice to report "a/b tests run" instead
                        # of the percentage printed here.  However, it is not
                        # clear how to extract the part of "ntests" that was
                        # actually run, because of the sig_on_count() tests,
                        # the possibility of running tests multiple times, and
                        # other mangling of these numbers.
                        ntests_run = result_dict.tests
                        total = "%d%% of tests run" % (round(
                            100 * ntests_run / float(ntests_run + nskipped)))
                    else:
                        total = count_noun(ntests, "test")
                    if not (self.controller.options.only_errors and not f):
                        log("    [%s, %s%.2f s]" %
                            (total, "%s, " %
                             (count_noun(f, "failure")) if f else "", wall))

            self.sources_completed += 1

        except Exception:
            import traceback
            log(traceback.format_exc(), end="")
Example #3
    def report(self, source, timeout, return_code, results, output, pid=None):
        """
        Report on the result of running doctests on a given source.

        This doesn't print the :meth:`report_head`, which is assumed
        to be printed already.

        INPUT:

        - ``source`` -- a source from :mod:`sage.doctest.sources`

        - ``timeout`` -- a boolean, whether doctests timed out

        - ``return_code`` -- an int, the return code of the process
          running doctests on that file.

        - ``results`` -- (irrelevant if ``timeout`` or
          ``return_code`` is set) a tuple consisting of

          - ``ntests`` -- the number of doctests

          - ``timings`` -- a
            :class:`sage.doctest.sources.DictAsObject` instance
            storing timing data.

        - ``output`` -- a string, printed if there was some kind of
          failure

        - ``pid`` -- optional integer (default: ``None``). The pid of
          the worker process.

        EXAMPLES::

            sage: from sage.doctest.reporting import DocTestReporter
            sage: from sage.doctest.control import DocTestController, DocTestDefaults
            sage: from sage.doctest.sources import FileDocTestSource, DictAsObject
            sage: from sage.doctest.forker import SageDocTestRunner
            sage: from sage.doctest.parsing import SageOutputChecker
            sage: from sage.doctest.util import Timer
            sage: from sage.env import SAGE_SRC
            sage: import os, sys, doctest
            sage: filename = os.path.join(SAGE_SRC,'sage','doctest','reporting.py')
            sage: DD = DocTestDefaults()
            sage: FDS = FileDocTestSource(filename,DD)
            sage: DC = DocTestController(DD,[filename])
            sage: DTR = DocTestReporter(DC)

        You can report a timeout::

            sage: DTR.report(FDS, True, 0, None, "Output so far...", pid=1234)
                Timed out
            **********************************************************************
            Tests run before process (pid=1234) timed out:
            Output so far...
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        Or a process that returned a bad exit code::

            sage: DTR.report(FDS, False, 3, None, "Output before trouble")
                Bad exit: 3
            **********************************************************************
            Tests run before process failed:
            Output before trouble
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        Or a process that segfaulted::

            sage: import signal
            sage: DTR.report(FDS, False, -signal.SIGSEGV, None, "Output before trouble")
                Killed due to segmentation fault
            **********************************************************************
            Tests run before process failed:
            Output before trouble
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        Report a timeout with results and a ``SIGKILL``::

            sage: DTR.report(FDS, True, -signal.SIGKILL, (1,None), "Output before trouble")
                Timed out after testing finished (and interrupt failed)
            **********************************************************************
            Tests run before process timed out:
            Output before trouble
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        This is an internal error since ``results`` is ``None``::

            sage: DTR.report(FDS, False, 0, None, "All output")
                Error in doctesting framework (bad result returned)
            **********************************************************************
            Tests run before error:
            All output
            **********************************************************************
            sage: DTR.stats
            {'sage.doctest.reporting': {'failed': True, 'walltime': 1000000.0}}

        Or tell the user that everything succeeded::

            sage: doctests, extras = FDS.create_doctests(globals())
            sage: runner = SageDocTestRunner(SageOutputChecker(), verbose=False, sage_options=DD, optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS)
            sage: Timer().start().stop().annotate(runner)
            sage: D = DictAsObject({'err':None})
            sage: runner.update_results(D)
            0
            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Good tests")
                [... tests, ... s]
            sage: DTR.stats
            {'sage.doctest.reporting': {'walltime': ...}}

        Or inform the user that some doctests failed::

            sage: runner.failures = 1
            sage: runner.update_results(D)
            1
            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Doctest output including the failure...")
                [... tests, 1 failure, ... s]

        If the user has requested that we report on skipped doctests,
        we do so::

            sage: DC.options = DocTestDefaults(show_skipped=True)
            sage: import collections
            sage: optionals = collections.defaultdict(int)
            sage: optionals['magma'] = 5; optionals['long time'] = 4; optionals[''] = 1; optionals['not tested'] = 2
            sage: D = DictAsObject(dict(err=None,optionals=optionals))
            sage: runner.failures = 0
            sage: runner.update_results(D)
            0
            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Good tests")
                1 unlabeled test not run
                4 long tests not run
                5 magma tests not run
                2 other tests skipped
                [... tests, ... s]

        Test an internal error in the reporter::

            sage: DTR.report(None, None, None, None, None)
            Traceback (most recent call last):
            ...
            AttributeError: 'NoneType' object has no attribute 'basename'
        """
        log = self.controller.log
        process_name = 'process (pid={0})'.format(pid) if pid else 'process'
        try:
            postscript = self.postscript
            stats = self.stats
            basename = source.basename
            cmd = self.report_head(source)

            try:
                ntests, result_dict = results
            except (TypeError, ValueError):
                ntests = 0
                result_dict = DictAsObject(dict(err='badresult'))

            if timeout:
                fail_msg = "Timed out"
                if ntests > 0:
                    fail_msg += " after testing finished"
                if return_code > 0:
                    fail_msg += " (with error after interrupt)"
                elif return_code < 0:
                    sig = -return_code
                    if sig == signal.SIGKILL:
                        fail_msg += " (and interrupt failed)"
                    else:
                        fail_msg += " (with %s after interrupt)"%signal_name(sig)
                log("    %s\n%s\nTests run before %s timed out:"%(fail_msg, "*"*70, process_name))
                log(output)
                log("*"*70)
                postscript['lines'].append(cmd + "  # %s"%fail_msg)
                stats[basename] = dict(failed=True, walltime=1e6)
                self.error_status |= 4
            elif return_code:
                if return_code > 0:
                    fail_msg = "Bad exit: %s"%return_code
                else:
                    fail_msg = "Killed due to %s"%signal_name(-return_code)
                if ntests > 0:
                    fail_msg += " after testing finished"
                log("    %s\n%s\nTests run before %s failed:"%(fail_msg,"*"*70, process_name))
                log(output)
                log("*"*70)
                postscript['lines'].append(cmd + "  # %s" % fail_msg)
                stats[basename] = dict(failed=True, walltime=1e6)
                self.error_status |= (8 if return_code > 0 else 16)
            else:
                if hasattr(result_dict, 'walltime') and hasattr(result_dict.walltime, '__len__') and len(result_dict.walltime) > 0:
                    wall = sum(result_dict.walltime) / len(result_dict.walltime)
                else:
                    wall = 1e6
                if hasattr(result_dict, 'cputime') and hasattr(result_dict.cputime, '__len__') and len(result_dict.cputime) > 0:
                    cpu = sum(result_dict.cputime) / len(result_dict.cputime)
                else:
                    cpu = 1e6
                if result_dict.err == 'badresult':
                    log("    Error in doctesting framework (bad result returned)\n%s\nTests run before error:"%("*"*70))
                    log(output)
                    log("*"*70)
                    postscript['lines'].append(cmd + "  # Testing error: bad result")
                    self.error_status |= 64
                elif result_dict.err == 'noresult':
                    log("    Error in doctesting framework (no result returned)\n%s\nTests run before error:"%("*"*70))
                    log(output)
                    log("*"*70)
                    postscript['lines'].append(cmd + "  # Testing error: no result")
                    self.error_status |= 64
                elif result_dict.err == 'tab':
                    if len(result_dict.tab_linenos) > 5:
                        result_dict.tab_linenos[3:-1] = ["..."]
                    tabs = " " + ",".join(result_dict.tab_linenos)
                    if len(result_dict.tab_linenos) > 1:
                        tabs = "s" + tabs
                    log("    Error: TAB character found at line%s"%(tabs))
                    postscript['lines'].append(cmd + "  # Tab character found")
                    self.error_status |= 32
                elif result_dict.err is not None:
                    # This case should not occur
                    if result_dict.err is True:
                        fail_msg = "Error in doctesting framework"
                    else:
                        if hasattr(result_dict.err, '__name__'):
                            err = result_dict.err.__name__
                        else:
                            err = repr(result_dict.err)
                        fail_msg = "%s in doctesting framework"%err

                    log("    %s\n%s"%(fail_msg, "*"*70))
                    if output:
                        log("Tests run before doctest exception:\n" + output)
                        log("*"*70)
                    postscript['lines'].append(cmd + "  # %s"%fail_msg)
                    if hasattr(result_dict, 'tb'):
                        log(result_dict.tb)
                    if hasattr(result_dict, 'walltime'):
                        stats[basename] = dict(failed=True, walltime=wall)
                    else:
                        stats[basename] = dict(failed=True, walltime=1e6)
                    self.error_status |= 64
                if result_dict.err is None or result_dict.err == 'tab':
                    f = result_dict.failures
                    if f:
                        postscript['lines'].append(cmd + "  # %s failed" % (count_noun(f, "doctest")))
                        self.error_status |= 1
                    if f or result_dict.err == 'tab':
                        stats[basename] = dict(failed=True, walltime=wall)
                    else:
                        stats[basename] = dict(walltime=wall)
                    postscript['cputime'] += cpu
                    postscript['walltime'] += wall

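                    # With show_skipped, report doctests skipped because of
                    # optional or "long time" tags; "not tested" and "not
                    # implemented" ones are lumped into a single "other tests
                    # skipped" line at the end.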
                    if self.controller.options.show_skipped:
                        try:
                            optionals = result_dict.optionals
                        except AttributeError:
                            optionals = dict()
                        if self.controller.options.optional is not True: # if True we test all optional tags
                            untested = 0  # Report not tested/implemented tests at the end
                            seen_other = False
                            for tag in sorted(optionals.keys()):
                                nskipped = optionals[tag]
                                if tag == "long time":
                                    if not self.controller.options.long:
                                        seen_other = True
                                        log("    %s not run"%(count_noun(nskipped, "long test")))
                                elif tag in ("not tested", "not implemented"):
                                    untested += nskipped
                                else:
                                    if tag not in self.controller.options.optional:
                                        seen_other = True
                                        if tag == "bug":
                                            log("    %s not run due to known bugs"%(count_noun(nskipped, "test")))
                                        elif tag == "":
                                            log("    %s not run"%(count_noun(nskipped, "unlabeled test")))
                                        else:
                                            log("    %s not run"%(count_noun(nskipped, tag + " test")))
                            if untested:
                                log("    %s skipped"%(count_noun(untested, "%stest"%("other " if seen_other else ""))))
                    log("    [%s, %s%.2f s]" % (count_noun(ntests, "test"), "%s, "%(count_noun(f, "failure")) if f else "", wall))
            self.sources_completed += 1

        except Exception:
            import traceback
            log(traceback.format_exc(), end="")
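
For reference, a minimal sketch (the helper below is hypothetical and not part
of :class:`DocTestReporter`) of how a caller could decode the bit flags that
``report`` ORs into ``self.error_status``; the value 256 only appears in the
version that also checks for hard-coded source line numbers::

    # Hypothetical helper: translate DocTestReporter.error_status bits into
    # human-readable reasons, using the values set by report() above.
    ERROR_STATUS_BITS = {
        1: "doctest failure",
        4: "timeout",
        8: "bad exit code",
        16: "killed by a signal",
        32: "TAB character found",
        64: "error in the doctesting framework",
        256: "source line number found",
    }

    def describe_error_status(status):
        """Return the list of failure reasons encoded in ``status``."""
        return [msg for bit, msg in sorted(ERROR_STATUS_BITS.items())
                if status & bit]

    # Example: a run that both timed out and had a doctest failure.
    assert describe_error_status(4 | 1) == ["doctest failure", "timeout"]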