Code Example #1
File: runner.py  Project: gotcha/xdoctest
def _convert_to_test_module(enabled_examples):
    """
    Converts all doctests to unit tests that can exist in a standalone module
    """
    from xdoctest import utils
    module_lines = []
    for example in enabled_examples:
        # Create a unit-testable function for this example
        func_name = 'test_' + example.callname.replace('.', '_')
        body_lines = []
        for part in example._parts:
            body_part = part.format_src(linenos=False,
                                        want=False,
                                        prefix=False,
                                        colored=False,
                                        partnos=False)
            if part.want:
                want_text = '# doctest want:\n'
                want_text += utils.indent(part.want, '# ')
                body_part += '\n' + want_text
            body_lines.append(body_part)
        body = '\n'.join(body_lines)
        func_text = 'def {}():\n'.format(func_name) + utils.indent(body)
        module_lines.append(func_text)

    module_text = '\n\n\n'.join(module_lines)
    return module_text
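The converter above builds each generated test function by indenting the doctest body under a `def` line with `utils.indent`. Below is a minimal, self-contained sketch of that nesting step; it substitutes the standard library's `textwrap.indent` for `xdoctest.utils.indent` (assumed here to behave the same way with a four-space prefix), and the helper and sample names are hypothetical.

import textwrap

def _wrap_as_test_func(func_name, body):
    # Emit a unit-testable function whose body is the doctest source,
    # indented one level under the generated def statement.
    return 'def {}():\n'.format(func_name) + textwrap.indent(body, ' ' * 4)

body = "x = 1\nassert x + 1 == 2\n# doctest want:\n# 2"
print(_wrap_as_test_func('test_mymod_myfunc', body))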
Code Example #2
File: test_traceback.py  Project: MatsLanGoH/xdoctest
def _run_case(source):
    from os.path import join
    from xdoctest import runner, utils
    COLOR = 'yellow'

    def cprint(msg, color=COLOR):
        print(utils.color_text(str(msg), color))

    cprint('\n\n' '\n <RUN CASE> ' '\n  ========  ' '\n', COLOR)

    cprint('DOCTEST SOURCE:')
    cprint('---------------')
    print(
        utils.indent(
            utils.add_line_numbers(utils.highlight_code(source, 'python'))))

    print('')

    import hashlib
    hasher = hashlib.sha1()
    hasher.update(source.encode('utf8'))
    hashid = hasher.hexdigest()[0:8]

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_linenos_' + hashid + '.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout(supress=False) as cap:
            runner.doctest_module(modpath, 'all', argv=[''])

    cprint('\n\n --- </END RUN CASE> --- \n\n', COLOR)
    return cap.text
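The runner harness above names its temporary module after a short SHA-1 of the doctest source so repeated cases never collide. Here is a stdlib-only sketch of that naming-and-writing step, using `tempfile.mkdtemp` in place of `utils.TempDir` (assumed to provide a comparable temporary directory); the helper name is hypothetical.

import hashlib
import os
import tempfile

def _write_case_module(source):
    # Mirror the naming scheme above: a short sha1 of the source keeps
    # generated module names unique across cases.
    hashid = hashlib.sha1(source.encode('utf8')).hexdigest()[0:8]
    dpath = tempfile.mkdtemp()
    modpath = os.path.join(dpath, 'test_linenos_' + hashid + '.py')
    with open(modpath, 'w') as file:
        file.write(source)
    return modpath

print(_write_case_module("def f():\n    pass\n"))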
Code Example #3
def test_runner_syntax_error():
    """
        python testing/test_errors.py test_runner_syntax_error
    """
    from os.path import join
    from xdoctest import runner, utils

    source = utils.codeblock(
        '''
        def test_parsetime_syntax_error1():
            """
                Example:
                    >>> from __future__ import print_function
                    >>> print 'Parse-Time Syntax Error'
            """

        def test_parsetime_syntax_error2():
            """
                Example:
                    >>> def bad_syntax() return for
            """

        def test_runtime_error():
            """
                Example:
                    >>> print('Runtime Error {}'.format(5 / 0))
            """

        def test_runtime_name_error():
            """
                Example:
                    >>> print('Name Error {}'.format(foo))
            """

        def test_runtime_warning():
            """
                Example:
                    >>> import warnings
                    >>> warnings.warn('in-code warning')
            """
        ''')

    temp = utils.TempDir(persist=True)
    temp.ensure()
    dpath = temp.dpath
    modpath = join(dpath, 'test_runner_syntax_error.py')
    with open(modpath, 'w') as file:
        file.write(source)

    with utils.CaptureStdout() as cap:
        runner.doctest_module(modpath, 'all', argv=[''], style='freeform',
                              verbose=0)

    print(utils.indent(cap.text))

    assert '1 run-time warnings' in cap.text
    assert '2 parse-time warnings' in cap.text

    # Assert summary line
    assert '3 warnings' in cap.text
    assert '2 failed' in cap.text
    assert '1 passed' in cap.text
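The assertions above run against text captured from stdout while the doctest runner prints its report. A stdlib-only sketch of that capture-then-assert pattern follows, using `contextlib.redirect_stdout` as a stand-in for `utils.CaptureStdout` (assumed to serve the same purpose); the printed strings are illustrative only.

import contextlib
import io

# Everything printed inside the block is collected for later asserts,
# much like the CaptureStdout usage in the test above.
buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    print('2 failed')
    print('1 passed')
cap_text = buf.getvalue()

assert '2 failed' in cap_text
assert '1 passed' in cap_text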
Code Example #4
def _run_case(source, style='auto'):
    """
    Runs all doctests in a source block

    Args:
        source (str): source code of an entire file

    TODO: run case is over-duplicated and should be separated into a test utils directory
    """
    from os.path import join
    from xdoctest import utils
    from xdoctest import runner
    COLOR = 'yellow'
    def cprint(msg, color=COLOR):
        print(utils.color_text(str(msg), color))
    cprint('\n\n'
           '\n <RUN CASE> '
           '\n  ========  '
           '\n', COLOR)

    cprint('CASE SOURCE:')
    cprint('------------')
    print(utils.indent(
        utils.add_line_numbers(utils.highlight_code(source, 'python'))))

    print('')

    import hashlib
    hasher = hashlib.sha1()
    hasher.update(source.encode('utf8'))
    hashid = hasher.hexdigest()[0:8]

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_linenos_' + hashid + '.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout(supress=False) as cap:
            runner.doctest_module(modpath, 'all', argv=[''], style=style)

    cprint('\n\n --- </END RUN CASE> --- \n\n', COLOR)
    return cap.text
Code Example #5
File: doctest_part.py  Project: jayvdb/xdoctest
    def format_part(self,
                    linenos=True,
                    want=True,
                    startline=1,
                    n_digits=None,
                    colored=False,
                    partnos=False,
                    prefix=True):
        """
        Customizable formatting of the source and want for this doctest.

        CommandLine:
            python -m xdoctest.doctest_part DoctestPart.format_part

        Args:
            linenos (bool): show line numbers
            want (bool): include the want value if it exists
            startline (int): offsets the line numbering
            n_digits (int): number of digits to use for line numbers
            colored (bool): pygmentize the code
            partnos (bool): if True, shows the part number in the string
            prefix (bool): if False, exclude the doctest `>>> ` prefix

        CommandLine:
            python -m xdoctest.doctest_part DoctestPart.format_part:0

        Example:
            >>> from xdoctest.parser import *
            >>> self = DoctestPart(exec_lines=['print(123)'],
            >>>                    want_lines=['123'], line_offset=0, partno=1)
            >>> # xdoctest: -NORMALIZE_WHITESPACE
            >>> print(self.format_part(partnos=True))
            (p1) 1 >>> print(123)
                   123

        Example:
            >>> from xdoctest.parser import *
            >>> self = DoctestPart(exec_lines=['print(123)'],
            >>>                    want_lines=['123'], line_offset=0, partno=1)
            >>> # xdoctest: -NORMALIZE_WHITESPACE
            >>> print(self.format_part(partnos=False, prefix=False,
            >>>                       linenos=False, want=False))
            print(123)
        """
        import math
        from xdoctest import utils
        if prefix:
            # Show the original line prefix when possible
            if self.orig_lines is None:
                src_text = utils.indent(self.source, '>>> ')
            else:
                src_text = '\n'.join(self.orig_lines)
        else:
            src_text = self.source

        want_text = self.want if self.want else ''

        if n_digits is None:
            endline = startline + self.n_lines
            n_digits = math.log(max(1, endline), 10)
            n_digits = int(math.ceil(n_digits))

        part_lines = src_text.splitlines()
        n_spaces = 0

        if linenos:
            n_spaces += n_digits + 1
            start = startline + self.line_offset
            part_lines = utils.add_line_numbers(part_lines,
                                                n_digits=n_digits,
                                                start=start)

        if partnos:
            part_lines = [
                '(p{}) {}'.format(self.partno, line) for line in part_lines
            ]
            n_spaces += 4 + 1  # FIXME could be more robust if more than 9 parts

        want_lines = []
        if want_text:
            want_fmt = ' ' * n_spaces + '{line}'
            for line in want_text.splitlines():
                if want:
                    want_lines.append(want_fmt.format(line=line))

        part_text = '\n'.join(part_lines)
        want_text = '\n'.join(want_lines)

        if colored:
            part_text = utils.highlight_code(part_text, 'python')
            want_text = utils.color_text(want_text, 'green')

        if want_lines:
            part_text += '\n' + want_text

        return part_text
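format_part composes optional line numbers, part numbers, and the `>>> ` prefix, then indents the want block so it lines up under the numbered source. The sketch below reproduces just the numbering-and-alignment step with a hypothetical stand-in for `utils.add_line_numbers` (assumed to prepend a right-aligned counter to each line).

def _add_line_numbers(lines, n_digits=1, start=1):
    # Hypothetical stand-in for utils.add_line_numbers: prepend a
    # right-aligned counter to each source line.
    fmt = '{:>' + str(n_digits) + 'd} {}'
    return [fmt.format(start + i, line) for i, line in enumerate(lines)]

src_lines = ['>>> print(123)']
want_lines = ['123']
n_digits = 1

numbered = _add_line_numbers(src_lines, n_digits=n_digits, start=1)
# The want block is indented so it sits directly under the numbered source.
padding = ' ' * (n_digits + 1)
print('\n'.join(numbered + [padding + line for line in want_lines]))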
Code Example #6
File: runner.py  Project: gotcha/xdoctest
def _print_summary_report(run_summary,
                          parse_warnlist,
                          n_seconds,
                          enabled_examples,
                          durations,
                          config=None,
                          _log=None):
    """
    Summary report formatting and printing
    """
    def cprint(text, color):
        if config is not None and config.get('colored', True):
            _log(utils.color_text(text, color))
        else:
            _log(text)

    # report errors
    failed = run_summary.get('failed', [])
    warned = run_summary.get('warned', [])

    # report parse-time warnings
    if parse_warnlist:
        cprint(
            '\n=== Found {} parse-time warnings ==='.format(
                len(parse_warnlist)), 'yellow')

        for warn_idx, warn in enumerate(parse_warnlist, start=1):
            cprint(
                '--- Parse Warning: {} / {} ---'.format(
                    warn_idx, len(parse_warnlist)), 'yellow')
            _log(
                utils.indent(
                    warnings.formatwarning(warn.message, warn.category,
                                           warn.filename, warn.lineno)))

    # report run-time warnings
    if warned:
        cprint('\n=== Found {} run-time warnings ==='.format(len(warned)),
               'yellow')
        for warn_idx, example in enumerate(warned, start=1):
            cprint(
                '--- Runtime Warning: {} / {} ---'.format(
                    warn_idx, len(warned)), 'yellow')
            _log('example = {!r}'.format(example))
            for warn in example.warn_list:
                _log(
                    utils.indent(
                        warnings.formatwarning(warn.message, warn.category,
                                               warn.filename, warn.lineno)))

    if failed and len(enabled_examples) > 1:
        # If there is more than one test being run, _log out all the
        # errors that occurred so they are consolidated in a single place.
        cprint('\n=== Found {} errors ==='.format(len(failed)), 'red')
        for fail_idx, example in enumerate(failed, start=1):
            cprint('--- Error: {} / {} ---'.format(fail_idx, len(failed)),
                   'red')
            _log(utils.indent('\n'.join(example.repr_failure())))

    # Print command lines to re-run failed tests
    if failed:
        cprint('\n=== Failed tests ===', 'red')
        for example in failed:
            _log(example.cmdline)

    # final summary
    n_passed = run_summary.get('n_passed', 0)
    n_failed = run_summary.get('n_failed', 0)
    n_skipped = run_summary.get('n_skipped', 0)
    n_warnings = len(warned) + len(parse_warnlist)
    pairs = zip([n_failed, n_passed, n_skipped, n_warnings],
                ['failed', 'passed', 'skipped', 'warnings'])
    parts = ['{n} {t}'.format(n=n, t=t) for n, t in pairs if n > 0]
    _fmtstr = '=== ' + ', '.join(parts) + ' in {n_seconds:.2f} seconds ==='
    # _fmtstr = '=== ' + ' '.join(parts) + ' in {n_seconds:.2f} seconds ==='
    summary_line = _fmtstr.format(n_seconds=n_seconds)
    # color text based on worst type of error
    if n_failed > 0:
        cprint(summary_line, 'red')
    elif n_warnings > 0:
        cprint(summary_line, 'yellow')
    else:
        cprint(summary_line, 'green')

    if durations is not None:
        times = run_summary.get('times', {})
        test_time_tups = sorted(times.items(), key=lambda x: x[1])
        if durations > 0:
            test_time_tups = test_time_tups[-durations:]
        for example, n_secs in test_time_tups:
            _log('time: {:0.8f}, test: {}'.format(n_secs, example.cmdline))
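The closing summary line is assembled by pairing each count with its label and keeping only the non-zero pairs. A small self-contained sketch of that step, with illustrative counts:

def _summary_line(n_failed, n_passed, n_skipped, n_warnings, n_seconds):
    # Pair each count with its label and keep only non-zero entries,
    # mirroring the zip/filter used in the report above.
    pairs = zip([n_failed, n_passed, n_skipped, n_warnings],
                ['failed', 'passed', 'skipped', 'warnings'])
    parts = ['{n} {t}'.format(n=n, t=t) for n, t in pairs if n > 0]
    return '=== ' + ', '.join(parts) + ' in {:.2f} seconds ==='.format(n_seconds)

print(_summary_line(2, 1, 0, 3, 0.25))
# === 2 failed, 1 passed, 3 warnings in 0.25 seconds ===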
Code Example #7
    def repr_failure(self, with_tb=True):
        r"""
        Constructs lines detailing information about a failed doctest

        CommandLine:
            python -m xdoctest.core DocTest.repr_failure:0
            python -m xdoctest.core DocTest.repr_failure:1
            python -m xdoctest.core DocTest.repr_failure:2

        Example:
            >>> from xdoctest.core import *
            >>> docstr = utils.codeblock(
                '''
                >>> x = 1
                >>> print(x + 1)
                2
                >>> print(x + 3)
                3
                >>> print(x + 100)
                101
                ''')
            >>> parsekw = dict(fpath='foo.txt', callname='bar', lineno=42)
            >>> self = list(parse_docstr_examples(docstr, **parsekw))[0]
            >>> summary = self.run(on_error='return', verbose=0)
            >>> print('[res]' + '\n[res]'.join(self.repr_failure()))

        Example:
            >>> from xdoctest.core import *
            >>> docstr = utils.codeblock(
                r'''
                >>> 1
                1
                >>> print('.▴  .\n.▴ ▴.') # xdoc: -NORMALIZE_WHITESPACE
                . ▴ .
                .▴ ▴.
                ''')
            >>> parsekw = dict(fpath='foo.txt', callname='bar', lineno=42)
            >>> self = list(parse_docstr_examples(docstr, **parsekw))[0]
            >>> summary = self.run(on_error='return', verbose=1)
            >>> print('[res]' + '\n[res]'.join(self.repr_failure()))


        Example:
            >>> from xdoctest.core import *
            >>> docstr = utils.codeblock(
                '''
                >>> assert True
                >>> assert False
                >>> x = 100
                ''')
            >>> self = list(parse_docstr_examples(docstr))[0]
            >>> summary = self.run(on_error='return', verbose=0)
            >>> print('[res]' + '\n[res]'.join(self.repr_failure()))
        """
        #     '=== LINES ===',
        # ]

        # if '--xdoc-debug' in sys.argv:
        #     lines += ['DEBUG PARTS: ']
        #     for partx, part in enumerate(self._parts):
        #         lines += [str(partx) + ': ' + str(part)]
        #         lines += ['  directives: {!r}'.format(part.directives)]
        #         lines += ['  want: {!r}'.format(str(part.want)[0:25])]
        #         val = self.logged_evals.get(partx, None)
        #         lines += ['  eval: ' + repr(val)]
        #         val = self.logged_stdout.get(partx, None)
        #         lines += ['  stdout: ' + repr(val)]
        #     partx = self._parts.index(self.failed_part)
        #     lines += [
        #         'failed partx = {}'.format(partx)
        #     ]
        #     failed_part = self.failed_part
        #     lines += ['----']
        #     lines += ['Failed part line offset:']
        #     lines += ['{}'.format(failed_part.line_offset)]
        #     lines += ['Failed directives:']
        #     lines += ['{}'.format(list(failed_part.directives))]

        #     lines += ['Failed part source:']
        #     lines += failed_part.source.splitlines()
        #     lines += ['Failed part want:']
        #     if failed_part.want_lines:
        #         lines += failed_part.want_lines
        #     lines += ['Failed part stdout:']
        #     lines += self.logged_stdout[partx].splitlines()
        #     lines += ['Failed part eval:']
        #     lines += [repr(self.logged_evals[partx])]
        #     lines += ['----']

        #     lines += [
        #         # 'self.module = {}'.format(self.module),
        #         # 'self.modpath = {}'.format(self.modpath),
        #         # 'self.modpath = {}'.format(self.modname),
        #         # 'self.global_namespace = {}'.format(self.global_namespace.keys()),
        #     ]
        # lines += ['Failed doctest in ' + self.callname]

        if self.exc_info is None:
            return []
        ex_type, ex_value, tb = self.exc_info
        # Failure line offset wrt the doctest (starts from 0)
        fail_offset = self.failed_line_offset()
        # Failure line number wrt the entire file (starts from 1)
        fail_lineno = self.failed_lineno()

        lines = [
            '* REASON: {}'.format(ex_type.__name__),
            self._color(self._block_prefix + ' DEBUG INFO', 'white'),
            '  XDoc "{}", line {}'.format(self.node, fail_offset + 1) +
            self._color(' <- wrt doctest', 'red'),
        ]

        colored = self.config['colored']
        if fail_lineno is not None:
            fpath = self.UNKNOWN_FPATH if self.fpath is None else self.fpath
            lines += [
                '  File "{}", line {},'.format(fpath, fail_lineno) +
                self._color(' <- wrt source file', 'red')
            ]

        # lines += ['  in doctest "{}", line {}'.format(self.unique_callname,
        #                                               fail_offset + 1) +
        #           self._color(' <- relative line number in the doctest', 'red')]

        # source_text = self.format_src(colored=colored, linenos=True,
        #                               want=False)
        # source_text = utils.indent(source_text)
        # lines += source_text.splitlines()

        def r1_strip_nl(text):
            if text is None:
                return None
            return text[:-1] if text.endswith('\n') else text

        # if self.logged_stdout:
        #     lines += ['stdout results:']
        #     lines += [r1_strip_nl(t) for t in self.logged_stdout.values() if t]

        textgen = self.format_parts(colored=colored, linenos=True, want=False)

        n_digits = 1

        # Logic to break output between pass, failed, and unexecuted parts
        before_part_lines = []
        fail_part_lines = []
        after_parts_lines = []
        temp = [before_part_lines, fail_part_lines, after_parts_lines]
        tindex = 0
        indent_text = ' ' * (5 + n_digits)

        for partx, (part, part_text) in enumerate(zip(self._parts, textgen)):
            if part in self._skipped_parts:
                # temp[tindex] += [utils.indent(part_text, ' ' * 4)]
                # temp[tindex] += [utils.indent(' >>> # skipped', indent_text)]
                continue
            part_out = r1_strip_nl(self.logged_stdout.get(partx, ''))
            if part is self.failed_part:
                tindex += 1
            # Append the part source code
            temp[tindex] += [utils.indent(part_text, ' ' * 4)]
            # Append the part stdout (if it exists)
            if part_out:
                temp[tindex] += [utils.indent(part_out, indent_text)]
            if part is self.failed_part:
                tindex += 1
            # part_eval = self.logged_evals[partx]
            # if part_eval is not NOT_EVALED:
            #     temp[tindex] += [repr(part_eval)]

        lines += [self._color(self._block_prefix + ' PART BREAKDOWN', 'white')]
        if before_part_lines:
            lines += ['Passed Parts:']
            lines += before_part_lines

        if fail_part_lines:
            lines += ['Failed Part:']
            lines += fail_part_lines

        if after_parts_lines:
            lines += ['Remaining Parts:']
            lines += after_parts_lines

        lines += [self._color(self._block_prefix + ' TRACEBACK', 'white')]
        if hasattr(ex_value, 'output_difference'):
            lines += [
                ex_value.output_difference(self._runstate, colored=colored),
                ex_value.output_repr_difference(self._runstate)
            ]
        else:
            if with_tb:
                # TODO: enhance formatting to show an IPython-like output of
                # where the error occurred in the doctest
                tblines = traceback.format_exception(*self.exc_info)

                def _alter_traceback_linenos(self, tblines):
                    def overwrite_lineno(linepart):
                        # Replace the trailing part which is the lineno
                        old_linestr = linepart[-1]  # noqa

                        # This is the lineno we will insert
                        rel_lineno = self.failed_part.line_offset + tb_lineno
                        abs_lineno = self.lineno + rel_lineno - 1

                        new_linestr = 'rel: {rel}, abs: {abs}'.format(
                            rel=rel_lineno,
                            abs=abs_lineno,
                        )

                        linepart = linepart[:-1] + [new_linestr]
                        return linepart

                    new_tblines = []
                    for i, line in enumerate(tblines):

                        if 'xdoctest/xdoctest/doctest_example' in line:
                            # hack, remove ourselves from the traceback
                            continue
                            # new_tblines.append('!!!!!')
                            # raise Exception('foo')
                            # continue

                        if self._partfilename in line:
                            # Intercept the line corresponding to the doctest
                            tbparts = line.split(',')
                            tb_lineno = int(tbparts[-2].strip().split()[1])
                            # modify the line number to match the doctest
                            linepart = tbparts[-2].split(' ')

                            linepart = overwrite_lineno(linepart)

                            tbparts[-2] = ' '.join(linepart)
                            new_line = ','.join(tbparts)

                            # failed_ctx = '>>> ' + self.failed_part.exec_lines[tb_lineno - 1]
                            failed_ctx = self.failed_part.orig_lines[tb_lineno
                                                                     - 1]
                            extra = '    ' + failed_ctx
                            line = (new_line + extra + '\n')

                        # m = '(t{})'.format(i)
                        # line = m + line.replace('\n', '\n' + m)
                        new_tblines.append(line)

                    return new_tblines

                new_tblines = _alter_traceback_linenos(self, tblines)

                if colored:
                    tbtext = '\n'.join(new_tblines)
                    tbtext = utils.highlight_code(tbtext,
                                                  lexer_name='pytb',
                                                  stripall=True)
                    new_tblines = tbtext.splitlines()
                lines += new_tblines

        lines += [self._color(self._block_prefix + ' REPRODUCTION', 'white')]
        lines += ['CommandLine:']
        lines += ['    ' + self.cmdline]
        return lines
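Within the traceback branch, repr_failure formats the exception and then rewrites or drops lines so the report points back into the doctest rather than into the runner's own frames. The sketch below shows only the format-and-filter idea using the standard `traceback` module; the marker string and helper name are hypothetical and not the library's actual rewriting logic.

import sys
import traceback

def _format_filtered_traceback(exc_info, hide_marker='importlib'):
    # Format the exception, then drop entries whose text mentions a
    # marker path, roughly how the runner hides its own frames above.
    tblines = traceback.format_exception(*exc_info)
    return [line for line in tblines if hide_marker not in line]

try:
    1 / 0
except ZeroDivisionError:
    for line in _format_filtered_traceback(sys.exc_info()):
        print(line, end='')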
Code Example #8
File: test_runner.py  Project: MatsLanGoH/xdoctest
def test_runner_failures():
    """
    python testing/test_runner.py  test_runner_failures
    pytest testing/test_runner.py::test_runner_failures -s
    pytest testing/test_runner.py::test_all_disabled -s
    """
    from os.path import join
    from xdoctest import runner, utils

    source = utils.codeblock('''
        def test1():
            """
                Example:
                    >>> pass
            """

        def test2():
            """
                Example:
                    >>> assert False, 'test 2.1'

                Example:
                    >>> assert False, 'test 2.2'
            """

        def test3():
            """
                Example:
                    >>> pass

                Example:
                    >>> pass
            """

        def test4():
            """
                Example:
                    >>> assert False, 'test 3'
            """
        ''')

    temp = utils.TempDir()
    temp.ensure()
    # with utils.TempDir() as temp:
    dpath = temp.dpath
    modpath = join(dpath, 'test_runner_failures.py')

    with open(modpath, 'w') as file:
        file.write(source)

    # Run every doctest; several of them are expected to fail.
    with utils.CaptureStdout(supress=True) as cap:
        try:
            runner.doctest_module(modpath, 'all', argv=[''], verbose=1)
        except Exception:
            pass

    print('\nNOTE: the following output is part of a test')
    print(utils.indent(cap.text, '... '))
    print('NOTE: above output is part of a test')

    # assert '.FFF' in cap.text
    assert '3 / 6 passed' in cap.text
    assert '3 failed 3 passed' in cap.text
Code Example #9
    def output_difference(self, runstate=None, colored=True):
        """
        Return a string describing the differences between the expected output
        for a given example (`example`) and the actual output (`got`).
        The `runstate` contains option flags used to compare `want` and `got`.

        Notes:
            This does not check if got matches want, it only outputs the raw
            differences. Got/Want normalization may make the differences appear
            more exaggerated than they are.
        """
        got = self.got
        want = self.want

        if runstate is None:
            runstate = directive.RuntimeState()

        # Don't normalize because it usually removes the newlines
        runstate_ = runstate.to_dict()

        # Don't normalize whitespaces in report for better visibility
        runstate_['NORMALIZE_WHITESPACE'] = False
        runstate_['IGNORE_WHITESPACE'] = False
        got, want = normalize(got, want, runstate_)

        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        # if not runstate['DONT_ACCEPT_BLANKLINE']:
        #     got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

        got = utils.ensure_unicode(got)

        # Check if we should use diff.
        if self._do_a_fancy_diff(runstate):
            # Split want & got into lines.
            want_lines = want.splitlines(True)
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if runstate['REPORT_UDIFF']:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:]  # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif runstate['REPORT_CDIFF']:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:]  # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif runstate['REPORT_NDIFF']:
                # TODO: Is there a way to make Differ ignore whitespace if that
                # runtime directive is specified?
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                raise ValueError('Invalid difflib option')

            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            diff_text = ''.join(diff)
            if colored:
                diff_text = utils.highlight_code(diff_text, lexer_name='diff')

            text = 'Differences (%s):\n' % kind + utils.indent(diff_text)
        else:
            # If we're not using diff, then simply list the expected
            # output followed by the actual output.
            if want and got:
                if colored:
                    got = utils.color_text(got, 'red')
                    want = utils.color_text(want, 'red')
                text = 'Expected:\n{}\nGot:\n{}'.format(
                    utils.indent(want), utils.indent(got))
            elif want:
                if colored:
                    got = utils.color_text(got, 'red')
                    want = utils.color_text(want, 'red')
                text = 'Expected:\n{}\nGot nothing\n'.format(
                    utils.indent(want))
            elif got:  # nocover
                raise AssertionError('impossible state')
                text = 'Expected nothing\nGot:\n{}'.format(utils.indent(got))
            else:  # nocover
                raise AssertionError('impossible state')
                text = 'Expected nothing\nGot nothing\n'
        return text
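When a fancy diff is requested, the checker hands the expected and actual lines to difflib and strips the two-line file header from the result. A minimal sketch of the `REPORT_UDIFF` branch with illustrative strings:

import difflib

want = 'Parse-Time Syntax Error\n'
got = 'Runtime Error\n'

# Unified diff of expected vs. actual, with the two-line file header
# stripped, mirroring the REPORT_UDIFF branch above.
diff = list(difflib.unified_diff(want.splitlines(True),
                                 got.splitlines(True), n=2))[2:]
diff_text = ''.join(line.rstrip() + '\n' for line in diff)
print('Differences (unified diff with -expected +actual):')
print('\n'.join('    ' + line for line in diff_text.splitlines()))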
Code Example #10
File: runner.py  Project: MatsLanGoH/xdoctest
def _print_summary_report(run_summary, parse_warnlist, n_seconds,
                          enabled_examples):
    """
    Summary report formatting and printing
    """
    def cprint(text, color):
        print(utils.color_text(text, color))

    # report errors
    failed = run_summary.get('failed', [])
    warned = run_summary.get('warned', [])

    # report parse-time warnings
    if parse_warnlist:
        cprint('\n=== Found {} parse-time warnings ==='.format(
            len(parse_warnlist)), 'yellow')

        for warn_idx, warn in enumerate(parse_warnlist, start=1):
            cprint('--- Parse Warning: {} / {} ---'.format(
                warn_idx, len(parse_warnlist)), 'yellow')
            print(utils.indent(
                warnings.formatwarning(warn.message, warn.category,
                                       warn.filename, warn.lineno)))

    # report run-time warnings
    if warned:
        cprint('\n=== Found {} run-time warnings ==='.format(len(warned)), 'yellow')
        for warn_idx, example in enumerate(warned, start=1):
            cprint('--- Runtime Warning: {} / {} ---'.format(warn_idx, len(warned)),
                   'yellow')
            print('example = {!r}'.format(example))
            for warn in example.warn_list:
                print(utils.indent(
                    warnings.formatwarning(warn.message, warn.category,
                                           warn.filename, warn.lineno)))

    if failed and len(enabled_examples) > 1:
        # If there is more than one test being run, print out all the
        # errors that occurred so they are consolidated in a single place.
        cprint('\n=== Found {} errors ==='.format(len(failed)), 'red')
        for fail_idx, example in enumerate(failed, start=1):
            cprint('--- Error: {} / {} ---'.format(fail_idx, len(failed)), 'red')
            print(utils.indent('\n'.join(example.repr_failure())))

    # Print command lines to re-run failed tests
    if failed:
        cprint('\n=== Failed tests ===', 'red')
        for example in failed:
            print(example.cmdline)

    # final summary
    n_passed = run_summary.get('n_passed', 0)
    n_failed = run_summary.get('n_failed', 0)
    n_warnings = len(warned) + len(parse_warnlist)
    pairs = zip([n_failed, n_passed, n_warnings],
                ['failed', 'passed', 'warnings'])
    parts = ['{n} {t}'.format(n=n, t=t) for n, t in pairs if n > 0]
    fmtstr = '=== ' + ' '.join(parts) + ' in {n_seconds:.2f} seconds ==='
    summary_line = fmtstr.format(n_seconds=n_seconds)
    # color text based on worst type of error
    if n_failed > 0:
        summary_line = utils.color_text(summary_line, 'red')
    elif n_warnings > 0:
        summary_line = utils.color_text(summary_line, 'yellow')
    else:
        summary_line = utils.color_text(summary_line, 'green')
    print(summary_line)
Code Example #11
def test_runner_syntax_error():
    """
        python testing/test_errors.py test_runner_syntax_error

        xdoctest -m testing/test_errors.py test_runner_syntax_error
    """
    from os.path import join
    from xdoctest import runner, utils
    import six

    source = utils.codeblock(r'''
        def demo_parsetime_syntax_error1():
            """
                Example:
                    >>> from __future__ import print_function
                    >>> print 'Parse-Time Syntax Error'
            """

        def demo_parsetime_syntax_error2():
            """
                Example:
                    >>> def bad_syntax() return for
            """

        def demo_runtime_error():
            """
                Example:
                    >>> print('Runtime Error {}'.format(5 / 0))
            """

        def demo_runtime_name_error():
            """
                Example:
                    >>> print('Name Error {}'.format(foo))
            """

        def demo_runtime_warning():
            """
                Example:
                    >>> import warnings
                    >>> warnings.warn('in-code warning')
            """
        ''')

    temp = utils.TempDir(persist=True)
    temp.ensure()
    dpath = temp.dpath
    modpath = join(dpath, 'demo_runner_syntax_error.py')
    with open(modpath, 'w') as file:
        file.write(source)

    with utils.CaptureStdout() as cap:
        runner.doctest_module(modpath,
                              'all',
                              argv=[''],
                              style='freeform',
                              verbose=1)

    print('CAPTURED [[[[[[[[')
    print(utils.indent(cap.text))
    print(']]]]]]]] # CAPTURED')

    if six.PY2:
        captext = utils.ensure_unicode(cap.text)
    else:
        captext = cap.text

    if True or not six.PY2:  # Why does this have issues on the dashboards?
        assert '1 run-time warnings' in captext
        assert '2 parse-time warnings' in captext

        # Assert summary line
        assert '3 warnings' in captext
        assert '2 failed' in captext
        assert '1 passed' in captext