Example No. 1
def _run_case(source):
    from os.path import join
    from xdoctest import runner
    from xdoctest import utils
    COLOR = 'yellow'

    def cprint(msg, color=COLOR):
        print(utils.color_text(str(msg), color))

    cprint('\n\n' '\n <RUN CASE> ' '\n  ========  ' '\n', COLOR)

    cprint('DOCTEST SOURCE:')
    cprint('---------------')
    print(
        utils.indent(
            utils.add_line_numbers(utils.highlight_code(source, 'python'))))

    print('')

    import hashlib
    hasher = hashlib.sha1()
    hasher.update(source.encode('utf8'))
    hashid = hasher.hexdigest()[0:8]

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_linenos_' + hashid + '.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout(supress=False) as cap:
            runner.doctest_module(modpath, 'all', argv=[''])

    cprint('\n\n --- </END RUN CASE> --- \n\n', COLOR)
    return cap.text
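A minimal usage sketch for the helper above (assuming the xdoctest package is installed; the sample docstring and the foo function are illustrative, not part of the original listing):

from xdoctest import utils

source = utils.codeblock(
    '''
    def foo():
        """
        >>> print(3 + 4)
        7
        """
    ''')
captured = _run_case(source)  # runs the doctest in a temporary module and returns captured stdout
print(captured)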
Example No. 2
def _run_case(source, style='auto'):
    """
    Runs all doctests in a source block

    Args:
        source (str): source code of an entire file

    TODO: _run_case is duplicated across test modules and should be moved into a shared test utils directory
    """
    from os.path import join
    from xdoctest import utils
    from xdoctest import runner
    COLOR = 'yellow'
    def cprint(msg, color=COLOR):
        print(utils.color_text(str(msg), color))
    cprint('\n\n'
           '\n <RUN CASE> '
           '\n  ========  '
           '\n', COLOR)

    cprint('CASE SOURCE:')
    cprint('------------')
    print(utils.indent(
        utils.add_line_numbers(utils.highlight_code(source, 'python'))))

    print('')

    import hashlib
    hasher = hashlib.sha1()
    hasher.update(source.encode('utf8'))
    hashid = hasher.hexdigest()[0:8]

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_linenos_' + hashid + '.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout(supress=False) as cap:
            runner.doctest_module(modpath, 'all', argv=[''], style=style)

    cprint('\n\n --- </END RUN CASE> --- \n\n', COLOR)
    return cap.text
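The style argument is passed through to runner.doctest_module. A small sketch of exercising it, assuming the 'google' and 'freeform' parser styles supported by xdoctest (the sample docstring is illustrative):

from xdoctest import utils

source = utils.codeblock(
    '''
    def bar():
        """
        Example:
            >>> x = 10
            >>> print(x)
            10
        """
    ''')
# 'google' collects doctests only from Google-style sections such as "Example:",
# while 'freeform' collects any '>>> ' block in the docstring.
out_google = _run_case(source, style='google')
out_freeform = _run_case(source, style='freeform')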
Example No. 3
    def format_part(self,
                    linenos=True,
                    want=True,
                    startline=1,
                    n_digits=None,
                    colored=False,
                    partnos=False,
                    prefix=True):
        """
        Customizable formatting of the source and want for this doctest.

        CommandLine:
            python -m xdoctest.doctest_part DoctestPart.format_part
            python -m xdoctest.doctest_part DoctestPart.format_part:0

        Args:
            linenos (bool): show line numbers
            want (bool): include the want value if it exists
            startline (int): offsets the line numbering
            n_digits (int): number of digits to use for line numbers
            colored (bool): pygmentize the code
            partnos (bool): if True, shows the part number in the string
            prefix (bool): if False, exclude the doctest `>>> ` prefix

        Example:
            >>> from xdoctest.parser import *
            >>> self = DoctestPart(exec_lines=['print(123)'],
            >>>                    want_lines=['123'], line_offset=0, partno=1)
            >>> # xdoctest: -NORMALIZE_WHITESPACE
            >>> print(self.format_part(partnos=True))
            (p1) 1 >>> print(123)
                   123

        Example:
            >>> from xdoctest.parser import *
            >>> self = DoctestPart(exec_lines=['print(123)'],
            >>>                    want_lines=['123'], line_offset=0, partno=1)
            >>> # xdoctest: -NORMALIZE_WHITESPACE
            >>> print(self.format_part(partnos=False, prefix=False,
            >>>                       linenos=False, want=False))
            print(123)
        """
        import math
        from xdoctest import utils
        if prefix:
            # Show the original line prefix when possible
            if self.orig_lines is None:
                src_text = utils.indent(self.source, '>>> ')
            else:
                src_text = '\n'.join(self.orig_lines)
        else:
            src_text = self.source

        want_text = self.want if self.want else ''

        if n_digits is None:
            endline = startline + self.n_lines
            n_digits = math.log(max(1, endline), 10)
            n_digits = int(math.ceil(n_digits))

        part_lines = src_text.splitlines()
        n_spaces = 0

        if linenos:
            n_spaces += n_digits + 1
            start = startline + self.line_offset
            part_lines = utils.add_line_numbers(part_lines,
                                                n_digits=n_digits,
                                                start=start)

        if partnos:
            part_lines = [
                '(p{}) {}'.format(self.partno, line) for line in part_lines
            ]
            n_spaces += 4 + 1  # FIXME could be more robust if more than 9 parts

        want_lines = []
        if want_text:
            want_fmt = ' ' * n_spaces + '{line}'
            for line in want_text.splitlines():
                if want:
                    want_lines.append(want_fmt.format(line=line))

        part_text = '\n'.join(part_lines)
        want_text = '\n'.join(want_lines)

        if colored:
            part_text = utils.highlight_code(part_text, 'python')
            want_text = utils.color_text(want_text, 'green')

        if want_lines:
            part_text += '\n' + want_text

        return part_text
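The n_digits default above sizes the line-number gutter from the last line that will be printed. A standalone sketch of that computation (the helper name _default_n_digits is ours, not part of xdoctest):

import math

def _default_n_digits(startline, n_lines):
    # Width of the largest line number that will be emitted.
    endline = startline + n_lines
    return int(math.ceil(math.log(max(1, endline), 10)))

assert _default_n_digits(1, 5) == 1    # line numbers up to 6 fit in one digit
assert _default_n_digits(1, 50) == 2   # line numbers up to 51 need two digits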
Example No. 4
    def repr_failure(self, with_tb=True):
        r"""
        Constructs lines detailing information about a failed doctest

        CommandLine:
            python -m xdoctest.core DocTest.repr_failure:0
            python -m xdoctest.core DocTest.repr_failure:1
            python -m xdoctest.core DocTest.repr_failure:2

        Example:
            >>> from xdoctest.core import *
            >>> docstr = utils.codeblock(
                '''
                >>> x = 1
                >>> print(x + 1)
                2
                >>> print(x + 3)
                3
                >>> print(x + 100)
                101
                ''')
            >>> parsekw = dict(fpath='foo.txt', callname='bar', lineno=42)
            >>> self = list(parse_docstr_examples(docstr, **parsekw))[0]
            >>> summary = self.run(on_error='return', verbose=0)
            >>> print('[res]' + '\n[res]'.join(self.repr_failure()))

        Example:
            >>> from xdoctest.core import *
            >>> docstr = utils.codeblock(
                r'''
                >>> 1
                1
                >>> print('.▴  .\n.▴ ▴.') # xdoc: -NORMALIZE_WHITESPACE
                . ▴ .
                .▴ ▴.
                ''')
            >>> parsekw = dict(fpath='foo.txt', callname='bar', lineno=42)
            >>> self = list(parse_docstr_examples(docstr, **parsekw))[0]
            >>> summary = self.run(on_error='return', verbose=1)
            >>> print('[res]' + '\n[res]'.join(self.repr_failure()))


        Example:
            >>> from xdoctest.core import *
            >>> docstr = utils.codeblock(
                '''
                >>> assert True
                >>> assert False
                >>> x = 100
                ''')
            >>> self = list(parse_docstr_examples(docstr))[0]
            >>> summary = self.run(on_error='return', verbose=0)
            >>> print('[res]' + '\n[res]'.join(self.repr_failure()))
        """

        # if '--xdoc-debug' in sys.argv:
        #     lines += ['DEBUG PARTS: ']
        #     for partx, part in enumerate(self._parts):
        #         lines += [str(partx) + ': ' + str(part)]
        #         lines += ['  directives: {!r}'.format(part.directives)]
        #         lines += ['  want: {!r}'.format(str(part.want)[0:25])]
        #         val = self.logged_evals.get(partx, None)
        #         lines += ['  eval: ' + repr(val)]
        #         val = self.logged_stdout.get(partx, None)
        #         lines += ['  stdout: ' + repr(val)]
        #     partx = self._parts.index(self.failed_part)
        #     lines += [
        #         'failed partx = {}'.format(partx)
        #     ]
        #     failed_part = self.failed_part
        #     lines += ['----']
        #     lines += ['Failed part line offset:']
        #     lines += ['{}'.format(failed_part.line_offset)]
        #     lines += ['Failed directives:']
        #     lines += ['{}'.format(list(failed_part.directives))]

        #     lines += ['Failed part source:']
        #     lines += failed_part.source.splitlines()
        #     lines += ['Failed part want:']
        #     if failed_part.want_lines:
        #         lines += failed_part.want_lines
        #     lines += ['Failed part stdout:']
        #     lines += self.logged_stdout[partx].splitlines()
        #     lines += ['Failed part eval:']
        #     lines += [repr(self.logged_evals[partx])]
        #     lines += ['----']

        #     lines += [
        #         # 'self.module = {}'.format(self.module),
        #         # 'self.modpath = {}'.format(self.modpath),
        #         # 'self.modpath = {}'.format(self.modname),
        #         # 'self.global_namespace = {}'.format(self.global_namespace.keys()),
        #     ]
        # lines += ['Failed doctest in ' + self.callname]

        if self.exc_info is None:
            return []
        ex_type, ex_value, tb = self.exc_info
        # Failure line offset wrt the doctest (starts from 0)
        fail_offset = self.failed_line_offset()
        # Failure line number wrt the entire file (starts from 1)
        fail_lineno = self.failed_lineno()

        lines = [
            '* REASON: {}'.format(ex_type.__name__),
            self._color(self._block_prefix + ' DEBUG INFO', 'white'),
            '  XDoc "{}", line {}'.format(self.node, fail_offset + 1) +
            self._color(' <- wrt doctest', 'red'),
        ]

        colored = self.config['colored']
        if fail_lineno is not None:
            fpath = self.UNKNOWN_FPATH if self.fpath is None else self.fpath
            lines += [
                '  File "{}", line {},'.format(fpath, fail_lineno) +
                self._color(' <- wrt source file', 'red')
            ]

        # lines += ['  in doctest "{}", line {}'.format(self.unique_callname,
        #                                               fail_offset + 1) +
        #           self._color(' <- relative line number in the docstest', 'red')]

        # source_text = self.format_src(colored=colored, linenos=True,
        #                               want=False)
        # source_text = utils.indent(source_text)
        # lines += source_text.splitlines()

        def r1_strip_nl(text):
            if text is None:
                return None
            return text[:-1] if text.endswith('\n') else text

        # if self.logged_stdout:
        #     lines += ['stdout results:']
        #     lines += [r1_strip_nl(t) for t in self.logged_stdout.values() if t]

        textgen = self.format_parts(colored=colored, linenos=True, want=False)

        n_digits = 1

        # Logic to break output between pass, failed, and unexecuted parts
        before_part_lines = []
        fail_part_lines = []
        after_parts_lines = []
        temp = [before_part_lines, fail_part_lines, after_parts_lines]
        tindex = 0
        indent_text = ' ' * (5 + n_digits)

        for partx, (part, part_text) in enumerate(zip(self._parts, textgen)):
            if part in self._skipped_parts:
                # temp[tindex] += [utils.indent(part_text, ' ' * 4)]
                # temp[tindex] += [utils.indent(' >>> # skipped', indent_text)]
                continue
            part_out = r1_strip_nl(self.logged_stdout.get(partx, ''))
            if part is self.failed_part:
                tindex += 1
            # Append the part source code
            temp[tindex] += [utils.indent(part_text, ' ' * 4)]
            # Append the part stdout (if it exists)
            if part_out:
                temp[tindex] += [utils.indent(part_out, indent_text)]
            if part is self.failed_part:
                tindex += 1
            # part_eval = self.logged_evals[partx]
            # if part_eval is not NOT_EVALED:
            #     temp[tindex] += [repr(part_eval)]

        lines += [self._color(self._block_prefix + ' PART BREAKDOWN', 'white')]
        if before_part_lines:
            lines += ['Passed Parts:']
            lines += before_part_lines

        if fail_part_lines:
            lines += ['Failed Part:']
            lines += fail_part_lines

        if after_parts_lines:
            lines += ['Remaining Parts:']
            lines += after_parts_lines

        lines += [self._color(self._block_prefix + ' TRACEBACK', 'white')]
        if hasattr(ex_value, 'output_difference'):
            lines += [
                ex_value.output_difference(self._runstate, colored=colored),
                ex_value.output_repr_difference(self._runstate)
            ]
        else:
            if with_tb:
                # TODO: enhance formatting to show an IPython-like output of
                # where the error occurred in the doctest
                tblines = traceback.format_exception(*self.exc_info)

                def _alter_traceback_linenos(self, tblines):
                    def overwrite_lineno(linepart):
                        # Replace the trailing part which is the lineno
                        old_linestr = linepart[-1]  # noqa

                        # This is the lineno we will insert
                        rel_lineno = self.failed_part.line_offset + tb_lineno
                        abs_lineno = self.lineno + rel_lineno - 1

                        new_linestr = 'rel: {rel}, abs: {abs}'.format(
                            rel=rel_lineno,
                            abs=abs_lineno,
                        )

                        linepart = linepart[:-1] + [new_linestr]
                        return linepart

                    new_tblines = []
                    for i, line in enumerate(tblines):

                        if 'xdoctest/xdoctest/doctest_example' in line:
                            # hack: remove ourselves from the traceback
                            continue
                            # new_tblines.append('!!!!!')
                            # raise Exception('foo')
                            # continue

                        if self._partfilename in line:
                            # Intercept the line corresponding to the doctest
                            tbparts = line.split(',')
                            tb_lineno = int(tbparts[-2].strip().split()[1])
                            # modify the line number to match the doctest
                            linepart = tbparts[-2].split(' ')

                            linepart = overwrite_lineno(linepart)

                            tbparts[-2] = ' '.join(linepart)
                            new_line = ','.join(tbparts)

                            # failed_ctx = '>>> ' + self.failed_part.exec_lines[tb_lineno - 1]
                            failed_ctx = self.failed_part.orig_lines[tb_lineno
                                                                     - 1]
                            extra = '    ' + failed_ctx
                            line = (new_line + extra + '\n')

                        # m = '(t{})'.format(i)
                        # line = m + line.replace('\n', '\n' + m)
                        new_tblines.append(line)

                    return new_tblines

                new_tblines = _alter_traceback_linenos(self, tblines)

                if colored:
                    tbtext = '\n'.join(new_tblines)
                    tbtext = utils.highlight_code(tbtext,
                                                  lexer_name='pytb',
                                                  stripall=True)
                    new_tblines = tbtext.splitlines()
                lines += new_tblines

        lines += [self._color(self._block_prefix + ' REPRODUCTION', 'white')]
        lines += ['CommandLine:']
        lines += ['    ' + self.cmdline]
        return lines
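The pass/fail/remaining grouping above works by advancing a bucket index just before and just after the failing part. A self-contained sketch of the same pattern (split_parts is a hypothetical helper, not xdoctest API):

def split_parts(parts, failed_part):
    # Three buckets: parts before the failure, the failing part itself, parts after it.
    before, failed, after = [], [], []
    buckets = [before, failed, after]
    tindex = 0
    for part in parts:
        if part is failed_part:
            tindex += 1          # switch into the "failed" bucket
        buckets[tindex].append(part)
        if part is failed_part:
            tindex += 1          # switch into the "remaining" bucket
    return before, failed, after

p0, p1, p2, p3 = object(), object(), object(), object()
assert split_parts([p0, p1, p2, p3], p2) == ([p0, p1], [p2], [p3])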
Example No. 5
    def output_difference(self, runstate=None, colored=True):
        """
        Return a string describing the differences between the expected output
        for a given example (`example`) and the actual output (`got`).
        The `runstate` contains option flags used to compare `want` and `got`.

        Notes:
            This does not check if got matches want, it only outputs the raw
            differences. Got/Want normalization may make the differences appear
            more exaggerated than they are.
        """
        got = self.got
        want = self.want

        if runstate is None:
            runstate = directive.RuntimeState()

        # Don't normalize because it usually removes the newlines
        runstate_ = runstate.to_dict()

        # Don't normalize whitespace in the report, for better visibility
        runstate_['NORMALIZE_WHITESPACE'] = False
        runstate_['IGNORE_WHITESPACE'] = False
        got, want = normalize(got, want, runstate_)

        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        # if not runstate['DONT_ACCEPT_BLANKLINE']:
        #     got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

        got = utils.ensure_unicode(got)

        # Check if we should use diff.
        if self._do_a_fancy_diff(runstate):
            # Split want & got into lines.
            want_lines = want.splitlines(True)
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if runstate['REPORT_UDIFF']:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:]  # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif runstate['REPORT_CDIFF']:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:]  # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif runstate['REPORT_NDIFF']:
                # TODO: Is there a way to make Differ ignore whitespace if that
                # runtime directive is specified?
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                raise ValueError('Invalid difflib option')

            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            diff_text = ''.join(diff)
            if colored:
                diff_text = utils.highlight_code(diff_text, lexer_name='diff')

            text = 'Differences (%s):\n' % kind + utils.indent(diff_text)
        else:
            # If we're not using diff, then simply list the expected
            # output followed by the actual output.
            if want and got:
                if colored:
                    got = utils.color_text(got, 'red')
                    want = utils.color_text(want, 'red')
                text = 'Expected:\n{}\nGot:\n{}'.format(
                    utils.indent(self.want), utils.indent(self.got))
            elif want:
                if colored:
                    got = utils.color_text(got, 'red')
                    want = utils.color_text(want, 'red')
                text = 'Expected:\n{}\nGot nothing\n'.format(
                    utils.indent(want))
            elif got:  # nocover
                raise AssertionError('impossible state')
                text = 'Expected nothing\nGot:\n{}'.format(utils.indent(got))
            else:  # nocover
                raise AssertionError('impossible state')
                text = 'Expected nothing\nGot nothing\n'
        return text
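The REPORT_UDIFF branch above reduces to difflib.unified_diff with two lines of context and the two header lines stripped. A minimal standalone sketch (the want/got values are made up for illustration):

import difflib

want_lines = ['1\n', '2\n', '3\n']
got_lines = ['1\n', '2\n', '4\n']

# n=2 keeps two lines of surrounding context; [2:] drops the '---'/'+++' header lines.
diff = list(difflib.unified_diff(want_lines, got_lines, n=2))[2:]
diff_text = ''.join(line.rstrip() + '\n' for line in diff)
print('Differences (unified diff with -expected +actual):')
print(diff_text)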