Example #1
    def format_part(self,
                    linenos=True,
                    want=True,
                    startline=1,
                    n_digits=None,
                    colored=False,
                    partnos=False,
                    prefix=True):
        """
        Customizable formatting of the source and want for this doctest.

        CommandLine:
            python -m xdoctest.doctest_part DoctestPart.format_part

        Args:
            linenos (bool): show line numbers
            want (bool): include the want value if it exists
            startline (int): offsets the line numbering
            n_digits (int): number of digits to use for line numbers
            colored (bool): pygmentize the code
            partnos (bool): if True, shows the part number in the string
            prefix (bool): if False, exclude the doctest `>>> ` prefix

        CommandLine:
            python -m xdoctest.doctest_part DoctestPart.format_part:0

        Example:
            >>> from xdoctest.parser import *
            >>> self = DoctestPart(exec_lines=['print(123)'],
            >>>                    want_lines=['123'], line_offset=0, partno=1)
            >>> # xdoctest: -NORMALIZE_WHITESPACE
            >>> print(self.format_part(partnos=True))
            (p1) 1 >>> print(123)
                   123

        Example:
            >>> from xdoctest.parser import *
            >>> self = DoctestPart(exec_lines=['print(123)'],
            >>>                    want_lines=['123'], line_offset=0, partno=1)
            >>> # xdoctest: -NORMALIZE_WHITESPACE
            >>> print(self.format_part(partnos=False, prefix=False,
            >>>                       linenos=False, want=False))
            print(123)
        """
        from xdoctest import utils
        if prefix:
            # Show the original line prefix when possible
            if self.orig_lines is None:
                src_text = utils.indent(self.source, '>>> ')
            else:
                src_text = '\n'.join(self.orig_lines)
        else:
            src_text = self.source

        want_text = self.want if self.want else ''

        if n_digits is None:
            endline = startline + self.n_lines
            n_digits = math.log(max(1, endline), 10)
            n_digits = int(math.ceil(n_digits))

        part_lines = src_text.splitlines()
        n_spaces = 0

        if linenos:
            n_spaces += n_digits + 1
            start = startline + self.line_offset
            part_lines = utils.add_line_numbers(part_lines,
                                                n_digits=n_digits,
                                                start=start)

        if partnos:
            part_lines = [
                '(p{}) {}'.format(self.partno, line) for line in part_lines
            ]
            n_spaces += 4 + 1  # FIXME could be more robust if more than 9 parts

        want_lines = []
        if want_text:
            want_fmt = ' ' * n_spaces + '{line}'
            for line in want_text.splitlines():
                if want:
                    want_lines.append(want_fmt.format(line=line))

        part_text = '\n'.join(part_lines)
        want_text = '\n'.join(want_lines)

        if colored:
            part_text = utils.highlight_code(part_text, 'python')
            want_text = utils.color_text(want_text, 'green')

        if want_lines:
            part_text += '\n' + want_text

        return part_text
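
The `n_digits` computation above sizes the line-number gutter from the last line number, and the want lines are padded so they sit under the numbered source. The following is a standalone sketch of that alignment logic for illustration only; it is not the xdoctest helpers, and `format_numbered` is a hypothetical name.

import math

def format_numbered(src_lines, want_lines, startline=1):
    # Width needed for the largest line number, as in format_part above
    endline = startline + len(src_lines)
    n_digits = int(math.ceil(math.log(max(1, endline), 10)))
    numbered = ['{:{w}d} {}'.format(startline + i, line, w=n_digits)
                for i, line in enumerate(src_lines)]
    # Want lines are indented so they line up under the source text
    pad = ' ' * (n_digits + 1)
    numbered += [pad + line for line in want_lines]
    return '\n'.join(numbered)

print(format_numbered(['>>> print(123)'], ['123']))
# 1 >>> print(123)
#   123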
Example #2
File: runner.py Project: gotcha/xdoctest
    def cprint(text, color):
        if config is not None and config.get('colored', True):
            _log(utils.color_text(text, color))
        else:
            _log(text)
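
As a standalone usage sketch: `config` and `_log` below are hypothetical stand-ins for the enclosing runner's state, while `xdoctest.utils.color_text` is the same helper used throughout these snippets.

from xdoctest import utils

config = {'colored': True}   # hypothetical stand-in for the runner config
_log = print                 # the runner normally supplies its own logger

def cprint(text, color):
    if config is not None and config.get('colored', True):
        _log(utils.color_text(text, color))
    else:
        _log(text)

cprint('3 / 3 passed', 'green')   # ANSI-colored when config['colored'] is truthy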
Example #3
File: runner.py Project: gotcha/xdoctest
def _run_examples(enabled_examples, verbose, config=None, _log=None):
    """
    Internal helper, loops over each example, runs it, returns a summary
    """
    n_total = len(enabled_examples)
    _log('running %d test(s)' % n_total)
    summaries = []
    failed = []
    warned = []
    times = {}
    # It is important to raise immediately within the test to display errors
    # returned from multiprocessing, especially in zero-arg mode.

    on_error = 'return' if n_total > 1 else 'raise'
    on_error = 'return'  # NOTE: currently always overridden to 'return'

    for example in enabled_examples:
        try:
            try:
                tic = time.time()
                summary = example.run(verbose=verbose, on_error=on_error)
                toc = time.time()
                n_seconds = toc - tic
                times[example] = n_seconds
            except Exception:
                _log('\n'.join(example.repr_failure(with_tb=False)))
                raise

            summaries.append(summary)
            if example.warn_list:
                warned.append(example)
            if summary['skipped']:
                pass
                # if verbose == 0:
                #     # TODO: should we write anything when verbose=0?
                #     sys.stdout.write('S')
                #     sys.stdout.flush()
            elif summary['passed']:
                pass
                # if verbose == 0:
                #     # TODO: should we write anything when verbose=0?
                #     sys.stdout.write('.')
                #     sys.stdout.flush()
            else:
                failed.append(example)
                # if verbose == 0:
                #     sys.stdout.write('F')
                #     sys.stdout.flush()
                if on_error == 'raise':
                    # What happens if we don't re-raise here?
                    # If it is necessary, write a message explaining why
                    _log('\n'.join(example.repr_failure()))
                    ex_value = example.exc_info[1]
                    raise ex_value
        except KeyboardInterrupt:
            _log('Caught CTRL+c: Stopping tests')
            break
        # except Exception:
        #     summary = {'passed': False}
        #     if verbose == 0:
        #         sys.stdout.write('F')
        #         sys.stdout.flush()
    if verbose == 0:
        _log('')
    n_passed = sum(s['passed'] for s in summaries)
    n_failed = sum(s['failed'] for s in summaries)
    n_skipped = sum(s['skipped'] for s in summaries)

    if config is not None and config.get('colored', True):
        _log(utils.color_text('============', 'white'))
    else:
        _log('============')

    if n_total > 1:
        # and verbose > 0:
        _log('Finished doctests')
        _log('%d / %d passed' % (n_passed, n_total))

    run_summary = {
        'failed': failed,
        'warned': warned,
        'action': 'run_examples',
        'n_warned': len(warned),
        'n_skipped': n_skipped,
        'n_passed': n_passed,
        'n_failed': n_failed,
        'n_total': n_total,
        'times': times,
    }
    return run_summary
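
For context, a caller might consume the returned dictionary roughly as below. This is a hedged sketch that uses only the keys assembled above; `enabled_examples` is assumed to be the list of parsed doctest objects from the signature, and `repr_failure()` is the same method used inside the loop.

run_summary = _run_examples(enabled_examples, verbose=0, _log=print)

msg = '{n_passed} passed, {n_failed} failed, {n_skipped} skipped of {n_total}'
print(msg.format(**run_summary))

for example in run_summary['failed']:
    # entries are the failing example objects themselves
    print('\n'.join(example.repr_failure()))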
Example #4
def test_label_indented_lines():
    string = '''
            text
            >>> dsrc1()
            want

                >>> dsrc2()
                >>> cont(
                ... a=b)
                ... dsrc
                >>> dsrc3():
                ...     a
                ...     b = """
                        multiline
                        """
                want

            text
            ... still text
            >>> "now its a doctest"

            text
    '''
    self = parser.DoctestParser()
    labeled = self._label_docsrc_lines(string)
    # import sys
    # print('EXIT NOW')
    # sys.exit(1)
    expected = [
        ('text', ''),
        ('text', '            text'),
        ('dsrc', '            >>> dsrc1()'),
        ('want', '            want'),
        ('text', ''),
        ('dsrc', '                >>> dsrc2()'),
        ('dsrc', '                >>> cont('),
        ('dcnt', '                ... a=b)'),
        ('dcnt', '                ... dsrc'),
        ('dsrc', '                >>> dsrc3():'),
        ('dcnt', '                ...     a'),
        ('dcnt', '                ...     b = """'),
        ('dcnt', '                        multiline'),
        ('dcnt', '                        """'),
        ('want', '                want'),
        ('text', ''),
        ('text', '            text'),
        ('text', '            ... still text'),
        ('dsrc', '            >>> "now its a doctest"'),
        ('text', ''),
        ('text', '            text'),
        ('text', '    '),  # FIXME: weird that this space has an indent
    ]
    if labeled != expected:
        try:
            # import ubelt as ub  # NOQA
            import itertools as it
            for got, want in it.zip_longest(labeled, expected):
                if got != want:
                    print(utils.color_text('GOT  = {!r}'.format(got), 'red'))
                    print(utils.color_text('WANT = {!r}'.format(want), 'blue'))
                else:
                    print('PASS = {!r}'.format(got))
        except ImportError:
            pass
        raise AssertionError
    assert labeled == expected
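
A smaller input makes the labeling scheme easier to follow. The sketch below assumes the same `_label_docsrc_lines` behavior exercised above; the labels in the trailing comment are inferred from the patterns in this test rather than a verified run.

from xdoctest import parser

string = '\n'.join([
    'text',
    '>>> x = 1 + 1',
    '>>> print(x)',
    '2',
    'more text',
])
labeled = parser.DoctestParser()._label_docsrc_lines(string)
# Expected, by analogy with the test above:
# [('text', 'text'), ('dsrc', '>>> x = 1 + 1'), ('dsrc', '>>> print(x)'),
#  ('want', '2'), ('text', 'more text')]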
Example #5
    def _color(self, text, color, enabled=None):
        """ conditionally color text based on config and flags """
        colored = self.config.getvalue('colored', enabled)
        if colored:
            text = utils.color_text(text, color)
        return text
Example #6
    def cprint(msg, color=COLOR):
        # NOTE: the color parameter is unused here; the enclosing COLOR is always applied
        print(utils.color_text(str(msg), COLOR))
Example #7
    def output_difference(self, runstate=None, colored=True):
        """
        Return a string describing the differences between the expected output
        for a given example (`example`) and the actual output (`got`).
        The `runstate` contains option flags used to compare `want` and `got`.

        Notes:
            This does not check if got matches want, it only outputs the raw
            differences. Got/Want normalization may make the differences appear
            more exaggerated than they are.
        """
        got = self.got
        want = self.want

        if runstate is None:
            runstate = directive.RuntimeState()

        # Don't normalize because it usually removes the newlines
        runstate_ = runstate.to_dict()

        # Don't normalize whitespaces in report for better visibility
        runstate_['NORMALIZE_WHITESPACE'] = False
        runstate_['IGNORE_WHITESPACE'] = False
        got, want = normalize(got, want, runstate_)

        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        # if not runstate['DONT_ACCEPT_BLANKLINE']:
        #     got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

        got = utils.ensure_unicode(got)

        # Check if we should use diff.
        if self._do_a_fancy_diff(runstate):
            # Split want & got into lines.
            want_lines = want.splitlines(True)
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if runstate['REPORT_UDIFF']:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:]  # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif runstate['REPORT_CDIFF']:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:]  # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif runstate['REPORT_NDIFF']:
                # TODO: Is there a way to make Differ ignore whitespace if that
                # runtime directive is specified?
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                raise ValueError('Invalid difflib option')

            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            diff_text = ''.join(diff)
            if colored:
                diff_text = utils.highlight_code(diff_text, lexer_name='diff')

            text = 'Differences (%s):\n' % kind + utils.indent(diff_text)
        else:
            # If we're not using diff, then simply list the expected
            # output followed by the actual output.
            if want and got:
                if colored:
                    got = utils.color_text(got, 'red')
                    want = utils.color_text(want, 'red')
                text = 'Expected:\n{}\nGot:\n{}'.format(
                    utils.indent(self.want), utils.indent(self.got))
            elif want:
                if colored:
                    got = utils.color_text(got, 'red')
                    want = utils.color_text(want, 'red')
                text = 'Expected:\n{}\nGot nothing\n'.format(
                    utils.indent(want))
            elif got:  # nocover
                raise AssertionError('impossible state')
                text = 'Expected nothing\nGot:\n{}'.format(utils.indent(got))
            else:  # nocover
                raise AssertionError('impossible state')
                text = 'Expected nothing\nGot nothing\n'
        return text
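
The REPORT_UDIFF branch is plain standard-library difflib; below is a minimal sketch of the same diff construction and header stripping, with made-up want/got lines.

import difflib

want_lines = ['123\n', 'abc\n']
got_lines = ['124\n', 'abc\n']

diff = list(difflib.unified_diff(want_lines, got_lines, n=2))[2:]  # strip the '---'/'+++' header
diff_text = ''.join(line.rstrip() + '\n' for line in diff)
print(diff_text)
# @@ -1,2 +1,2 @@
# -123
# +124
#  abc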
Example #8
def _run_examples(enabled_examples, verbose):
    """
    Internal helper, loops over each example, runs it, returns a summary
    """
    n_total = len(enabled_examples)
    print('running %d test(s)' % n_total)
    summaries = []
    failed = []
    warned = []
    # It is important to raise immediately within the test to display errors
    # returned from multiprocessing, especially in zero-arg mode.

    on_error = 'return' if n_total > 1 else 'raise'
    on_error = 'return'  # NOTE: currently always overridden to 'return'
    for example in enabled_examples:
        try:
            summary = example.run(verbose=verbose, on_error=on_error)
        except Exception:
            print('\n'.join(example.repr_failure(with_tb=False)))
            raise

        summaries.append(summary)
        if example.warn_list:
            warned.append(example)
        if summary['passed']:
            if verbose == 0:
                # TODO: should we write anything when verbose=0?
                sys.stdout.write('.')
                sys.stdout.flush()
        else:
            failed.append(example)
            if verbose == 0:
                sys.stdout.write('F')
                sys.stdout.flush()
            if on_error == 'raise':
                # What happens if we don't re-raise here?
                # If it is necessary, write a message explaining why
                print('\n'.join(example.repr_failure()))
                ex_value = example.exc_info[1]
                raise ex_value

        # except Exception:
        #     summary = {'passed': False}
        #     if verbose == 0:
        #         sys.stdout.write('F')
        #         sys.stdout.flush()
    if verbose == 0:
        print('')
    n_passed = sum(s['passed'] for s in summaries)

    print(utils.color_text('============', 'white'))

    if n_total > 1:
        # and verbose > 0:
        print('Finished doctests')
        print('%d / %d passed' % (n_passed, n_total))

    run_summary = {
        'failed': failed,
        'warned': warned,
        'action': 'run_examples',
        'n_warned': len(warned),
        'n_passed': n_passed,
        'n_failed': n_total - n_passed,
        'n_total': n_total,
    }
    return run_summary
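
The verbose == 0 branches reduce to the familiar one-character-per-test progress stream; here is a standalone illustration (not xdoctest code) of that output style.

import sys

for passed in [True, True, False, True]:
    sys.stdout.write('.' if passed else 'F')  # one character per finished test
    sys.stdout.flush()
sys.stdout.write('\n')  # prints: ..F.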
Example #9
    def cprint(text, color):
        print(utils.color_text(text, color))
Example #10
def _print_summary_report(run_summary, parse_warnlist, n_seconds,
                          enabled_examples):
    """
    Summary report formatting and printing
    """
    def cprint(text, color):
        print(utils.color_text(text, color))

    # report errors
    failed = run_summary.get('failed', [])
    warned = run_summary.get('warned', [])

    # report parse-time warnings
    if parse_warnlist:
        cprint('\n=== Found {} parse-time warnings ==='.format(
            len(parse_warnlist)), 'yellow')

        for warn_idx, warn in enumerate(parse_warnlist, start=1):
            cprint('--- Parse Warning: {} / {} ---'.format(
                warn_idx, len(parse_warnlist)), 'yellow')
            print(utils.indent(
                warnings.formatwarning(warn.message, warn.category,
                                       warn.filename, warn.lineno)))

    # report run-time warnings
    if warned:
        cprint('\n=== Found {} run-time warnings ==='.format(len(warned)), 'yellow')
        for warn_idx, example in enumerate(warned, start=1):
            cprint('--- Runtime Warning: {} / {} ---'.format(warn_idx, len(warned)),
                   'yellow')
            print('example = {!r}'.format(example))
            for warn in example.warn_list:
                print(utils.indent(
                    warnings.formatwarning(warn.message, warn.category,
                                           warn.filename, warn.lineno)))

    if failed and len(enabled_examples) > 1:
        # If there is more than one test being run, print out all the
        # errors that occurred so they are consolidated in a single place.
        cprint('\n=== Found {} errors ==='.format(len(failed)), 'red')
        for fail_idx, example in enumerate(failed, start=1):
            cprint('--- Error: {} / {} ---'.format(fail_idx, len(failed)), 'red')
            print(utils.indent('\n'.join(example.repr_failure())))

    # Print command lines to re-run failed tests
    if failed:
        cprint('\n=== Failed tests ===', 'red')
        for example in failed:
            print(example.cmdline)

    # final summary
    n_passed = run_summary.get('n_passed', 0)
    n_failed = run_summary.get('n_failed', 0)
    n_warnings = len(warned) + len(parse_warnlist)
    pairs = zip([n_failed, n_passed, n_warnings],
                ['failed', 'passed', 'warnings'])
    parts = ['{n} {t}'.format(n=n, t=t) for n, t in pairs if n > 0]
    fmtstr = '=== ' + ' '.join(parts) + ' in {n_seconds:.2f} seconds ==='
    summary_line = fmtstr.format(n_seconds=n_seconds)
    # color text based on worst type of error
    if n_failed > 0:
        summary_line = utils.color_text(summary_line, 'red')
    elif n_warnings > 0:
        summary_line = utils.color_text(summary_line, 'yellow')
    else:
        summary_line = utils.color_text(summary_line, 'green')
    print(summary_line)
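
To make the final line concrete, the same pairs/parts assembly run on hypothetical counts produces a pytest-style summary:

n_failed, n_passed, n_warnings, n_seconds = 1, 7, 2, 0.43

pairs = zip([n_failed, n_passed, n_warnings], ['failed', 'passed', 'warnings'])
parts = ['{n} {t}'.format(n=n, t=t) for n, t in pairs if n > 0]
print('=== ' + ' '.join(parts) + ' in {n_seconds:.2f} seconds ==='.format(n_seconds=n_seconds))
# === 1 failed 7 passed 2 warnings in 0.43 seconds ===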