Example #1
def test_blankline_not_accept():
    # Check that blankline is not normalized away when
    # DONT_ACCEPT_BLANKLINE is on
    runstate = directive.RuntimeState({'DONT_ACCEPT_BLANKLINE': True})
    got = 'foo\n\nbar'
    want = 'foo\n<BLANKLINE>\nbar'
    assert not checker.check_output(got, want, runstate)
Example #2
def test_blankline_accept():
    """
    pytest testing/test_checker.py
    """
    # Check that blankline is normalized away
    runstate = directive.RuntimeState({'DONT_ACCEPT_BLANKLINE': False})
    got = 'foo\n\nbar'
    want = 'foo\n<BLANKLINE>\nbar'
    assert checker.check_output(got, want, runstate)
Example #3
def check_output(got, want, runstate=None):
    """
    Does the actual comparison between `got` and `want`
    """
    if not want:  # nocover
        return True
    if want:
        # Try default
        if got == want:
            return True

        if runstate is None:
            runstate = directive.RuntimeState()

        got, want = normalize(got, want, runstate)
        return _check_match(got, want, runstate)
    return False
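
A minimal usage sketch for `check_output` (assuming the test module above imports `checker` and `directive` from the xdoctest package), showing how the NORMALIZE_WHITESPACE directive changes the outcome of a comparison:

from xdoctest import checker
from xdoctest import directive

# With whitespace normalization, runs of spaces and newlines collapse to
# single spaces before comparison, so these two strings match.
runstate = directive.RuntimeState({'NORMALIZE_WHITESPACE': True})
assert checker.check_output('a  b\nc', 'a b c', runstate)

# With normalization explicitly disabled, the same pair no longer matches.
runstate = directive.RuntimeState({'NORMALIZE_WHITESPACE': False,
                                   'IGNORE_WHITESPACE': False})
assert not checker.check_output('a  b\nc', 'a b c', runstate)
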
Example #4
    def _do_a_fancy_diff(self, runstate=None):
        # Not unless they asked for a fancy diff.
        got = self.got
        want = self.want

        if runstate is None:
            runstate = directive.RuntimeState()

        # ndiff does intraline difference marking, so it can be useful even
        # for 1-line differences.
        if runstate['REPORT_NDIFF']:
            return True

        # The other diff types need at least a few lines to be helpful.
        if runstate['REPORT_UDIFF'] or runstate['REPORT_CDIFF']:
            return want.count('\n') > 2 and got.count('\n') > 2

        return False
Example #5
    def output_repr_difference(self, runstate=None):
        """
        Constructs a repr difference with minimal normalization.
        """
        minimal_got = self.got.rstrip()
        minimal_want = self.want.rstrip()

        if runstate is None:
            runstate = directive.RuntimeState()

        # Don't normalize because it usually removes the newlines
        runstate_ = runstate.to_dict()

        if not runstate_['DONT_ACCEPT_BLANKLINE']:
            minimal_want = remove_blankline_marker(minimal_want)

        lines = [
            ('Repr Difference:'),
            # TODO: get a semi-normalized output before showing repr?
            ('    got  = {!r}'.format(minimal_got)),
            ('    want = {!r}'.format(minimal_want)),
        ]
        return '\n'.join(lines)
Example #6
def test_blankline_failcase():
    # Check that blankline is not normalized in a "got" statement
    runstate = directive.RuntimeState({'DONT_ACCEPT_BLANKLINE': False})
    got = 'foo\n<BLANKLINE>\nbar'
    want = 'foo\n\nbar'
    assert not checker.check_output(got, want, runstate)
Example #7
    def run(self, verbose=None, on_error=None):
        """
        Executes the doctest, checks the results, reports the outcome.

        Args:
            verbose (int): verbosity level
            on_error (str): can be 'raise' or 'return'

        Returns:
            Dict : summary
        """
        on_error = self.config.getvalue('on_error', on_error)
        verbose = self.config.getvalue('verbose', verbose)
        if on_error not in {'raise', 'return'}:
            raise KeyError(on_error)

        self._parse()  # parse out parts if we have not already done so
        self._pre_run(verbose)
        self._import_module()

        # Prepare for actual test run
        test_globals, compileflags = self._test_globals()

        self.logged_evals.clear()
        self.logged_stdout.clear()
        self._unmatched_stdout = []

        self._skipped_parts = []
        self.exc_info = None
        self._suppressed_stdout = verbose <= 1

        # Initialize a new runtime state
        default_state = self.config['default_runtime_state']
        runstate = self._runstate = directive.RuntimeState(default_state)
        # set up the reporting choice
        runstate.set_report_style(self.config['reportchoice'].lower())

        global_exec = self.config.getvalue('global_exec')
        if global_exec:
            # Hack to make it easier to specify multi-line input on the CLI
            global_source = utils.codeblock(global_exec.replace('\\n', '\n'))
            global_code = compile(global_source,
                                  mode='exec',
                                  filename='<doctest:' + self.node + ':' +
                                  'global_exec>',
                                  flags=compileflags,
                                  dont_inherit=True)
            exec(global_code, test_globals)

        # Can't do this because we can't force execution of SCRIPTS
        # if self.is_disabled():
        #     runstate['SKIP'] = True

        # - [x] TODO: fix CaptureStdout so it doesn't break embedding shells
        # don't capture stdout for zero-arg blocks
        # needs_capture = self.block_type != 'zero-arg'
        # I think the bug that broke embedding shells is fixed, so it is now
        # safe to capture. If not, uncomment above lines. If this works without
        # issue, then remove these notes in a future version.
        # needs_capture = False
        needs_capture = True

        # Use the same capture object for all parts in the test
        cap = utils.CaptureStdout(supress=self._suppressed_stdout,
                                  enabled=needs_capture)
        with warnings.catch_warnings(record=True) as self.warn_list:
            for partx, part in enumerate(self._parts):
                # Extract directives and update runtime state
                runstate.update(part.directives)

                # Handle runtime actions
                if runstate['SKIP'] or len(runstate['REQUIRES']) > 0:
                    self._skipped_parts.append(part)
                    continue

                # Prepare to capture stdout and evaluated values
                self.failed_part = part
                got_eval = constants.NOT_EVALED
                try:
                    # Compile code, handle syntax errors
                    #   part.compile_mode can be single, exec, or eval.
                    #   Typically single is used instead of eval
                    self._partfilename = '<doctest:' + self.node + '>'
                    code = compile(part.source,
                                   mode=part.compile_mode,
                                   filename=self._partfilename,
                                   flags=compileflags,
                                   dont_inherit=True)
                except KeyboardInterrupt:  # nocover
                    raise
                except Exception:
                    raise
                    # self.exc_info = sys.exc_info()
                    # ex_type, ex_value, tb = self.exc_info
                    # self.failed_tb_lineno = tb.tb_lineno
                    # if on_error == 'raise':
                    #     raise
                try:
                    # Execute the doctest code
                    try:
                        # NOTE: For code passed to eval or exec, there is no
                        # difference between locals and globals. Only pass in
                        # one dict, otherwise there is weird behavior
                        with cap:
                            # We can execute each part using exec or eval.  If
                            # a doctest part has `compile_mode=eval` we
                            # expect it to return an object with a repr that
                            # can be compared to a "want" statement.
                            # print('part.compile_mode = {!r}'.format(part.compile_mode))
                            if part.compile_mode == 'eval':
                                # print('test_globals = {}'.format(sorted(test_globals.keys())))
                                got_eval = eval(code, test_globals)
                                # if EVAL_MIGHT_RETURN_COROUTINE:
                                #     import types
                                #     if isinstance(got_eval, types.CoroutineType):
                                #         # In 3.9-rc (2020-mar-31) it looks like
                                #         # eval sometimes returns coroutines. I
                                #         # found no docs on this. Not sure if it
                                #         # will be mainlined, but this seems to
                                #         # fix it.
                                #         import asyncio
                                #         got_eval =  asyncio.run(got_eval)
                            else:
                                exec(code, test_globals)

                        # Record any standard output and "got_eval" produced by
                        # this doctest_part.
                        self.logged_evals[partx] = got_eval
                        self.logged_stdout[partx] = cap.text
                    except Exception:
                        if part.want:
                            # A failure may be expected if the traceback
                            # matches the part's want statement.
                            exception = sys.exc_info()
                            exc_got = traceback.format_exception_only(
                                *exception[:2])[-1]
                            want = part.want
                            checker.check_exception(exc_got, want, runstate)
                        else:
                            raise
                    else:
                        """
                        TODO:
                            [ ] - Delay got-want failure until the end of the
                            doctest. Allow the rest of the code to run.  If
                            multiple errors occur, show them both.
                        """
                        if part.want:
                            got_stdout = cap.text
                            if not runstate['IGNORE_WANT']:
                                part.check(got_stdout,
                                           got_eval,
                                           runstate,
                                           unmatched=self._unmatched_stdout)
                            # Clear unmatched output when a check passes
                            self._unmatched_stdout = []
                        else:
                            # If a part doesn't have a want, allow its output
                            # to be matched by the next part.
                            self._unmatched_stdout.append(cap.text)

                # Handle anything that could go wrong
                except KeyboardInterrupt:  # nocover
                    raise
                except (exceptions.ExitTestException,
                        exceptions._pytest.outcomes.Skipped):
                    if verbose > 0:
                        print('Test gracefully exits')
                    break
                except checker.GotWantException:
                    # When the "got" doesn't match the "want"
                    self.exc_info = sys.exc_info()
                    if on_error == 'raise':
                        raise
                    break
                except checker.ExtractGotReprException as ex:
                    # When we fail to extract the "got"
                    self.exc_info = sys.exc_info()
                    if on_error == 'raise':
                        raise ex.orig_ex
                    break
                except Exception as _ex_dbg:
                    ex_type, ex_value, tb = sys.exc_info()

                    DEBUG = 0
                    if DEBUG:
                        print('_ex_dbg = {!r}'.format(_ex_dbg))
                        print('<DEBUG: doctest encountered exception>',
                              file=sys.stderr)
                        print(''.join(traceback.format_tb(tb)),
                              file=sys.stderr)
                        print('</DEBUG>', file=sys.stderr)

                    # Search for the traceback that corresponds with the
                    # doctest, and remove the parts that point to
                    # boilerplate lines in this file.
                    found_lineno = None
                    for sub_tb in _traverse_traceback(tb):
                        tb_filename = sub_tb.tb_frame.f_code.co_filename
                        if tb_filename == self._partfilename:
                            # Walk up the traceback until we find the one that has
                            # the doctest as the base filename
                            found_lineno = sub_tb.tb_lineno
                            break
                    if DEBUG:
                        # The only traceback remaining should be
                        # the part that is relevant to the user
                        print('<DEBUG: best sub_tb>', file=sys.stderr)
                        print('found_lineno = {!r}'.format(found_lineno),
                              file=sys.stderr)
                        print(''.join(traceback.format_tb(sub_tb)),
                              file=sys.stderr)
                        print('</DEBUG>', file=sys.stderr)

                    if found_lineno is None:
                        if DEBUG:
                            print(
                                'UNABLE TO CLEAN TRACEBACK. EXIT DUE TO DEBUG')
                            sys.exit(1)
                        raise ValueError(
                            'Could not clean traceback: ex = {!r}'.format(
                                _ex_dbg))
                    else:
                        self.failed_tb_lineno = found_lineno

                    self.exc_info = (ex_type, ex_value, tb)

                    # The idea of CLEAN_TRACEBACK is to make it so the
                    # traceback from this function doesn't clutter the error
                    # message the user sees.
                    if on_error == 'raise':
                        raise
                    break
                finally:
                    if cap.enabled:
                        assert cap.text is not None
                    # Ensure that we logged the output even in failure cases
                    self.logged_evals[partx] = got_eval
                    self.logged_stdout[partx] = cap.text

        if self.exc_info is None:
            self.failed_part = None

        if len(self._skipped_parts) == len(self._parts):
            # we skipped everything
            if self.mode == 'pytest':
                import pytest
                pytest.skip()

        summary = self._post_run(verbose)

        # Clear the global namespace so doctests don't leak memory
        self.global_namespace.clear()

        return summary
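
A hypothetical driver sketch for the `run` method above. The parsing entry point (`xdoctest.core.parse_docstr_examples`) and the exact contents of the returned summary dict are assumptions and may differ between versions; the `on_error` and `verbose` arguments follow the docstring above.

from xdoctest import core

docstr = '''
    >>> x = 2
    >>> print(x + 1)
    3
'''

# parse_docstr_examples is assumed to yield DocTest objects exposing run()
for example in core.parse_docstr_examples(docstr, callname='demo'):
    # on_error='return' records a failure in the summary instead of raising
    summary = example.run(verbose=0, on_error='return')
    print(summary)
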
Example #8
    def output_difference(self, runstate=None, colored=True):
        """
        Return a string describing the differences between the expected output
        for a given example (`example`) and the actual output (`got`).
        The `runstate` contains option flags used to compare `want` and `got`.

        Notes:
            This does not check if got matches want; it only outputs the raw
            differences. Got/Want normalization may make the differences appear
            more exaggerated than they are.
        """
        got = self.got
        want = self.want

        if runstate is None:
            runstate = directive.RuntimeState()

        # Don't normalize because it usually removes the newlines
        runstate_ = runstate.to_dict()

        # Don't normalize whitespace in the report, for better visibility
        runstate_['NORMALIZE_WHITESPACE'] = False
        runstate_['IGNORE_WHITESPACE'] = False
        got, want = normalize(got, want, runstate_)

        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        # if not runstate['DONT_ACCEPT_BLANKLINE']:
        #     got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

        got = utils.ensure_unicode(got)

        # Check if we should use diff.
        if self._do_a_fancy_diff(runstate):
            # Split want & got into lines.
            want_lines = want.splitlines(True)
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if runstate['REPORT_UDIFF']:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:]  # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif runstate['REPORT_CDIFF']:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:]  # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif runstate['REPORT_NDIFF']:
                # TODO: Is there a way to make Differ ignore whitespace if that
                # runtime directive is specified?
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                raise ValueError('Invalid difflib option')

            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            diff_text = ''.join(diff)
            if colored:
                diff_text = utils.highlight_code(diff_text, lexer_name='diff')

            text = 'Differences (%s):\n' % kind + utils.indent(diff_text)
        else:
            # If we're not using diff, then simply list the expected
            # output followed by the actual output.
            if want and got:
                if colored:
                    got = utils.color_text(got, 'red')
                    want = utils.color_text(want, 'red')
                text = 'Expected:\n{}\nGot:\n{}'.format(
                    utils.indent(self.want), utils.indent(self.got))
            elif want:
                if colored:
                    got = utils.color_text(got, 'red')
                    want = utils.color_text(want, 'red')
                text = 'Expected:\n{}\nGot nothing\n'.format(
                    utils.indent(want))
            elif got:  # nocover
                raise AssertionError('impossible state')
                text = 'Expected nothing\nGot:\n{}'.format(utils.indent(got))
            else:  # nocover
                raise AssertionError('impossible state')
                text = 'Expected nothing\nGot nothing\n'
        return text
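
A standalone sketch (standard library only) of the unified-diff construction used in the REPORT_UDIFF branch above, including the header-stripping and trailing-whitespace cleanup steps:

import difflib

want_lines = ['alpha\n', 'beta\n', 'gamma\n', 'delta\n']
got_lines = ['alpha\n', 'BETA\n', 'gamma\n', 'delta\n']

# n=2 lines of context, as in the REPORT_UDIFF branch
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:]  # strip the '---' / '+++' diff header
diff = [line.rstrip() + '\n' for line in diff]
print(''.join(diff))
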
Example #9
def normalize(got, want, runstate=None):
    r"""
    Adapted from doctest_nose_plugin.py from the nltk project:
        https://github.com/nltk/nltk

    Further extended to also support byte literals.

    Example:
        >>> want = "...\n(0, 2, {'weight': 1})\n(0, 3, {'weight': 2})"
        >>> got = "(0, 2, {'weight': 1})\n(0, 3, {'weight': 2})"
    """
    if runstate is None:
        runstate = directive.RuntimeState()

    def remove_prefixes(regex, text):
        return re.sub(regex, r'\1\2', text)

    def visible_text(lines):
        # TODO: backspaces
        # Any lines that end with only a carriage return are erased
        return [line for line in lines if not line.endswith('\r')]

    # Remove terminal colors
    if True:
        got = utils.strip_ansi(got)
        want = utils.strip_ansi(want)

    if True:
        # normalize python 2/3 byte/unicode prefixes
        got = remove_prefixes(unicode_literal_re, got)
        want = remove_prefixes(unicode_literal_re, want)

        # Note: normalizing away prefixes can cause weird "got"
        # results to print when there is a got-want mismatch.
        # For instance, if you get {'b': 22} but you want {'b': 2}
        # this will cause xdoctest to report that you wanted {'': 2}
        # because it reports the normalized version of the want message
        got = remove_prefixes(bytes_literal_re, got)
        want = remove_prefixes(bytes_literal_re, want)

    # Remove <BLANKLINE> markers if they are being used.
    if not runstate['DONT_ACCEPT_BLANKLINE']:
        want = remove_blankline_marker(want)

    # always remove trailing whitespace
    got = re.sub(TRAILING_WS, '', got)
    want = re.sub(TRAILING_WS, '', want)
    # normalize trailing newlines
    want = want.rstrip()
    got = got.rstrip()

    # Always remove invisible text
    got_lines = got.splitlines(True)
    want_lines = want.splitlines(True)
    got_lines = visible_text(got_lines)
    want_lines = visible_text(want_lines)
    want = ''.join(want_lines)
    got = ''.join(got_lines)

    if runstate['NORMALIZE_WHITESPACE'] or runstate['IGNORE_WHITESPACE']:

        # all whitespace normalization
        # treat newlines and all whitespace as a single space
        got = ' '.join(got.split())
        want = ' '.join(want.split())

    if runstate['IGNORE_WHITESPACE']:
        # Completely remove whitespace
        got = re.sub(r'\s', '', got, flags=re.MULTILINE)
        want = re.sub(r'\s', '', want, flags=re.MULTILINE)

    if runstate['NORMALIZE_REPR']:

        def norm_repr(a, b):
            # If removing quotes would allow for a match, remove them.
            if not _check_match(a, b, runstate):
                for q in ['"', "'"]:
                    if a.startswith(q) and a.endswith(q):
                        if _check_match(a[1:-1], b, runstate):
                            return a[1:-1]
            return a

        got = norm_repr(got, want)
        want = norm_repr(want, got)

    return got, want
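
A small sketch of the normalization above (assuming `normalize` lives alongside `check_output` in the checker module). The unicode string prefix is stripped and the line overwritten by a bare carriage return is discarded, so both sides compare equal:

from xdoctest.checker import normalize

got = "u'abc'\ndownload 10%\rdownload 100%"
want = "'abc'\ndownload 100%"

# When runstate is None the default runtime state is used
norm_got, norm_want = normalize(got, want)
assert norm_got == norm_want
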