Example #1
def test_run_zero_arg():
    """
    pytest testing/test_runner.py::test_run_zero_arg -s
    """
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock(
        '''
        def zero_arg_print():
            print('running zero arg')
        ''')

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_run_zero_arg.py')

        with open(modpath, 'w') as file:
            file.write(source)

        # zero-arg callables don't run in "all" mode
        with utils.CaptureStdout() as cap:
            try:
                runner.doctest_module(modpath, 'all', argv=[''], verbose=3)
            except Exception:
                pass
        assert 'running zero arg' not in cap.text

        with utils.CaptureStdout() as cap:
            try:
                runner.doctest_module(modpath, 'zero_arg_print', argv=[''], verbose=3)
            except Exception:
                pass
        # print(cap.text)
        assert 'running zero arg' in cap.text
Example #2
def test_list():
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock(
        '''
        # --- HELPERS ---
        def real_test1(a=1):
            """
                Example:
                    >>> pass
            """
            pass

        def fake_test1(a=1):
            pass

        def real_test2():
            """
                Example:
                    >>> pass
            """
            pass

        def fake_test2():
            pass
        ''')

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_list.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout() as cap:
            runner.doctest_module(modpath, 'list', argv=[''])

        assert 'real_test1' in cap.text
        assert 'real_test2' in cap.text
        assert 'fake_test1' not in cap.text
        assert 'fake_test2' not in cap.text

        # test command=None
        with utils.CaptureStdout() as cap:
            runner.doctest_module(modpath, None, argv=[''])

        assert 'real_test1' in cap.text
        assert 'real_test2' in cap.text
        assert 'fake_test1' not in cap.text
        assert 'fake_test2' not in cap.text
Example #3
def test_runner_config():
    """
    pytest testing/test_runner.py::test_runner_config -s
    """
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock('''
        def foo():
            """
                Example:
                    >>> print('i wanna see this')
            """
        ''')

    config = {
        'default_runtime_state': {
            'SKIP': True
        },
    }

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_example_run.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout() as cap:
            runner.doctest_module(modpath, 'foo', argv=[''], config=config)

    assert 'SKIPPED' in cap.text
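
The default runtime state above is only a default: an inline directive on a
doctest line should override it for that example. The following is a minimal,
hypothetical sketch (not from the test suite; file and function names are made
up) of opting a single doctest back in with `# xdoctest: -SKIP`:

def test_directive_overrides_default_skip():
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock(
        '''
        def skipped_by_default():
            """
                Example:
                    >>> print('hidden by' + ' default SKIP')
            """

        def opts_back_in():
            """
                Example:
                    >>> # xdoctest: -SKIP
                    >>> print('runs despite' + ' default SKIP')
            """
        ''')

    config = {
        'default_runtime_state': {
            'SKIP': True
        },
    }

    with utils.TempDir() as temp:
        modpath = join(temp.dpath, 'test_directive_override.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout() as cap:
            runner.doctest_module(modpath, 'all', argv=[''], config=config)

    # String concatenation keeps the echoed source from matching the asserts.
    assert 'runs despite default SKIP' in cap.text
    assert 'hidden by default SKIP' not in cap.text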
Example #4
def _run_case(source):
    from xdoctest import utils
    from xdoctest import runner
    from os.path import join
    COLOR = 'yellow'

    def cprint(msg, color=COLOR):
        print(utils.color_text(str(msg), color))

    cprint('\n\n' '\n <RUN CASE> ' '\n  ========  ' '\n', COLOR)

    cprint('DOCTEST SOURCE:')
    cprint('---------------')
    print(
        utils.indent(
            utils.add_line_numbers(utils.highlight_code(source, 'python'))))

    print('')

    import hashlib
    hasher = hashlib.sha1()
    hasher.update(source.encode('utf8'))
    hashid = hasher.hexdigest()[0:8]

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_linenos_' + hashid + '.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout(supress=False) as cap:
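            # NOTE: 'supress' (sic) is the keyword spelling in this version
            # of xdoctest.utils.CaptureStdout.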
            runner.doctest_module(modpath, 'all', argv=[''])

    cprint('\n\n --- </END RUN CASE> --- \n\n', COLOR)
    return cap.text
Example #5
def test_global_exec():
    """
    pytest testing/test_runner.py::test_global_exec -s
    """
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock(
        '''
        def foo():
            """
                Example:
                    >>> print(a)
            """
        ''')

    config = {
        'global_exec': 'a=1',
    }

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_example_run.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout() as cap:
            runner.doctest_module(modpath, 'foo', argv=[''], config=config)

    assert '1 passed' in cap.text
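
The runner's `run` method (see Example #11 later in this section) expands
literal '\n' sequences in `global_exec` into real newlines, so multi-statement
setup can ride in a single string. A hedged sketch of that behavior (file and
function names are illustrative):

def test_multiline_global_exec():
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock(
        '''
        def foo():
            """
                Example:
                    >>> print(a)
            """
        ''')

    # A literal backslash-n in the string; the runner expands it to a
    # newline before compiling the global_exec block.
    config = {
        'global_exec': 'import math\\na = int(math.pi)',
    }

    with utils.TempDir() as temp:
        modpath = join(temp.dpath, 'test_multiline_global_exec.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout() as cap:
            runner.doctest_module(modpath, 'foo', argv=[''], config=config)

    assert '1 passed' in cap.text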
Example #6
def test_runner_syntax_error():
    """
        python testing/test_errors.py test_runner_syntax_error
    """
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock(
        '''
        def test_parsetime_syntax_error1():
            """
                Example:
                    >>> from __future__ import print_function
                    >>> print 'Parse-Time Syntax Error'
            """

        def test_parsetime_syntax_error2():
            """
                Example:
                    >>> def bad_syntax() return for
            """

        def test_runtime_error():
            """
                Example:
                    >>> print('Runtime Error {}'.format(5 / 0))
            """

        def test_runtime_name_error():
            """
                Example:
                    >>> print('Name Error {}'.format(foo))
            """

        def test_runtime_warning():
            """
                Example:
                    >>> import warnings
                    >>> warnings.warn('in-code warning')
            """
        ''')

    temp = utils.TempDir(persist=True)
    temp.ensure()
    dpath = temp.dpath
    modpath = join(dpath, 'test_runner_syntax_error.py')
    with open(modpath, 'w') as file:
        file.write(source)

    with utils.CaptureStdout() as cap:
        runner.doctest_module(modpath, 'all', argv=[''], style='freeform',
                              verbose=0)

    print(utils.indent(cap.text))

    assert '1 run-time warnings' in cap.text
    assert '2 parse-time warnings' in cap.text

    # Assert summary line
    assert '3 warnings' in cap.text
    assert '2 failed' in cap.text
    assert '1 passed' in cap.text
Example #7
def test_all_disabled():
    """
    pytest testing/test_runner.py::test_all_disabled -s -vv
    python testing/test_runner.py test_all_disabled
    """
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock(
        '''
        def foo():
            """
                Example:
                    >>> # DISABLE_DOCTEST
                    >>> print('all will' + ' not print this')
            """

        def bar():
            """
                Example:
                    >>> print('all will' + ' print this')
            """
        ''')

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_all_disabled.py')

        with open(modpath, 'w') as file:
            file.write(source)

        # disabled tests don't run in "all" mode
        with utils.CaptureStdout() as cap:
            runner.doctest_module(modpath, 'all', argv=[''])
        assert 'all will print this' in cap.text
        # print('    ' + cap.text.replace('\n', '\n    '))
        assert 'all will not print this' not in cap.text

        # Running a disabled example explicitly should work
        with utils.CaptureStdout() as cap:
            runner.doctest_module(modpath, 'foo', argv=[''])
        # print('    ' + cap.text.replace('\n', '\n    '))
        assert 'all will not print this' in cap.text
Example #8
def test_hack_the_sys_argv():
    """
    Tests hacky solution to issue #76

    pytest testing/test_runner.py::test_hack_the_sys_argv -s

    References:
        https://github.com/Erotemic/xdoctest/issues/76
    """
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock(
        '''
        def foo():
            """
                Example:
                    >>> # xdoctest: +REQUIRES(--hackedflag)
                    >>> print('This will run if global_exec specified')
            """
        ''')

    import sys
    NEEDS_FIX = '--hackedflag' not in sys.argv

    config = {
        'global_exec': 'import sys; sys.argv.append("--hackedflag")'
    }

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_example_run.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout() as cap:
            runner.doctest_module(modpath, 'foo', argv=[''], config=config)

    if NEEDS_FIX:
        # Fix the global state
        sys.argv.remove('--hackedflag')

    # print(cap.text)
    assert '1 passed' in cap.text
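
Conversely, when the required flag is absent from `sys.argv` and no
`global_exec` hack is applied, the guarded part should simply be skipped. A
hypothetical companion sketch (string concatenation again keeps the echoed
source from satisfying the assert):

def test_requires_flag_absent():
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock(
        '''
        def foo():
            """
                Example:
                    >>> # xdoctest: +REQUIRES(--hackedflag)
                    >>> print('only with' + ' the flag')
            """
        ''')

    with utils.TempDir() as temp:
        modpath = join(temp.dpath, 'test_requires_flag.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout() as cap:
            runner.doctest_module(modpath, 'foo', argv=[''])

    assert 'only with the flag' not in cap.text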
Example #9
def _run_case(source, style='auto'):
    """
    Runs all doctests in a source block

    Args:
        source (str): source code of an entire file
        style (str): doctest parsing style (e.g. 'auto', 'google', 'freeform')

    TODO: _run_case is duplicated across test files and should be factored
        into a shared test utility
    """
    from xdoctest import utils
    from xdoctest import runner
    from os.path import join
    COLOR = 'yellow'
    def cprint(msg, color=COLOR):
        print(utils.color_text(str(msg), color))
    cprint('\n\n'
           '\n <RUN CASE> '
           '\n  ========  '
           '\n', COLOR)

    cprint('CASE SOURCE:')
    cprint('------------')
    print(utils.indent(
        utils.add_line_numbers(utils.highlight_code(source, 'python'))))

    print('')

    import hashlib
    hasher = hashlib.sha1()
    hasher.update(source.encode('utf8'))
    hashid = hasher.hexdigest()[0:8]

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_linenos_' + hashid + '.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout(supress=False) as cap:
            runner.doctest_module(modpath, 'all', argv=[''], style=style)

    cprint('\n\n --- </END RUN CASE> --- \n\n', COLOR)
    return cap.text
Example #10
def test_example_run():
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock('''
        def foo():
            """
                Example:
                    >>> print('i wanna see this')
            """
        ''')

    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_example_run.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout() as cap:
            runner.doctest_module(modpath, 'foo', argv=[''])

    assert 'i wanna see this' in cap.text
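
The call also returns a summary (the runner's `run` method in the next example
documents a summary dict). A small sketch that only assumes the return value
exists, without asserting its exact keys:

def test_example_run_summary():
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock(
        '''
        def foo():
            """
                Example:
                    >>> print('i wanna see this')
            """
        ''')

    with utils.TempDir() as temp:
        modpath = join(temp.dpath, 'test_example_run.py')

        with open(modpath, 'w') as file:
            file.write(source)

        with utils.CaptureStdout():
            summary = runner.doctest_module(modpath, 'foo', argv=[''])

    # The exact keys are an implementation detail; just show the mapping.
    print(summary)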
Example #11
    def run(self, verbose=None, on_error=None):
        """
        Executes the doctest, checks the results, reports the outcome.

        Args:
            verbose (int): verbosity level
            on_error (str): can be 'raise' or 'return'

        Returns:
            Dict : summary
        """
        on_error = self.config.getvalue('on_error', on_error)
        verbose = self.config.getvalue('verbose', verbose)
        if on_error not in {'raise', 'return'}:
            raise KeyError(on_error)

        self._parse()  # parse out parts if we have not already done so
        self._pre_run(verbose)
        self._import_module()

        # Prepare for actual test run
        test_globals, compileflags = self._test_globals()

        self.logged_evals.clear()
        self.logged_stdout.clear()
        self._unmatched_stdout = []

        self._skipped_parts = []
        self.exc_info = None
        self._suppressed_stdout = verbose <= 1

        # Initialize a new runtime state
        default_state = self.config['default_runtime_state']
        runstate = self._runstate = directive.RuntimeState(default_state)
        # setup reporting choice
        runstate.set_report_style(self.config['reportchoice'].lower())

        global_exec = self.config.getvalue('global_exec')
        if global_exec:
            # Hack to make it easier to specify multi-line input on the CLI
            global_source = utils.codeblock(global_exec.replace('\\n', '\n'))
            global_code = compile(global_source,
                                  mode='exec',
                                  filename='<doctest:' + self.node + ':' +
                                  'global_exec>',
                                  flags=compileflags,
                                  dont_inherit=True)
            exec(global_code, test_globals)

        # Can't do this because we can't force execution of SCRIPTS
        # if self.is_disabled():
        #     runstate['SKIP'] = True

        # - [x] TODO: fix CaptureStdout so it doesn't break embedding shells
        # don't capture stdout for zero-arg blocks
        # needs_capture = self.block_type != 'zero-arg'
        # I think the bug that broke embedding shells is fixed, so it is now
        # safe to capture. If not, uncomment above lines. If this works without
        # issue, then remove these notes in a future version.
        # needs_capture = False
        needs_capture = True

        # Use the same capture object for all parts in the test
        cap = utils.CaptureStdout(supress=self._suppressed_stdout,
                                  enabled=needs_capture)
        with warnings.catch_warnings(record=True) as self.warn_list:
            for partx, part in enumerate(self._parts):
                # Extract directives and update the runtime state
                runstate.update(part.directives)

                # Handle runtime actions
                if runstate['SKIP'] or len(runstate['REQUIRES']) > 0:
                    self._skipped_parts.append(part)
                    continue

                # Prepare to capture stdout and evaluated values
                self.failed_part = part
                got_eval = constants.NOT_EVALED
                try:
                    # Compile code, handle syntax errors
                    #   part.compile_mode can be single, exec, or eval.
                    #   Typically single is used instead of eval
                    self._partfilename = '<doctest:' + self.node + '>'
                    code = compile(part.source,
                                   mode=part.compile_mode,
                                   filename=self._partfilename,
                                   flags=compileflags,
                                   dont_inherit=True)
                except KeyboardInterrupt:  # nocover
                    raise
                except Exception:
                    raise
                    # self.exc_info = sys.exc_info()
                    # ex_type, ex_value, tb = self.exc_info
                    # self.failed_tb_lineno = tb.tb_lineno
                    # if on_error == 'raise':
                    #     raise
                try:
                    # Execute the doctest code
                    try:
                        # NOTE: For code passed to eval or exec, there is no
                        # difference between locals and globals. Only pass in
                        # one dict, otherwise there is weird behavior
                        with cap:
                            # We can execute each part using exec or eval.  If
                            # a doctest part has `compile_mode=eval` we
                            # expect it to return an object with a repr that
                            # can be compared to a "want" statement.
                            # print('part.compile_mode = {!r}'.format(part.compile_mode))
                            if part.compile_mode == 'eval':
                                # print('test_globals = {}'.format(sorted(test_globals.keys())))
                                got_eval = eval(code, test_globals)
                                # if EVAL_MIGHT_RETURN_COROUTINE:
                                #     import types
                                #     if isinstance(got_eval, types.CoroutineType):
                                #         # In 3.9-rc (2020-mar-31) it looks like
                                #         # eval sometimes returns coroutines. I
                                #         # found no docs on this. Not sure if it
                                #         # will be mainlined, but this seems to
                                #         # fix it.
                                #         import asyncio
                                #         got_eval =  asyncio.run(got_eval)
                            else:
                                exec(code, test_globals)

                        # Record any standard output and "got_eval" produced by
                        # this doctest_part.
                        self.logged_evals[partx] = got_eval
                        self.logged_stdout[partx] = cap.text
                    except Exception:
                        if part.want:
                            # A failure may be expected if the traceback
                            # matches the part's want statement.
                            exception = sys.exc_info()
                            exc_got = traceback.format_exception_only(
                                *exception[:2])[-1]
                            want = part.want
                            checker.check_exception(exc_got, want, runstate)
                        else:
                            raise
                    else:
                        """
                        TODO:
                            [ ] - Delay got-want failure until the end of the
                            doctest. Allow the rest of the code to run.  If
                            multiple errors occur, show them both.
                        """
                        if part.want:
                            got_stdout = cap.text
                            if not runstate['IGNORE_WANT']:
                                part.check(got_stdout,
                                           got_eval,
                                           runstate,
                                           unmatched=self._unmatched_stdout)
                            # Clear unmatched output when a check passes
                            self._unmatched_stdout = []
                        else:
                            # If a part doesn't have a want, allow its output
                            # to be matched by the next part.
                            self._unmatched_stdout.append(cap.text)

                # Handle anything that could go wrong
                except KeyboardInterrupt:  # nocover
                    raise
                except (exceptions.ExitTestException,
                        exceptions._pytest.outcomes.Skipped):
                    if verbose > 0:
                        print('Test gracefully exits')
                    break
                except checker.GotWantException:
                    # When the "got" doesn't match the "want"
                    self.exc_info = sys.exc_info()
                    if on_error == 'raise':
                        raise
                    break
                except checker.ExtractGotReprException as ex:
                    # When we fail to extract the "got"
                    self.exc_info = sys.exc_info()
                    if on_error == 'raise':
                        raise ex.orig_ex
                    break
                except Exception as _ex_dbg:
                    ex_type, ex_value, tb = sys.exc_info()

                    DEBUG = 0
                    if DEBUG:
                        print('_ex_dbg = {!r}'.format(_ex_dbg))
                        print('<DEBUG: doctest encountered exception>',
                              file=sys.stderr)
                        print(''.join(traceback.format_tb(tb)),
                              file=sys.stderr)
                        print('</DEBUG>', file=sys.stderr)

                    # Search for the traceback that corresponds with the
                    # doctest, and remove the parts that point to
                    # boilerplate lines in this file.
                    found_lineno = None
                    for sub_tb in _traverse_traceback(tb):
                        tb_filename = sub_tb.tb_frame.f_code.co_filename
                        if tb_filename == self._partfilename:
                            # Walk up the traceback until we find the one that has
                            # the doctest as the base filename
                            found_lineno = sub_tb.tb_lineno
                            break
                    if DEBUG:
                        # The only traceback remaining should be
                        # the part that is relevant to the user
                        print('<DEBUG: best sub_tb>', file=sys.stderr)
                        print('found_lineno = {!r}'.format(found_lineno),
                              file=sys.stderr)
                        print(''.join(traceback.format_tb(sub_tb)),
                              file=sys.stderr)
                        print('</DEBUG>', file=sys.stderr)

                    if found_lineno is None:
                        if DEBUG:
                            print(
                                'UNABLE TO CLEAN TRACEBACK. EXIT DUE TO DEBUG')
                            sys.exit(1)
                        raise ValueError(
                            'Could not clean traceback: ex = {!r}'.format(
                                _ex_dbg))
                    else:
                        self.failed_tb_lineno = found_lineno

                    self.exc_info = (ex_type, ex_value, tb)

                    # The idea of CLEAN_TRACEBACK is to make it so the
                    # traceback from this function doesn't clutter the error
                    # message the user sees.
                    if on_error == 'raise':
                        raise
                    break
                finally:
                    if cap.enabled:
                        assert cap.text is not None
                    # Ensure that we logged the output even in failure cases
                    self.logged_evals[partx] = got_eval
                    self.logged_stdout[partx] = cap.text

        if self.exc_info is None:
            self.failed_part = None

        if len(self._skipped_parts) == len(self._parts):
            # we skipped everything
            if self.mode == 'pytest':
                import pytest
                pytest.skip()

        summary = self._post_run(verbose)

        # Clear the global namespace so doctests don't leak memory
        self.global_namespace.clear()

        return summary
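
The directive handling in the loop above (`runstate.update(part.directives)`)
can be exercised directly. A small sketch, assuming the public
`directive.Directive.extract` helper behaves as its docstrings suggest:

def demo_runtime_state_update():
    from xdoctest import directive

    # Equivalent to config['default_runtime_state'] = {'SKIP': True}
    runstate = directive.RuntimeState({'SKIP': True})
    assert runstate['SKIP']

    # Directives parsed from a doctest comment update the state, mirroring
    # `runstate.update(part.directives)` in the run loop.
    directives = list(directive.Directive.extract('# xdoctest: -SKIP'))
    runstate.update(directives)
    assert not runstate['SKIP']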
Example #12
def test_runner_failures():
    """
    python testing/test_runner.py  test_runner_failures
    pytest testing/test_runner.py::test_runner_failures -s
    pytest testing/test_runner.py::test_all_disabled -s
    """
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock('''
        def test1():
            """
                Example:
                    >>> pass
            """

        def test2():
            """
                Example:
                    >>> assert False, 'test 2.1'

                Example:
                    >>> assert False, 'test 2.2'
            """

        def test3():
            """
                Example:
                    >>> pass

                Example:
                    >>> pass
            """

        def test4():
            """
                Example:
                    >>> assert False, 'test 3'
            """
        ''')

    temp = utils.TempDir()
    temp.ensure()
    # with utils.TempDir() as temp:
    dpath = temp.dpath
    modpath = join(dpath, 'test_runner_failures.py')

    with open(modpath, 'w') as file:
        file.write(source)

    # run everything; several of these doctests are expected to fail
    with utils.CaptureStdout(supress=True) as cap:
        try:
            runner.doctest_module(modpath, 'all', argv=[''], verbose=1)
        except Exception:
            pass

    print('\nNOTE: the following output is part of a test')
    print(utils.indent(cap.text, '... '))
    print('NOTE: above output is part of a test')

    # assert '.FFF' in cap.text
    assert '3 / 6 passed' in cap.text
    assert '3 failed 3 passed' in cap.text
Example #13
def test_runner_syntax_error():
    """
        python testing/test_errors.py test_runner_syntax_error

        xdoctest -m testing/test_errors.py test_runner_syntax_error
    """
    import six
    from xdoctest import runner
    from xdoctest import utils
    from os.path import join

    source = utils.codeblock(r'''
        def demo_parsetime_syntax_error1():
            """
                Example:
                    >>> from __future__ import print_function
                    >>> print 'Parse-Time Syntax Error'
            """

        def demo_parsetime_syntax_error2():
            """
                Example:
                    >>> def bad_syntax() return for
            """

        def demo_runtime_error():
            """
                Example:
                    >>> print('Runtime Error {}'.format(5 / 0))
            """

        def demo_runtime_name_error():
            """
                Example:
                    >>> print('Name Error {}'.format(foo))
            """

        def demo_runtime_warning():
            """
                Example:
                    >>> import warnings
                    >>> warnings.warn('in-code warning')
            """
        ''')

    temp = utils.TempDir(persist=True)
    temp.ensure()
    dpath = temp.dpath
    modpath = join(dpath, 'demo_runner_syntax_error.py')
    with open(modpath, 'w') as file:
        file.write(source)

    with utils.CaptureStdout() as cap:
        runner.doctest_module(modpath,
                              'all',
                              argv=[''],
                              style='freeform',
                              verbose=1)

    print('CAPTURED [[[[[[[[')
    print(utils.indent(cap.text))
    print(']]]]]]]] # CAPTURED')

    if six.PY2:
        captext = utils.ensure_unicode(cap.text)
    else:
        captext = cap.text

    assert '1 run-time warnings' in captext
    assert '2 parse-time warnings' in captext

    # Assert summary line
    assert '3 warnings' in captext
    assert '2 failed' in captext
    assert '1 passed' in captext