Example #1
def _run_test_file_with_config(filename, globs, optionflags):
    """Modified from doctest.py to use custom checker."""

    text, filename = _load_testfile(filename)
    name = os.path.basename(filename)

    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if '__name__' not in globs:
        globs['__name__'] = '__main__'

    checker = Py23DocChecker()
    runner = doctest.DocTestRunner(checker=checker,
                                   verbose=None,
                                   optionflags=optionflags)

    parser = doctest.DocTestParser()
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)

    runner.summarize()

    if doctest.master is None:
        doctest.master = runner
    else:
        doctest.master.merge(runner)

    return doctest.TestResults(runner.failures, runner.tries)
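A minimal usage sketch for the helper above; the file path and the option flags are illustrative, and `Py23DocChecker` is assumed to be defined in the same module:

import doctest

# Hypothetical doctest file with interactive examples.
results = _run_test_file_with_config(
    "docs/examples.txt",
    globs=None,
    optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
print("%d failures out of %d tries" % (results.failed, results.attempted))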
Example #2
def _run_docstring_examples(f,
                            globs,
                            verbose=False,
                            name="NoName",
                            compileflags=None,
                            optionflags=0):
    """
    Test examples in the given object's docstring (`f`), using `globs`
    as globals.  Optional argument `name` is used in failure messages.
    If the optional argument `verbose` is true, then generate output
    even if there are no failures.

    `compileflags` gives the set of flags that should be used by the
    Python compiler when running the examples.  If not specified, then
    it will default to the set of future-import flags that apply to
    `globs`.

    Optional keyword arg `optionflags` specifies options for the
    testing and output.  See the documentation for `testmod` for more
    information.
    """
    # Find, parse, and run all tests in the given module.
    finder = doctest.DocTestFinder(verbose=verbose, recurse=False)
    runner = doctest.DocTestRunner(verbose=verbose, optionflags=optionflags)
    for test in finder.find(f, name, globs=globs):
        runner.run(test, compileflags=compileflags)

    # ATK adds these two lines:
    runner.summarize()
    return doctest.TestResults(runner.failures, runner.tries)
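For context, a small sketch of how this helper might be called on a single function; the `triple` function and its doctest exist only for illustration:

import doctest

def triple(x):
    """
    >>> triple(2)
    6
    """
    return 3 * x

results = _run_docstring_examples(triple, globs={"triple": triple},
                                  name="triple",
                                  optionflags=doctest.ELLIPSIS)
print(results)  # expected: TestResults(failed=0, attempted=1)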
Example #3
def doctest_modules(modules,
                    verbose=False,
                    print_info=True,
                    extraglobs=dict()):
    finder = doctest.DocTestFinder(parser=DocTestParser())
    #    full_extraglobals = dict(globs.items() + extraglobs.items())
    full_extraglobals = globs.copy()
    full_extraglobals.update(extraglobs)
    failed, attempted = 0, 0
    for module in modules:
        if isinstance(module, types.ModuleType):
            runner = doctest.DocTestRunner(verbose=verbose)
            for test in finder.find(module, extraglobs=full_extraglobals):
                runner.run(test)
            result = runner.summarize()
        else:
            result = module(verbose=verbose)
        failed += result.failed
        attempted += result.attempted
        if print_info:
            print_results(module, result)

    if print_info:
        print('\nAll doctests:\n   %s failures out of %s tests.' %
              (failed, attempted))
    return doctest.TestResults(failed, attempted)
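A hedged usage sketch: `doctest_modules` relies on a module-level `globs` dict, a `print_results` helper, and a custom `DocTestParser`, none of which are shown above, and `my_module` stands in for a real module object:

import my_module  # hypothetical module containing doctests

results = doctest_modules([my_module], verbose=False, print_info=True)
assert results.failed == 0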
Example #4
def ic_testmod(m,
               name=None,
               globs=None,
               verbose=None,
               report=True,
               optionflags=0,
               extraglobs=None,
               raise_on_error=False,
               exclude_empty=False):
    """See original code in doctest.testmod."""
    if name is None:
        name = m.__name__
    finder = DocTestFinder(exclude_empty=exclude_empty)
    if raise_on_error:
        runner = DebugRunner(checker=Py23DocChecker(),
                             verbose=verbose,
                             optionflags=optionflags)
    else:
        runner = DocTestRunner(checker=Py23DocChecker(),
                               verbose=verbose,
                               optionflags=optionflags)
    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    return doctest.TestResults(runner.failures, runner.tries)
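A short sketch of calling the wrapper on an imported module; `mymodule` is a placeholder, and `Py23DocChecker`, `DocTestFinder`, `DocTestRunner`, and `DebugRunner` are assumed to be importable alongside `ic_testmod`:

import doctest
import mymodule  # hypothetical module with doctests

results = ic_testmod(mymodule, verbose=False,
                     optionflags=doctest.ELLIPSIS)
if results.failed:
    raise SystemExit(1)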
Example #5
def test_doctest(package_name, context_package_names):
    """
    Run all doctest strings in all Biotite subpackages.
    """
    # Collect all attributes of this package and its subpackages
    # as globals for the doctests
    globs = {}
    mod_names = []
    # The package itself is also used as context
    for name in context_package_names + [package_name]:
        context_package = import_module(name)
        globs.update({
            attr: getattr(context_package, attr)
            for attr in dir(context_package)
        })

    # Add fixed names for certain paths
    globs["path_to_directory"] = tempfile.gettempdir()
    globs["path_to_structures"] = join(".", "tests", "structure", "data")
    globs["path_to_sequences"] = join(".", "tests", "sequence", "data")
    # Add frequently used modules
    globs["np"] = np
    # Add frequently used objects
    globs["atom_array_stack"] = strucio.load_structure(
        join(".", "tests", "structure", "data", "1l2y.mmtf"))
    globs["atom_array"] = globs["atom_array_stack"][0]

    # Adjust NumPy print formatting
    np.set_printoptions(precision=3, floatmode="maxprec_equal")

    # Run doctests
    # This test does not use 'testfile()' or 'testmod()'
    # due to problems with doctest identification for Cython modules
    # More information below
    package = import_module(package_name)
    runner = doctest.DocTestRunner(verbose=False,
                                   optionflags=doctest.ELLIPSIS
                                   | doctest.REPORT_ONLY_FIRST_FAILURE)
    for test in doctest.DocTestFinder(exclude_empty=False).find(
            package,
            package.__name__,
            # It is necessary to set 'module' to 'False', as otherwise
            # Cython functions and classes would be falsely identified
            # as members of an external module by 'DocTestFinder._find()'
            # and consequently would be ignored
            #
            # Setting 'module=False' omits this check
            # This check is not necessary, as the Biotite subpackages
            # ('__init__.py' modules) should only contain attributes that
            # are part of the package itself.
            module=False,
            extraglobs=globs):
        runner.run(test)
    results = doctest.TestResults(runner.failures, runner.tries)
    try:
        assert results.failed == 0
    except AssertionError:
        print(f"Failing doctest in module {package}")
        raise
Example #6
def testmod(
    m=None,
    name=None,
    globs=None,
    verbose=None,
    report=True,
    optionflags=doctest.ELLIPSIS,
    extraglobs=None,
    raise_on_error=False,
    exclude_empty=False,
    verbose_level=None,
    filters=None,
):
    if globs is None:
        globs = dict()
    globs.update({"system_command": system_command})
    global master
    if m is None:
        m = sys.modules.get('__main__')
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m, ))
    if name is None:
        name = m.__name__
    finder = doctest.DocTestFinder(parser=ShellDocTestParser(),
                                   exclude_empty=exclude_empty)
    if raise_on_error:
        runner = doctest.DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = ShellDocTestRunner(verbose=verbose,
                                    verbose_level=verbose_level,
                                    optionflags=optionflags)
    tests = finder.find(m, name, globs=globs, extraglobs=extraglobs)
    if filters:
        _tests = list()
        z = dict((k, v) for v, k in enumerate(filters))
        for test in tests:
            test.examples = sorted(
                [x for x in test.examples if x.label in filters],
                key=lambda x: z[x.label])
            _tests.append(test)
        tests = _tests
    for test in tests:
        runner.run(test)
    if report:
        runner.summarize()
    if master is None:
        master = runner
    else:
        master.merge(runner)
    if sys.version_info < (2, 6):
        return runner.failures, runner.tries
    return doctest.TestResults(runner.failures, runner.tries)
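A hedged sketch of the `filters` feature: each example is expected to carry a `label` attribute assigned by `ShellDocTestParser`, and the module and labels below are illustrative only:

import my_shell_module  # hypothetical module with labelled shell doctests

# Run only the examples labelled 'setup' and 'smoke', in that order.
results = testmod(my_shell_module, filters=["setup", "smoke"])
print(results)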
Example #7
        def runtest(self):
            if self.fspath.basename == 'conftest.py':
                module = self.config._conftest.importconftest(self.fspath)
            else:
                module = self.fspath.pyimport()

            finder = DocTestFinderPlus(exclude_empty=False)
            opts = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
            runner = doctest.DebugRunner(verbose=False, optionflags=opts)

            for test in finder.find(module):
                runner.run(test)

            failed, tot = doctest.TestResults(runner.failures, runner.tries)
Example #8
def testmod_with_finder(
        m=None, name=None, globs=None, verbose=None, report=True, optionflags=0,
        extraglobs=None, raise_on_error=False, exclude_empty=False,
        finder=None):
    """
    Augment `testmod` with the ability to pass a custom `DocTestFinder`
    instance, which allows specific tests to be selected.

    Optional keyword arg "finder" specifies a finder instance to use in place
    of the default `DocTestFinder`.
    """
    # If no module was given, then use __main__.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')

    # Check that we were actually given a module.
    import inspect
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))

    # If no name was given, then use the module's name.
    if name is None:
        name = m.__name__

    # Find, parse, and run all tests in the given module.
    if finder is None:
        finder = doctest.DocTestFinder(exclude_empty=exclude_empty)

    if raise_on_error:
        runner = doctest.DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = doctest.DocTestRunner(verbose=verbose, optionflags=optionflags)

    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    if doctest.master is None:
        doctest.master = runner
    else:
        doctest.master.merge(runner)

    return doctest.TestResults(runner.failures, runner.tries)
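A sketch of the intended use: a custom finder that keeps only doctests whose name contains a given substring. The subclass and `mymodule` are illustrative and not part of the original code:

import doctest
import mymodule  # hypothetical module under test

class SubstringDocTestFinder(doctest.DocTestFinder):
    """Keep only doctests whose fully qualified name contains `substring`."""

    def __init__(self, substring, **kwargs):
        super().__init__(**kwargs)
        self._substring = substring

    def find(self, *args, **kwargs):
        tests = super().find(*args, **kwargs)
        return [t for t in tests if self._substring in t.name]

results = testmod_with_finder(
    mymodule,
    finder=SubstringDocTestFinder("parse", exclude_empty=False))
print(results)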
Example #9
def doctest_modules(modules, verbose=False, print_info=True):
    finder = doctest.DocTestFinder()
    failed, attempted = 0, 0
    for module in modules:
        runner = doctest.DocTestRunner(verbose=verbose)
        for test in finder.find(module):
            runner.run(test)
        result = runner.summarize()
        failed += result.failed
        attempted += result.attempted
        if print_info:
            print_results(module, result)

    if print_info:
        print('\nAll doctests:\n   %s failures out of %s tests.' % (failed, attempted))
    return doctest.TestResults(failed, attempted)
Example #10
 def test_testing_challenge_runs_only_selected_tests_and_runs(self):
     original_replacement = ("        return 24\n"
                             "    def foo(self):\n"
                             "        '''\n"
                             "        >>> Challenge().foo()\n"
                             "        'Success'\n"
                             "        '''\n"
                             "        return 'Failure'\n"
                             "def another():\n"
                             "    '''\n"
                             "    >>> another()\n"
                             "    [0, 1, 2]\n"
                             "    '''\n"
                             "    return list(range(3))\n")
     self.check_test_and_run_challenge(
         2020, 1, 'a', False, ['another'], False, original_replacement,
         (True, doctest.TestResults(failed=0, attempted=1), 24))
Example #11
 def test_testing_challenge_runs_successful_tests_and_runs(self):
     original_replacement = ("        return 42\n"
                             "    def foo(self):\n"
                             "        '''\n"
                             "        >>> Challenge().foo()\n"
                             "        'Success'\n"
                             "        '''\n"
                             "        return 'Success'\n"
                             "def another():\n"
                             "    '''\n"
                             "    >>> another()\n"
                             "    [0, 1, 2]\n"
                             "    '''\n"
                             "    return list(range(3))\n")
     self.check_test_and_run_challenge(
         2020, 1, 'a', False, [], False, original_replacement,
         (True, doctest.TestResults(failed=0, attempted=3), 42))
Example #12
def exec_tests(tests: Iterable[Doctest],
               quiet: bool = True) -> Tuple[doctest.TestResults, List[str]]:
    """Runs a list of `Doctest`s and collects and returns any error messages.

    Args:
        tests: The tests to run.
        quiet: If True, suppress the per-test progress output.

    Returns: A tuple containing the results (# failures, # attempts) and a list
        of the error outputs from each failing test.
    """
    if not quiet:
        try_print = print
    else:
        try_print = lambda *args, **kwargs: None
    try_print('Executing tests ', end='')

    failed, attempted = 0, 0
    error_messages = []
    for test in tests:
        out = OutputCapture()
        with out:
            r = test.run()
        failed += r.failed
        attempted += r.attempted
        if r.failed != 0:
            try_print('F', end='', flush=True)
            error = shell_tools.highlight(
                '{}\n{} failed, {} passed, {} total\n'.format(
                    test.file_name, r.failed, r.attempted - r.failed,
                    r.attempted),
                shell_tools.RED,
            )
            error += out.content()
            error_messages.append(error)
        else:
            try_print('.', end='', flush=True)

    try_print()

    return doctest.TestResults(failed=failed,
                               attempted=attempted), error_messages
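`Doctest`, `OutputCapture`, and `shell_tools` are project-specific helpers that are not shown here; the stand-in below only sketches the interface `exec_tests` expects, namely an object with a `file_name` attribute and a `run()` method returning `doctest.TestResults`:

import doctest

class FakeDoctest:
    """Illustrative stand-in for the project's Doctest wrapper."""
    file_name = "README.rst"

    def run(self):
        return doctest.TestResults(failed=0, attempted=2)

results, errors = exec_tests([FakeDoctest()], quiet=False)
print(results)  # expected: TestResults(failed=0, attempted=2)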
Example #13
def run_doctests(verbose=False):
    failed, attempted = 0, 0

    finder = doctest.DocTestFinder()
    
    # Use the default doctest.OutputChecker to test our NumericOutputChecker
    runner = doctest.DocTestRunner(verbose=verbose)
    for test in finder.find(NumericOutputChecker):
        runner.run(test)
    result = runner.summarize()
    failed += result.failed
    attempted += result.attempted

    # Test our NumericOutputChecker in action!
    runner = doctest.DocTestRunner(checker=NumericOutputChecker(), verbose=verbose)
    for test in finder.find(NumericExample):
        runner.run(test)
    # Summarize once after all tests have run, so counts are not
    # accumulated repeatedly inside the loop.
    result = runner.summarize()
    failed += result.failed
    attempted += result.attempted

    return doctest.TestResults(failed, attempted)
Example #14
def testmod(m=None,
            name=None,
            globs=None,
            verbose=None,
            report=True,
            optionflags=0,
            extraglobs=None,
            raise_on_error=False,
            exclude_empty=False):
    """Test examples in the given module.  Return (#failures, #tests).
    
    Largely duplicated from :func:`doctest.testmod`, but using
    :class:`_SelectiveDocTestParser`.

    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.

    Also test examples reachable from dict m.__test__ if it exists and is
    not None.  m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.

    Return (#failures, #tests).

    See help(doctest) for an overview.

    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__.  A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.  This is new in 2.4.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  This is new in 2.3.  Possible values (see the
    docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE
        
    as well as FiPy's flags
    
        GMSH
        SCIPY
        TVTK
        SERIAL
        PARALLEL
        PROCESSOR_0
        PROCESSOR_0_OF_2
        PROCESSOR_1_OF_2
        PROCESSOR_0_OF_3
        PROCESSOR_1_OF_3
        PROCESSOR_2_OF_3

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.
    """
    # If no module was given, then use __main__.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')

    # Check that we were actually given a module.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m, ))

    # If no name was given, then use the module's name.
    if name is None:
        name = m.__name__

    # Find, parse, and run all tests in the given module.
    finder = doctest.DocTestFinder(exclude_empty=exclude_empty,
                                   parser=_SelectiveDocTestParser())

    if raise_on_error:
        runner = doctest.DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = doctest.DocTestRunner(verbose=verbose,
                                       optionflags=optionflags)

    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()
        report_skips()

    return doctest.TestResults(runner.failures, runner.tries)
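A brief usage sketch OR'ing option flags together, as the docstring describes; `fipy.terms` merely stands in for any module whose doctests use the FiPy-specific flags:

import doctest
import fipy.terms  # stand-in for a module under test

results = testmod(fipy.terms,
                  optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
                  report=True)
print("failed=%d attempted=%d" % (results.failed, results.attempted))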
Example #15
    def _DocTestRunner__run(self, test, compileflags, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
        flags that should be used to execute examples.  Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed.  The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0

        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags

        SUCCESS, FAILURE, BOOM = range(3)  # `outcome` state

        check = self._checker.check_output

        # Process each example.
        for examplenum, example in enumerate(test.examples):

            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & doctest.REPORT_ONLY_FIRST_FAILURE
                     and failures > 0)

            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag

            # If 'SKIP' is set, then skip this example.
            if self.optionflags & doctest.SKIP:
                continue

            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)

            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)

            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                exec(
                    compile(example.source, filename, "exec", compileflags, 1),
                    test.globs)
                self.debugger.set_continue()  # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except Exception:
                exception = sys.exc_info()
                self.debugger.set_continue()  # ==== Example Finished ====

            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE  # guilty until proved innocent or insane

            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS

            # The example raised an exception:  check if it was expected.
            else:
                exc_msg = traceback.format_exception_only(*exception[:2])[-1]
                if not quiet:
                    got += doctest._exception_traceback(exception)

                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM

                # We expected an exception:  see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS

                # Another chance if they didn't care about the detail.
                elif self.optionflags & doctest.IGNORE_EXCEPTION_DETAIL:
                    if check(doctest._strip_exception_details(example.exc_msg),
                             doctest._strip_exception_details(exc_msg),
                             self.optionflags):
                        outcome = SUCCESS

            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exception)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)

            if failures and self.optionflags & doctest.FAIL_FAST:
                break

        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags

        # Record and return the number of failures and tries.
        self._DocTestRunner__record_outcome(test, failures, tries)
        return doctest.TestResults(failures, tries)
Example #16
 def test_missing_challenge_is_created_and_tests_and_runs(self):
     self.check_test_and_run_challenge(
         2020, 1, 'a', True, [], False, None,
         (True, doctest.TestResults(failed=1, attempted=1), None))