def run_doctest(name, doctest_string, global_environment):
    """
    Run a single test with given ``global_environment``.

    Returns ``(True, '')`` if the doctest passes.
    Returns ``(False, failure_message)`` if the doctest fails.

    Args:
        name (``str``): name of doctest
        doctest_string (``str``): doctest in string form
        global_environment (``dict``): global environment resulting from
            the execution of a python script/notebook

    Returns:
        ``tuple`` of (``bool``, ``str``): results from running the test
    """
    parsed = doctest.DocTestParser().parse(doctest_string, name)
    examples = [item for item in parsed if isinstance(item, doctest.Example)]
    test_case = doctest.DocTest(
        examples, global_environment, name, None, None, doctest_string)
    runner = doctest.DocTestRunner(verbose=True)
    captured = io.StringIO()
    # Capture everything the examples print; hide_outputs() suppresses
    # notebook display side effects while the examples run.
    with redirect_stdout(captured), redirect_stderr(captured), hide_outputs():
        runner.run(test_case, clear_globs=False)
    # summarize() prints a report we do not want; discard it.
    with open(os.devnull, 'w') as sink, \
            redirect_stderr(sink), redirect_stdout(sink):
        outcome = runner.summarize(verbose=True)
    # An individual test can only pass or fail
    if outcome.failed == 0:
        return (True, '')
    return False, captured.getvalue()
def rundoctest(text, ns=None, eraise=False):
    """Run the input source as a doctest, in the caller's namespace.

    :Parameters:
      text : str
        Source to execute.

    :Keywords:
      ns : dict (None)
        Namespace where the code should be executed.  If not given, the
        caller's locals and globals are used.
      eraise : bool (False)
        If true, immediately raise any exceptions instead of reporting
        them at the end.  This allows you to then do interactive debugging
        via IPython's facilities (use %debug after the fact, or with %pdb
        for automatic activation).
    """
    name = 'interactive doctest'
    filename = '<IPython console>'
    # DebugRunner re-raises example exceptions immediately; DocTestRunner
    # collects them and reports at the end.
    runner = doctest.DebugRunner() if eraise else doctest.DocTestRunner()
    if ns is None:
        caller = sys._getframe(1)
        ns = dict(caller.f_globals)
        ns.update(caller.f_locals)
    test = doctest.DocTestParser().get_doctest(text, ns, name, filename, 0)
    runner.run(test)
    runner.summarize(True)
def test(gentests=False):
    """Run this module's doctests, or collect them as unittest suites.

    Args:
        gentests (bool): if True, collect ``doctest.DocTestSuite`` objects
            into a list and return it; if False, run the doctests directly
            and report to stdout.

    Returns:
        ``list`` of suites when ``gentests`` is True, otherwise ``None``.
    """
    # BUG FIX: `re` was used below without being imported in this scope.
    import doctest, re, sys
    if gentests:
        R = [].append
    if sys.version_info[0] > 2:
        from reportPackages.rlextra.radxml import xmlutils as mod

        class Py23DocChecker(doctest.OutputChecker):
            # Strip u'' / u"" prefixes so Python 2 style expected output
            # still matches under Python 3.
            def check_output(self, want, got, optionflags):
                want = re.sub("u'(.*?)'", "'\\1'", want)
                want = re.sub('u"(.*?)"', '"\\1"', want)
                return doctest.OutputChecker.check_output(
                    self, want, got, optionflags)

        checker = Py23DocChecker()
        if gentests:
            R(doctest.DocTestSuite(checker=checker))
        else:
            finder = doctest.DocTestFinder(exclude_empty=False)
            runner = doctest.DocTestRunner(checker=checker)
            for t in finder.find(mod, mod.__name__,
                                 globs=None, extraglobs=None):
                runner.run(t)
            runner.summarize()
    elif gentests:
        R(doctest.DocTestSuite())
    else:
        doctest.testmod()
    # BUG FIX: R only exists when gentests is True; previously this line
    # raised NameError for the default gentests=False call.
    return R.__self__ if gentests else None
def test_file(self, filename):
    """Run the doctests found in *filename*, reporting results through
    ``self._reporter``.

    The path (relative to ``self._root_dir``) is converted to a dotted
    module name, the module is imported, and every non-empty doctest in
    it is run with ELLIPSIS and NORMALIZE_WHITESPACE enabled.
    """
    import doctest
    import unittest
    from StringIO import StringIO  # NOTE: Python 2 module
    rel_name = filename[len(self._root_dir)+1:]
    # Strip the trailing '.py' and turn path separators into dots.
    module = rel_name.replace(os.sep, '.')[:-3]
    setup_pprint()
    try:
        module = doctest._normalize_module(module)
        tests = doctest.DocTestFinder().find(module)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; only genuine import/lookup failures
        # should be reported as import errors.
        self._reporter.import_error(filename, sys.exc_info())
        return
    tests.sort()
    tests = [test for test in tests if len(test.examples) > 0]
    self._reporter.entering_filename(filename, len(tests))
    for test in tests:
        assert len(test.examples) != 0
        runner = doctest.DocTestRunner(optionflags=doctest.ELLIPSIS | \
                doctest.NORMALIZE_WHITESPACE)
        # Capture everything the doctest writes so failures can be shown.
        old = sys.stdout
        new = StringIO()
        sys.stdout = new
        try:
            f, t = runner.run(test, out=new.write, clear_globs=False)
        finally:
            sys.stdout = old
        if f > 0:
            self._reporter.doctest_fail(test.name, new.getvalue())
        else:
            self._reporter.test_pass()
    self._reporter.leaving_filename()
def test(f): """Run unit tests defined in a function's docstring (doctests)""" tests = doctest.DocTestFinder().find(f) assert len(tests) <= 1 for test in tests: # We redirect stdout to a string, so we can tell if the tests worked out or not orig_stdout = sys.stdout sys.stdout = io.StringIO() orig_rng_state = np.random.get_state() try: np.random.seed(1) results: doctest.TestResults = doctest.DocTestRunner().run(test) output = sys.stdout.getvalue() finally: sys.stdout = orig_stdout np.random.set_state(orig_rng_state) if results.failed > 0: print( f"❌ The are some issues with your implementation of `{f.__name__}`:" ) print(output, end="") print( "**********************************************************************" ) elif results.attempted > 0: print(f"✅ Your `{f.__name__}` passes some basic tests.") else: print(f"Could not find any tests for {f.__name__}")
def __call__(self, global_environment):
    """
    Run test with given global_environment.

    Returns:
        TestResult: score is the fraction of doctest examples that
        passed (1.0 when nothing failed), plus a human-readable summary.
    """
    test = doctest.DocTest(
        [e for e in self.examples if type(e) is doctest.Example],
        global_environment,
        self.name,
        None,
        None,
        self.doctest_string
    )
    doctestrunner = doctest.DocTestRunner(verbose=True)
    runresults = io.StringIO()
    with redirect_stdout(runresults), redirect_stderr(runresults):
        doctestrunner.run(test, clear_globs=False)
    # BUG FIX: os.devnull instead of the POSIX-only '/dev/null' so this
    # also works on Windows.
    with open(os.devnull, 'w') as f, redirect_stderr(f), redirect_stdout(f):
        result = doctestrunner.summarize(verbose=True)
    # BUG FIX: guard against ZeroDivisionError when no examples ran;
    # an empty test is treated as vacuously passing.
    if result.attempted == 0:
        score = 1.0
    else:
        score = 1.0 - (result.failed / result.attempted)
    if score == 1.0:
        summary = 'Test {} passed!'.format(self.name)
    else:
        summary = self.PLAIN_TEXT_FAILURE_SUMMARY_TEMPLATE.format(
            name=self.name,
            doctest_string=dedent(self.doctest_string),
            runresults=runresults.getvalue()
        )
    return TestResult(score, summary)
def _run_object_doctest(obj, module):
    # NOTE: Python 2 code (uses the `print` statement).  Runs the doctests
    # attached to `obj` and raises test.test_support.TestFailed if any fail.
    # `verbose` is a module-level flag (regrtest convention) — it is not
    # defined in this block; confirm it exists at module scope.
    #
    # Direct doctest output (normally just errors) to real stdout; doctest
    # output shouldn't be compared by regrtest.
    save_stdout = sys.stdout
    sys.stdout = test.test_support.get_original_stdout()
    try:
        finder = doctest.DocTestFinder(verbose=verbose, recurse=False)
        runner = doctest.DocTestRunner(verbose=verbose)
        # Use the object's fully qualified name if it has one
        # Otherwise, use the module's name
        try:
            name = "%s.%s" % (obj.__module__, obj.__name__)
        except AttributeError:
            name = module.__name__
        for example in finder.find(obj, name, module):
            runner.run(example)
        f, t = runner.failures, runner.tries
        if f:
            raise test.test_support.TestFailed("%d of %d doctests failed" %
                                               (f, t))
    finally:
        # Always restore the real stdout, even when TestFailed propagates.
        sys.stdout = save_stdout
    if verbose:
        print 'doctest (%s) ... %d tests with zero failures' % (
            module.__name__, t)
    # Returns (failures, tries) so callers can aggregate counts.
    return f, t
def _run_docstring_examples(f, globs, verbose=False, name="NoName", compileflags=None, optionflags=0): """ Test examples in the given object's docstring (`f`), using `globs` as globals. Optional argument `name` is used in failure messages. If the optional argument `verbose` is true, then generate output even if there are no failures. `compileflags` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to `globs`. Optional keyword arg `optionflags` specifies options for the testing and output. See the documentation for `testmod` for more information. """ # Find, parse, and run all tests in the given module. finder = doctest.DocTestFinder(verbose=verbose, recurse=False) runner = doctest.DocTestRunner(verbose=verbose, optionflags=optionflags) for test in finder.find(f, name, globs=globs): runner.run(test, compileflags=compileflags) # ATK adds these two lines: runner.summarize() return doctest.TestResults(runner.failures, runner.tries)
def _run_doctest(self, *fnames):
    """Run the doctests embedded in the given doc/build files.

    Each file is loaded, SQLAlchemy-specific ``{stop}``/``{sql}``/
    ``{opensql}`` markers are stripped, and the resulting text is run as
    one doctest per file.  Fails via assert if any example failed.
    """
    here = os.path.dirname(__file__)
    sqla_base = os.path.normpath(os.path.join(here, "..", ".."))
    optionflags = (
        doctest.ELLIPSIS
        | doctest.NORMALIZE_WHITESPACE
        | doctest.IGNORE_EXCEPTION_DETAIL
        | _get_allow_unicode_flag()
    )
    runner = doctest.DocTestRunner(
        verbose=None,
        optionflags=optionflags,
        checker=_get_unicode_checker(),
    )
    parser = doctest.DocTestParser()
    globs = {"print_function": print_function}
    for fname in fnames:
        path = os.path.join(sqla_base, "doc/build", fname)
        if not os.path.exists(path):
            config.skip_test("Can't find documentation file %r" % path)
        with open(path, encoding="utf-8") as file_:
            content = file_.read()
        content = re.sub(r"{(?:stop|sql|opensql)}", "", content)
        test = parser.get_doctest(content, globs, fname, fname, 0)
        runner.run(test, clear_globs=False)
        runner.summarize()
        # Carry definitions forward so later files can build on them.
        globs.update(test.globs)
    assert not runner.failures
def run_doctest(name, doctest_string, global_environment):
    """
    Run a single test with given global_environment.
    Returns (True, '') if the doctest passes.
    Returns (False, failure_message) if the doctest fails.

    Args:
        name (str): name of the doctest
        doctest_string (str): doctest in string form
        global_environment (dict): globals for the examples

    Returns:
        tuple of (bool, str): pass flag and captured failure output.
    """
    examples = doctest.DocTestParser().parse(doctest_string, name)
    test = doctest.DocTest(
        [e for e in examples if isinstance(e, doctest.Example)],
        global_environment,
        name,
        None,
        None,
        doctest_string,
    )
    doctestrunner = doctest.DocTestRunner(verbose=True)
    runresults = io.StringIO()
    with redirect_stdout(runresults), redirect_stderr(
            runresults), hide_outputs():
        doctestrunner.run(test, clear_globs=False)
    # BUG FIX: '/dev/null' is POSIX-only; os.devnull also works on Windows.
    with open(os.devnull, "w") as f, redirect_stderr(f), redirect_stdout(f):
        result = doctestrunner.summarize(verbose=True)
    # An individual test can only pass or fail
    if result.failed == 0:
        return (True, "")
    else:
        return False, runresults.getvalue()
def doctest_modules(modules, verbose=False, print_info=True, extraglobs=dict()):
    """Run doctests for each entry in *modules* and report aggregate totals.

    Entries may be actual modules (their doctests are discovered and run
    with the shared ``globs`` plus *extraglobs*) or callables taking
    ``verbose`` and returning an object with ``failed``/``attempted``.

    Returns:
        doctest.TestResults: total (failed, attempted) over all entries.
    """
    finder = doctest.DocTestFinder(parser=DocTestParser())
    full_extraglobals = globs.copy()
    full_extraglobals.update(extraglobs)
    failed = 0
    attempted = 0
    for module in modules:
        if isinstance(module, types.ModuleType):
            runner = doctest.DocTestRunner(verbose=verbose)
            for found in finder.find(module, extraglobs=full_extraglobals):
                runner.run(found)
            result = runner.summarize()
        else:
            result = module(verbose=verbose)
        failed += result.failed
        attempted += result.attempted
        if print_info:
            print_results(module, result)
    if print_info:
        print('\nAll doctests:\n %s failures out of %s tests.'
              % (failed, attempted))
    return doctest.TestResults(failed, attempted)
def doctest_region():
    """Run doctests in the current region.

    The region is treated as a 'doctest file', not as python code; to run
    doctests in python code, use exec_and_doctest_region().  Any errors in
    the doctests are placed in an emacs buffer called 'Doctest output'.
    """
    source = get_region()
    test = doctest.DocTestParser().get_doctest(
        source, _py_globals,
        "<emacs text selection>", "<emacs text selection>", 0)
    capture = io.StringIO()
    runner = doctest.DocTestRunner(verbose=1)
    runner.run(test, out=capture.write)
    if runner.failures:
        insert_in_other_buffer("Doctest output", capture.getvalue())
    else:
        lisp.message("All %d doctest(s) passed." % (runner.tries))
def test(**kwargs): import doctest doctest.NORMALIZE_WHITESPACE = 1 verbosity = kwargs.get('verbose', 0) if verbosity == 0: print('Running doctests...') # ignore py2-3 unicode differences import re class Py23DocChecker(doctest.OutputChecker): def check_output(self, want, got, optionflags): if sys.version_info[0] == 2: got = re.sub("u'(.*?)'", "'\\1'", got) got = re.sub('u"(.*?)"', '"\\1"', got) res = doctest.OutputChecker.check_output(self, want, got, optionflags) return res def summarize(self): doctest.OutputChecker.summarize(True) # run tests runner = doctest.DocTestRunner(checker=Py23DocChecker(), verbose=verbosity) with open("README.md","rb") as fobj: test = doctest.DocTestParser().get_doctest(string=fobj.read().decode("utf8"), globs={}, name="README", filename="README.md", lineno=0) failure_count, test_count = runner.run(test) # print results if verbosity: runner.summarize(True) else: if failure_count == 0: print('All test passed successfully') elif failure_count > 0: runner.summarize(verbosity) return failure_count
def test_docstrings(module, monkeypatch):
    """Pytest entry point: run every doctest found in *module* and fail
    the test if any example fails."""
    # Always use pre-Python 3.9 implementation
    monkeypatch.setattr(PurePosixPath, "relative_to", relative_to)
    # Check that we were actually given a module.
    if not inspect.ismodule(module):
        raise TypeError(f"testmod: module required; {module!r}")
    banner = f"Running doctest in {module!r}"
    print(banner.center(shutil.get_terminal_size().columns, '='))
    with redirect_output(combine=True) as (stdout, stderr):
        # Find, parse, and run all tests in the given module.
        finder = doctest.DocTestFinder()
        runner = doctest.DocTestRunner(verbose=VERBOSE >= 2)
        for found in finder.find(module, module.__name__):
            runner.run(found)
        runner.summarize(verbose=bool(VERBOSE))
    print(indent(stdout.getvalue(), " "))
    if runner.failures:
        pytest.fail(msg=f"{runner.failures} tests failed")
def run_doctest(obj, state: Dict[str, Any] = None, check: bool = True):
    """
    Run doctest on the object provided.

    Globals should be passed via the ``globs`` key within the ``state``
    dict; they are a key-value mapping from global names to the objects
    they refer to.

    You probably want to use state like so

    .. code-block:: python

        run_doctest(func, state={"globs": globals()})
    """
    state = {} if state is None else state
    state.setdefault("verbose", False)
    state.setdefault("globs", {})
    verbose = state["verbose"]
    finder = doctest.DocTestFinder(verbose=verbose, recurse=False)
    runner = doctest.DocTestRunner(verbose=verbose)
    for found in finder.find(obj, obj.__qualname__, globs=state["globs"]):
        buffer = io.StringIO()
        outcome = runner.run(found, out=buffer.write)
        # Surface the captured doctest report as the exception message.
        if outcome.failed and check:
            raise Exception(buffer.getvalue())
def rundocs(filename=None, raise_on_error=True):
    """Run doc string tests found in file.

    By default raises AssertionError on failure.

    Args:
        filename (str or None): path of the module file to test; defaults
            to the caller's ``__file__``.
        raise_on_error (bool): raise AssertionError if any doctest fails.
    """
    import doctest
    from importlib.util import spec_from_file_location, module_from_spec
    if filename is None:
        f = sys._getframe(1)
        filename = f.f_globals['__file__']
    name = os.path.splitext(os.path.basename(filename))[0]
    # BUG FIX: the deprecated `imp` module was removed in Python 3.12;
    # load the module with importlib instead.
    spec = spec_from_file_location(name, filename)
    m = module_from_spec(spec)
    spec.loader.exec_module(m)
    tests = doctest.DocTestFinder().find(m)
    runner = doctest.DocTestRunner(verbose=False)
    msg = []
    # Collect runner output so the failure report can be attached to the
    # raised AssertionError.
    out = msg.append if raise_on_error else None
    for test in tests:
        runner.run(test, out=out)
    if runner.failures > 0 and raise_on_error:
        raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
def test(f): # The `globs` defines the variables, functions and packages allowed in the docstring. tests = doctest.DocTestFinder().find(f) assert len(tests) <= 1 for test in tests: # We redirect stdout to a string, so we can tell if the tests worked out or not orig_stdout = sys.stdout sys.stdout = io.StringIO() try: results: doctest.TestResults = doctest.DocTestRunner().run(test) output = sys.stdout.getvalue() finally: sys.stdout = orig_stdout if results.failed > 0: print( f"❌ The are some issues with your implementation of `{f.__name__}`:" ) print(output, end="") print( "**********************************************************************" ) elif results.attempted > 0: print(f"✅ Your `{f.__name__}` passed {results.attempted} tests.") else: print(f"Could not find any tests for {f.__name__}")
def __call__(self, global_environment):
    """
    Run test with given global_environment.

    Returns:
        TestResult: grade 1.0 when every doctest example passed,
        otherwise 0.0 plus a plain-text failure summary.
    """
    test = doctest.DocTest(
        [e for e in self.examples if isinstance(e, doctest.Example)],
        global_environment,
        self.name,
        None,
        None,
        self.doctest_string
    )
    doctestrunner = doctest.DocTestRunner(verbose=True)
    runresults = io.StringIO()
    with redirect_stdout(runresults), redirect_stderr(runresults):
        doctestrunner.run(test, clear_globs=False)
    # BUG FIX: os.devnull instead of the POSIX-only '/dev/null' so this
    # also works on Windows.
    with open(os.devnull, 'w') as f, redirect_stderr(f), redirect_stdout(f):
        result = doctestrunner.summarize(verbose=True)
    # An individual test can only pass or fail
    grade = 1.0 if result.failed == 0 else 0.0
    if grade == 1.0:
        summary = 'Test {} passed!'.format(self.name)
    else:
        summary = self.PLAIN_TEXT_SUMMARY_TEMPLATE.format(
            name=self.name,
            doctest_string=dedent(self.doctest_string),
            runresults=runresults.getvalue()
        )
    return TestResult(grade, summary)
def run_tests(ipynb_path, globs):
    """Run every ``tests/q*.py`` doctest suite next to *ipynb_path*.

    Each test file must define a ``test`` dict with exactly one suite
    worth one point; every case's doctest is run against *globs*.

    Returns:
        float: fraction of cases that passed (1.0 when there are no
        test files at all).
    """
    base_path = os.path.dirname(ipynb_path)
    test_files = glob(os.path.join(base_path, 'tests/q*.py'))
    doctestparser = doctest.DocTestParser()
    results = []
    for test_file in test_files:
        test_file_globals = {}
        with open(test_file) as f:
            # NOTE(review): the runner is shared across all cases of a
            # file, so summarize() reports cumulative failures — a case
            # after a failed one is also scored 0.  Confirm intended.
            doctestrunner = doctest.DocTestRunner()
            exec(f.read(), test_file_globals)
        defined_test = test_file_globals['test']
        assert len(defined_test['suites']) == 1
        assert defined_test['points'] == 1
        for case in defined_test['suites'][0]['cases']:
            examples = doctestparser.parse(
                case['code'],
                defined_test['name'],
            )
            test = doctest.DocTest(
                [e for e in examples if type(e) is doctest.Example],
                globs, defined_test['name'], None, None, None)
            # BUG FIX: os.devnull instead of the POSIX-only '/dev/null'.
            with open(os.devnull, 'w') as devnull, \
                    redirect_stdout(devnull), redirect_stderr(devnull):
                doctestrunner.run(test, clear_globs=False)
            with open(os.devnull, 'w') as devnull, \
                    redirect_stdout(devnull), redirect_stderr(devnull):
                result = doctestrunner.summarize()
            results.append(1 if result.failed == 0 else 0)
    # BUG FIX: avoid ZeroDivisionError when no test files are found;
    # treat "nothing to run" as a vacuous full score.
    if not results:
        return 1.0
    return (sum(results) / len(results))
def _run_test_file_with_config(filename, globs, optionflags):
    """Modified from doctest.py to use custom checker.

    Loads *filename* as a doctest file, runs it with Py23DocChecker, and
    merges the runner into the module-global ``doctest.master`` just as
    ``doctest.testfile`` does.
    """
    text, filename = _load_testfile(filename)
    name = os.path.basename(filename)
    globs = {} if globs is None else globs.copy()
    globs.setdefault('__name__', '__main__')
    runner = doctest.DocTestRunner(checker=Py23DocChecker(), verbose=None,
                                   optionflags=optionflags)
    test = doctest.DocTestParser().get_doctest(text, globs, name, filename, 0)
    runner.run(test)
    runner.summarize()
    if doctest.master is None:
        doctest.master = runner
    else:
        doctest.master.merge(runner)
    return doctest.TestResults(runner.failures, runner.tries)
def test_doctest(package_name, context_package_names):
    """
    Run all doctest strings in all Biotite subpackages.

    Parameters
    ----------
    package_name : str
        Importable name of the subpackage whose doctests are run.
    context_package_names : list of str
        Additional packages whose public attributes are made available
        as globals to the doctests.
    """
    # Collect all attributes of this package and its subpackages
    # as globals for the doctests
    globs = {}
    # NOTE(review): mod_names is assigned but never used in this function.
    mod_names = []
    #The package itself is also used as context
    for name in context_package_names + [package_name]:
        context_package = import_module(name)
        globs.update({
            attr: getattr(context_package, attr)
            for attr in dir(context_package)
        })
    # Add fixed names for certain paths
    # (paths are relative — assumes the test runs from the repo root;
    # confirm against the CI working directory)
    globs["path_to_directory"] = tempfile.gettempdir()
    globs["path_to_structures"] = join(".", "tests", "structure", "data")
    globs["path_to_sequences"] = join(".", "tests", "sequence", "data")
    # Add frequently used modules
    globs["np"] = np
    # Add frequently used objects
    globs["atom_array_stack"] = strucio.load_structure(
        join(".", "tests", "structure", "data", "1l2y.mmtf"))
    globs["atom_array"] = globs["atom_array_stack"][0]
    # Adjust NumPy print formatting
    np.set_printoptions(precision=3, floatmode="maxprec_equal")

    # Run doctests
    # This test does not use 'testfile()' or 'testmod()'
    # due to problems with doctest identification for Cython modules
    # More information below
    package = import_module(package_name)
    runner = doctest.DocTestRunner(
        verbose=False,
        optionflags=doctest.ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE)
    for test in doctest.DocTestFinder(exclude_empty=False).find(
        package, package.__name__,
        # It is necessary to set 'module' to 'False', as otherwise
        # Cython functions and classes would be falsely identified
        # as members of an external module by 'DocTestFinder._find()'
        # and consequently would be ignored
        #
        # Setting 'module=False' omits this check
        # This check is not necessary as the biotite subpackages
        # ('__init__.py' modules) should only contain attributes, that
        # are part of the package itself.
        module=False, extraglobs=globs
    ):
        runner.run(test)
    results = doctest.TestResults(runner.failures, runner.tries)
    try:
        assert results.failed == 0
    except AssertionError:
        print(f"Failing doctest in module {package}")
        raise
def run_doctest(obj, name):
    """Run the doctest embedded in *obj*'s docstring.

    The globals of the module that defines *obj* are used as the
    doctest's namespace.

    Returns:
        tuple: (number of failures, captured runner output).
    """
    module_globals = sys.modules[obj.__module__].__dict__
    parsed = doctest.DocTestParser().get_doctest(
        obj.__doc__, module_globals, name, '', 0)
    runner = doctest.DocTestRunner()
    captured = StringIO()
    runner.run(parsed, out=captured.write)
    return runner.failures, captured.getvalue()
def test_runner(self, *args, **kwargs):
    """Wrapper: run `func`'s doctests first (failing the unittest case on
    any doctest failure), then delegate to `func` itself."""
    tests = doctest.DocTestFinder().find(func)
    runner = doctest.DocTestRunner()
    for t in tests:
        runner.run(t)
    runner.summarize()
    # BUG FIX: assertEquals is a deprecated alias removed in Python 3.12;
    # use assertEqual.
    self.assertEqual(runner.failures, 0)
    return func(self, *args, **kwargs)
def runtest(filename, runner=None):
    """Run the doctest file *filename*.

    Args:
        filename: path of the doctest file, either directly usable or
            relative to the CHECKMATE_HOME directory.
        runner: optional doctest.DocTestRunner; a fresh one is created
            per call when omitted.

    Returns:
        doctest.TestResults from running the file's examples.
    """
    # BUG FIX: the runner used to be a mutable default argument, so one
    # shared DocTestRunner instance accumulated failures/tries across
    # every call.  Create a fresh runner per call instead.
    if runner is None:
        runner = doctest.DocTestRunner()
    if os.path.isfile(filename):
        _f = open(filename)
    else:
        _f = open(os.path.sep.join([os.getenv('CHECKMATE_HOME'), filename]))
    # BUG FIX: ensure the file handle is closed even if read() fails.
    try:
        test = _f.read()
    finally:
        _f.close()
    return runner.run(doctest.DocTestParser().get_doctest(
        test, locals(), filename, None, None))
def run_tests(extraglobs, optionflags, **kwargs):
    # The patched one.
    parsed = parse_rst_ipython_tests(rst, name, extraglobs, optionflags)
    runner = doctest.DocTestRunner(optionflags=optionflags)
    set_pandas_options()
    outcome = runner.run(parsed, **kwargs)
    if report:
        runner.summarize()
    return outcome
def run_doctest(obj, state=None, check=True):
    """Run doctest on the object provided.

    Args:
        obj: object whose docstring doctests are run; its ``__qualname__``
            is used as the test name.
        state (dict or None): optional settings: ``state["verbose"]``
            toggles doctest verbosity and ``state["globs"]`` supplies the
            globals for the examples.  Missing keys get safe defaults.
        check (bool): raise on doctest failure when True.

    Raises:
        Exception: with the captured doctest report, if any example fails
            and ``check`` is True.
    """
    # Robustness fix: previously a missing or partial `state` raised
    # TypeError/KeyError; default the expected keys instead (consistent
    # with the other run_doctest variant in this file).
    if state is None:
        state = {}
    state.setdefault("verbose", False)
    state.setdefault("globs", {})
    finder = doctest.DocTestFinder(verbose=state["verbose"], recurse=False)
    runner = doctest.DocTestRunner(verbose=state["verbose"])
    for test in finder.find(obj, obj.__qualname__, globs=state["globs"]):
        output = io.StringIO()
        results = runner.run(test, out=output.write)
        if results.failed and check:
            raise Exception(output.getvalue())
def test_readme_doctest():
    """The examples in the project README must pass as doctests."""
    readme_path = os.path.join(os.path.dirname(__file__), "..", "README.md")
    with open(readme_path) as fh:
        readme_text = fh.read()
    parsed = doctest.DocTestParser().get_doctest(
        readme_text, {}, "<readme>", readme_path, 0)
    outcome = doctest.DocTestRunner().run(parsed)
    assert outcome.failed == 0
def exec_and_doctest_region():
    """This function executes the current region as a Python statement or
    series of statements.  Stdout and stderr are redirected to an emacs
    buffer called 'Python output'.

    If any of the objects produced by the selected code (functions,
    classes, etc) have doctests, they are executed automatically, and
    any errors in them are placed in an emacs buffer called
    'Doctest output'."""
    code_str = get_region()
    temp_stdout = io.StringIO()
    tempLocals = {}
    # Capture everything the executed code prints (stdout and stderr
    # share one buffer) so it can be shown in 'Python output'.
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = temp_stdout
    sys.stderr = temp_stdout
    try:
        exec(code_str, _py_globals, tempLocals)
    finally:
        # Always restore the real streams, even if exec raised.
        sys.stdout = old_stdout
        sys.stderr = old_stderr
    # Collect doctests from every named object the region defined.
    doctests = []
    finder = doctest.DocTestFinder()
    for obj in list(tempLocals.values()):
        if hasattr(obj, "__name__"):
            doctests.extend(finder.find(obj))
    buf = io.StringIO()
    runner = doctest.DocTestRunner(verbose=1)
    for test in doctests:
        runner.run(test, out=buf.write)
    if runner.failures:
        out = buf.getvalue()
        insert_in_other_buffer("Doctest output", out)
    else:
        # NOTE(review): indentation reconstructed from a collapsed
        # source line — the success-only handling below (publishing
        # locals and the status message) is assumed to belong to this
        # branch, since test_info_str is only defined here; confirm
        # against the original layout.
        if runner.tries:
            test_info_str = "All %d doctest(s) passed." % (runner.tries)
        else:
            test_info_str = ""
        _py_globals.update(tempLocals)
        out = temp_stdout.getvalue()
        if out:
            insert_in_other_buffer("Python output", out)
        messageStr = "Python code executed " \
                     "successfully. %s" % test_info_str
        lisp.message(messageStr)
def __init__(self, verbose=False, unitest=False):
    """Set up either a unittest-based or a plain doctest runner.

    Args:
        verbose: verbosity forwarded to the underlying runner.
        unitest: if True, doctests will be run via unittest suites;
            otherwise doctest's own parser/runner is used directly.
    """
    self.verbose = verbose
    self.unitest = unitest
    if unitest:
        # unittest mode: suites are built later via self.DocTestSuite.
        self.DocTestSuite = doctest.DocTestSuite
        self.unitTestRunner = unittest.TextTestRunner(verbosity=verbose)
    else:
        # plain mode: parse and run doctests directly.
        self.docTestParser = doctest.DocTestParser()
        self.docTestRunner = doctest.DocTestRunner(verbose=verbose)
def test_doctest(module): mod = __import__(module, None, None, ['x']) finder = doctest.DocTestFinder() tests = finder.find(mod, mod.__name__) for test in tests: runner = doctest.DocTestRunner(verbose=True) failures, tries = runner.run(test) if failures: pytest.fail("doctest failed: " + test.name)