def print_script(filename, simulator):
    parser = doctest.DocTestParser()
    s = open(filename).read()
    script = "".join([ex.source for ex in parser.get_examples(s)
                      if "+SKIP" not in ex.source])
    print("from pyNN.%s import *\nsetup(max_delay=10.0, debug=True)\n%s"
          % (simulator, script))
def test_doctests(self, mocked_doctest_runner) -> None:
    import doctest

    parser = doctest.DocTestParser()
    assert approx.__doc__ is not None
    test = parser.get_doctest(
        approx.__doc__, {"approx": approx}, approx.__name__, None, None
    )
    mocked_doctest_runner.run(test)
def run_doctest(obj, name):
    p = doctest.DocTestParser()
    t = p.get_doctest(obj.__doc__, sys.modules[obj.__module__].__dict__,
                      name, '', 0)
    r = doctest.DocTestRunner()
    output = StringIO.StringIO()
    r.run(t, out=output.write)
    return r.failures, output.getvalue()
def runtest(filename, runner=doctest.DocTestRunner()):
    if os.path.isfile(filename):
        _f = open(filename)
    else:
        _f = open(os.path.sep.join([os.getenv('CHECKMATE_HOME'), filename]))
    test = _f.read()
    _f.close()
    return runner.run(doctest.DocTestParser().get_doctest(
        test, locals(), filename, None, None))
def test_readme_doctest():
    readme_filename = os.path.join(os.path.dirname(__file__), "..", "README.md")
    with open(readme_filename) as f:
        readme_text = f.read()
    t = doctest.DocTestParser().get_doctest(
        readme_text, {}, "<readme>", readme_filename, 0)
    result = doctest.DocTestRunner().run(t)
    assert result.failed == 0
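# --- Illustrative sketch, not from any of the projects above ---
# The snippets so far share one core cycle: DocTestParser.get_doctest()
# turns a docstring or file into a DocTest, and DocTestRunner.run()
# executes it, returning a TestResults(failed, attempted) pair. This
# minimal, self-contained version uses an invented inline sample.
import doctest

def run_inline_doctest():
    text = ">>> 1 + 1\n2\n"
    test = doctest.DocTestParser().get_doctest(
        text, globs={}, name="<inline>", filename=None, lineno=0)
    results = doctest.DocTestRunner().run(test)
    assert results.failed == 0 and results.attempted == 1

run_inline_doctest()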
def __init__(self, verbose=False, unitest=False):
    self.verbose = verbose
    self.unitest = unitest
    if unitest:
        self.DocTestSuite = doctest.DocTestSuite
        self.unitTestRunner = unittest.TextTestRunner(verbosity=verbose)
    else:
        self.docTestParser = doctest.DocTestParser()
        self.docTestRunner = doctest.DocTestRunner(verbose=verbose)
def get_examples(pkg):
    doc = pkg.__doc__
    parser = doctest.DocTestParser()
    es = parser.get_examples(doc)
    rst = []
    for e in es:
        rst.append('>>> ' + e.source.strip())
        rst.append(e.want.strip())
    return '\n'.join(rst)
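# Illustrative sketch (invented sample text): get_examples() yields
# doctest.Example objects whose .source and .want attributes are what
# get_examples(pkg) above stitches back into a '>>>' transcript.
import doctest

for ex in doctest.DocTestParser().get_examples(">>> x = 2\n>>> x * 3\n6\n"):
    print(repr(ex.source), repr(ex.want))
# prints: 'x = 2\n' ''   then   'x * 3\n' '6\n'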
def collect_doctest(object, vars, name):
    doctest_suite = doctest.DocTestSuite()
    test_case = doctest.DocTestParser().get_doctest(object, vars, name, name, 1)
    if test_case.examples:
        doctest_suite.addTest(
            doctest.DocTestCase(test_case, doctest.ELLIPSIS))
    test_case = InlineDoctestParser().get_doctest(object, vars, name, name, 1)
    if test_case.examples:
        doctest_suite.addTest(
            doctest.DocTestCase(test_case, checker=NullOutputCheck))
    if doctest_suite._tests:
        return doctest_suite
def configure(self, options, config):
    Plugin.configure(self, options, config)
    self.doctest_tests = options.doctest_tests
    self.extension = tolist(options.doctestExtension)
    self.parser = doctest.DocTestParser()
    self.finder = DocTestFinder()
    self.checker = IPDoctestOutputChecker()
    self.globs = None
    self.extraglobs = None
def test_doctests(self):
    parser = doctest.DocTestParser()
    test = parser.get_doctest(
        approx.__doc__,
        {'approx': approx},
        approx.__name__,
        None,
        None,
    )
    runner = MyDocTestRunner()
    runner.run(test)
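# Illustrative sketch: MyDocTestRunner above is not defined here; a
# comparable hand-rolled runner might simply turn the first failing
# example into an exception by overriding report_failure().
import doctest

class RaisingDocTestRunner(doctest.DocTestRunner):
    def report_failure(self, out, test, example, got):
        raise AssertionError("{!r} gave {!r}, expected {!r}".format(
            example.source.strip(), got, example.want))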
def get_test_str(object: str):
    vars = get_ipython().user_ns
    name = get_ipython().user_module.__name__
    doctest_suite = doctest.DocTestSuite()
    test_case = doctest.DocTestParser().get_doctest(object, vars, name, name, 1)
    if test_case.examples:
        doctest_suite.addTest(
            doctest.DocTestCase(test_case, doctest.ELLIPSIS))
    if doctest_suite._tests:
        return doctest_suite
def runScipyInstance(jsonrequest, outQueue):
    """Run a new Python instance and test the code."""
    # Load the JSON data into a Python object.
    try:
        jsonrequest = json.loads(jsonrequest)
        solution = str(jsonrequest["solution"])
        tests = str(jsonrequest["tests"])
    except:
        responseDict = {'errors': 'Bad request'}
        logging.error("Bad request")
        responseJSON = json.dumps(responseDict)
        outQueue.put(responseJSON)
        return

    oldfile = sys.stdout
    sys.stdout = newfile = StringIO.StringIO()

    def ExecutionError():
        """Catch any execution error, for the solution and each test."""
        sys.stdout = oldfile
        errors = traceback.format_exc()
        logging.info("Python verifier returning errors =%s", errors)
        responseDict = {'errors': '%s' % errors}
        responseJSON = json.dumps(responseDict)
        outQueue.put(responseJSON)

    try:
        # Import the numpy testing helpers and execute the solution.
        namespace = {}
        compiled = compile("from numpy.testing import *\nimport numpy\nimport scipy",
                           'submitted code', 'exec')
        exec compiled in namespace
        compiled = compile(solution, 'submitted code', 'exec')
        exec compiled in namespace
        namespace['YOUR_SOLUTION'] = solution.strip()
        namespace['LINES_IN_YOUR_SOLUTION'] = len(solution.strip().splitlines())
    except:
        ExecutionError()
        return

    # Parse the tests into doctest examples.
    try:
        test_cases = doctest.DocTestParser().get_examples(tests)
    except:
        ExecutionError()
        return

    results = execute_test_cases(test_cases, namespace, ExecutionError)
    if results is None:
        return

    sys.stdout = oldfile
    printed = newfile.getvalue()
    results["printed"] = printed
    responseJSON = json.dumps(results)
    logging.info("Python verifier returning %s", responseJSON)
    outQueue.put(responseJSON)
def __init__(self, *args, **kwargs):
    doctest_str = '\n'.join((api_example_str, self.get_readme_str()))
    sample = doctest.DocTestParser().get_doctest(
        doctest_str, globs={}, name='test_doc', filename=None, lineno=None)
    super().__init__(sample, **kwargs)
def num_examples_test(self, fname, settings):
    '''Check that there are at least NUM_EXAMPLES examples in the docstring.'''
    _, doc = self.exists_test(fname, settings)
    num_examples = len(doctest.DocTestParser().get_examples(doc))
    msg = EXAMPLE_FAILURE_MESSAGE.format(
        fname, settings[NUM_EXAMPLES], num_examples)
    self.assertTrue(num_examples >= settings[NUM_EXAMPLES], msg)
def teststring(text, report=True, **runner_kwargs):
    parser = doctest.DocTestParser()
    runner = BeamDataframeDoctestRunner(TestEnvironment(), **runner_kwargs)
    test = parser.get_doctest(
        text, {'pd': runner.fake_pandas_module(), 'np': np},
        '<string>', '<string>', 0)
    result = runner.run(test)
    if report:
        runner.summarize()
    return result
def _run_doctest_for_content(self, name, content):
    optionflags = (doctest.ELLIPSIS |
                   doctest.NORMALIZE_WHITESPACE |
                   _get_allow_unicode_flag())
    runner = doctest.DocTestRunner(verbose=None, optionflags=optionflags,
                                   checker=_get_unicode_checker())
    globs = {'print_function': print_function}
    parser = doctest.DocTestParser()
    test = parser.get_doctest(content, globs, name, name, 0)
    runner.run(test)
    runner.summarize()
    assert not runner.failures
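# Illustrative sketch (invented example text): the effect of combining
# option flags as _run_doctest_for_content does above; ELLIPSIS lets
# '...' in the expected output match any run of actual output.
import doctest

flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
test = doctest.DocTestParser().get_doctest(
    ">>> list(range(10))\n[0, 1, ...]\n", {}, "<flags-demo>", None, 0)
assert doctest.DocTestRunner(optionflags=flags).run(test).failed == 0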
def _tests_from_text_file(self, filename):
    """Load tests from a non-Python text file."""
    f = open(filename, 'r')
    text = f.read()
    f.close()
    parser = doctest.DocTestParser()
    test = parser.get_doctest(text, {}, '', filename, 0)
    if test.name.startswith('.'):
        test.name = test.name[1:]
    return [test]
def configure(self, options, config):
    # The parent method sets the enabled flag from the command line
    # (--with-numpydoctest).
    Plugin.configure(self, options, config)
    self.finder = self.test_finder_class()
    self.parser = doctest.DocTestParser()
    if self.enabled:
        # Pull the standard doctest plugin out of the plugin list; there's
        # no reason to run both. In practice the Unplugger plugin above
        # would cover us when run from a standard numpy.test() call; this
        # is just in case someone wants to run our plugin outside the
        # numpy.test() machinery.
        config.plugins.plugins = [p for p in config.plugins.plugins
                                  if p.name != 'doctest']
def loadTestsFromFile(self, filename):
    """Load doctests from the file.

    Tests are loaded only if filename's extension matches the configured
    doctest extension.
    """
    if self.extension and anyp(filename.endswith, self.extension):
        name = os.path.basename(filename)
        dh = open(filename)
        try:
            doc = dh.read()
        finally:
            dh.close()
        fixture_context = None
        globs = {'__file__': filename}
        if self.fixtures:
            base, ext = os.path.splitext(name)
            dirname = os.path.dirname(filename)
            sys.path.append(dirname)
            fixt_mod = base + self.fixtures
            try:
                fixture_context = __import__(fixt_mod, globals(), locals(),
                                             ["nop"])
            except ImportError as e:
                log.debug("Could not import %s: %s (%s)",
                          fixt_mod, e, sys.path)
            log.debug("Fixture module %s resolved to %s",
                      fixt_mod, fixture_context)
            if hasattr(fixture_context, 'globs'):
                globs = fixture_context.globs(globs)
        parser = doctest.DocTestParser()
        test = parser.get_doctest(doc, globs=globs, name=name,
                                  filename=filename, lineno=0)
        if test.examples:
            case = DocFileCase(
                test, optionflags=self.optionflags,
                setUp=getattr(fixture_context, 'setup_test', None),
                tearDown=getattr(fixture_context, 'teardown_test', None),
                result_var=self.doctest_result_var)
            if fixture_context:
                yield ContextList((case,), context=fixture_context)
            else:
                yield case
        else:
            yield False  # no tests to load
def addDocAttrTestsToSuite(suite, moduleVariableLists, outerFilename=None,
                           globs=False,
                           optionflags=(doctest.ELLIPSIS |
                                        doctest.NORMALIZE_WHITESPACE)):
    '''
    Takes a suite, such as a doctest.DocTestSuite, and the list of variables
    in a module, and adds to the suite any doctests from those classes that
    have a _DOC_ATTR dictionary (which documents the properties in the class).

    >>> import doctest
    >>> s1 = doctest.DocTestSuite(chord)
    >>> s1TestsBefore = len(s1._tests)
    >>> allLocals = [getattr(chord, x) for x in dir(chord)]
    >>> test.testRunner.addDocAttrTestsToSuite(s1, allLocals)
    >>> s1TestsAfter = len(s1._tests)
    >>> s1TestsAfter - s1TestsBefore
    1
    >>> t = s1._tests[-1]
    >>> t
    isRest ()
    >>> 'hi'
    'hi'
    '''
    dtp = doctest.DocTestParser()
    if globs is False:
        globs = __import__(defaultImports[0]).__dict__.copy()
    elif globs is None:
        globs = {}
    for lvk in moduleVariableLists:
        if not inspect.isclass(lvk):
            continue
        docattr = getattr(lvk, '_DOC_ATTR', None)
        if docattr is None:
            continue
        for dockey in docattr:
            documentation = docattr[dockey]
            # print(documentation)
            dt = dtp.get_doctest(documentation, globs, dockey,
                                 outerFilename, 0)
            if not dt.examples:
                continue
            dtc = doctest.DocTestCase(dt, optionflags=optionflags)
            # print(dtc)
            suite.addTest(dtc)
def clean_doctest(self):
    try:
        lines = doctest.DocTestParser().get_examples(self.source_block)
    except ValueError:
        return None
    source_lines = [source_line
                    for line in lines
                    for source_line in self._overwritten_source(line.source,
                                                                line.lineno)]
    if source_lines:
        self._source_lines = source_lines
        return True
    return False
def configure(self, options, config):
    Plugin.configure(self, options, config)
    # Pull the standard doctest plugin out of config; we will do the
    # doctesting.
    config.plugins.plugins = [p for p in config.plugins.plugins
                              if p.name != 'doctest']
    self.doctest_tests = options.doctest_tests
    self.extension = tolist(options.doctestExtension)
    self.parser = doctest.DocTestParser()
    self.finder = DocTestFinder()
    self.checker = IPDoctestOutputChecker()
    self.globs = None
    self.extraglobs = None
def __init__(self, optionflags=0, checker=None, parser=None):
    self.runner = DocTestRunner(optionflags=optionflags, checker=checker,
                                verbose=False)
    self.debug_runner = DebugRunner(optionflags=optionflags, verbose=False)

    def evaluate_closure(region, document, globs):
        # capture "self"
        evaluate(self, region, document, globs)

    parser = parser or doctest.DocTestParser()
    manuel.Manuel.__init__(
        self,
        [lambda document: parse(self, document, parser)],
        [evaluate_closure],
        [format])
def _parse_docstring(node):
    """Extract code from docstring."""
    docstring = ast.get_docstring(node)
    if docstring:
        parser = doctest.DocTestParser()
        try:
            dt = parser.get_doctest(docstring, {}, None, None, None)
        except ValueError:  # >>> 'abc'
            pass
        else:
            examples = dt.examples
            return '\n'.join([example.source for example in examples])
    return None
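# Illustrative usage (hypothetical module source): extracting just the
# example code from a function's docstring via _parse_docstring above.
import ast

src = '''
def f():
    """Say hello.

    >>> f()
    'hello'
    """
'''
node = ast.parse(src).body[0]
print(_parse_docstring(node))  # -> "f()\n"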
def __init__(self, verbose=False, parser=doctest.DocTestParser(),
             recurse=True, exclude_empty=True,
             parser_class: Type[DocTestFilterParser] = IndexedDocTestFilterParser,
             filters_text=""):
    super().__init__(verbose=verbose, parser=parser, recurse=recurse,
                     exclude_empty=exclude_empty)
    self.filters: List[DocTestFilter] = \
        parser_class().parse_filters(filters_text)
def __init__(self, *args, **kw):
    BaseTestCase.__init__(self, *args, **kw)
    # Ugh, copied from doctest.DocFileTest; why isn't this in
    # doctest.DocFileCase.__init__?
    doc, path = doctest._load_testfile(self.path, tests, True)
    name = os.path.basename(path)
    globs = dict(self=self, do=self.shell, __file__=path)
    self._dt_optionflags = (doctest.REPORT_ONLY_FIRST_FAILURE |
                            doctest.ELLIPSIS |
                            doctest.NORMALIZE_WHITESPACE)
    self._dt_checker = None
    self._dt_test = doctest.DocTestParser().get_doctest(
        doc, globs, name, path, 0)
def runRInstance(jsonrequest, outQueue):
    """Run a new Python instance and test the R code."""
    # Load the JSON data into a Python object.
    try:
        jsonrequest = json.loads(jsonrequest)
        solution = str(jsonrequest["solution"])
        tests = str(jsonrequest["tests"])
    except:
        responseDict = {'errors': 'Bad request'}
        logging.error("Bad request")
        responseJSON = json.dumps(responseDict)
        outQueue.put(responseJSON)
        return

    oldfile = sys.stdout
    sys.stdout = newfile = StringIO.StringIO()

    def ExecutionError():
        """Catch any execution error, for the solution and each test."""
        sys.stdout = oldfile
        errors = traceback.format_exc()
        logging.info("Python verifier returning errors =%s", errors)
        responseDict = {'errors': '%s' % errors}
        responseJSON = json.dumps(responseDict)
        outQueue.put(responseJSON)

    try:
        # Import RUnit and testthat, then execute the solution.
        r("library('RUnit','/home/server/libs/')")
        r("library('testthat','/home/server/libs/')")
        r(solution)
    except:
        ExecutionError()
        return

    # Parse the tests into doctest examples.
    try:
        test_cases = doctest.DocTestParser().get_examples(tests)
    except:
        ExecutionError()
        return

    results = execute_test_cases(test_cases, ExecutionError)
    sys.stdout = oldfile
    printed = newfile.getvalue()
    results["printed"] = ""
    responseJSON = json.dumps(results)
    logging.info("Python verifier returning %s", responseJSON)
    outQueue.put(responseJSON)
def mytestfile(filename, globs, optionflags, strict=False):
    parser = doctest.DocTestParser()
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    name = os.path.basename(filename)
    runner = doctest.DocTestRunner(checker=MyOutputChecker(strict=strict),
                                   optionflags=optionflags)
    # Read the file, convert it to a test, and run it.
    s = open(filename).read()
    test = parser.get_doctest(s, globs, name, filename, 0)
    runner.run(test)
    runner.summarize()
    return runner.failures, runner.tries
def doctests_pass(self, fname, module):
    '''Check that all doctests in fname pass.'''
    _, doc = self.exists_test(fname, module)
    doc_test = doctest.DocTestParser().get_doctest(
        doc, module.__dict__, 'doctests_{}'.format(fname),
        module.__name__, None)
    runner = doctest.DocTestRunner()
    runner.run(doc_test)
    failed, _ = runner.summarize()
    msg = FAILURE_MESSAGE.format(fname)
    self.assertFalse(failed, msg)  # no failed cases
def get_examples(pkg):
    doc = pkg.__doc__
    parser = doctest.DocTestParser()
    es = parser.get_examples(doc)
    rst = []
    for e in es:
        rst.append('>>> ' + e.source.strip())
        rst.append(e.want.strip())
    if not rst:
        # Fall back to the pre-written synopsis when the docstring has
        # no examples.
        with open("synopsis.txt", 'r') as f:
            return f.read()
    return '\n'.join(rst)