def testfile(filename, fileErrors):
    """testfile() takes `filename`, the doctest file we want to print to
    screen, and `fileErrors`, the file containing the links to the errors
    we want to show. The function creates a MyDocTestRunner instance and
    sets its `strings` attribute to a list of strings such that example n
    sits between l[n] and l[n+1]."""
    with open(filename, "r") as f:
        fitxer = f.read()
    l = DocTestParser().parse(fitxer)
    l1 = []
    for i in l:
        if isinstance(i, str):
            l1.append(i)
    l2 = DocTestParser().get_examples(fitxer)
    mapp = dict()
    doc = doctest.DocTest(l2, mapp, "", None, None, None)
    runner = MyDocTestRunner()
    runner.strings = l1
    runner.run(doc)
    print("\n.. Enllacos als errors")
    with open(fileErrors, "r") as f:
        print(f.read())

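# A minimal, self-contained sketch (not part of the original code) of the
# structure DocTestParser().parse() returns -- a list alternating prose
# strings and doctest.Example objects -- which is what testfile() above
# relies on when pairing runner.strings with the parsed examples.
from doctest import DocTestParser, Example

_sample = """Some text before.
>>> 1 + 1
2
Some text after.
"""
for part in DocTestParser().parse(_sample):
    if isinstance(part, Example):
        print("example:", part.source.strip(), "->", part.want.strip())
    else:
        print("text:", repr(part))
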
def t():
    if sys.version_info[0] == 2:
        # This test is disabled in Python 2. There are too many subtle
        # differences in the syntax ('str' has to be renamed 'unicode', a 'u'
        # prefix is needed in front of string literals, etc.); it's too hacky
        # to preserve compatibility.
        #
        # In any case this test isn't meant to verify that the library works
        # in Python 2, it's to check that the README is up to date with the
        # code. So it doesn't matter.
        return

    readme_file_path = path.join(path.dirname(__file__), '..', 'README.md')
    with open(readme_file_path, 'rb') as file_in:
        doctest_str = '\n\n'.join(
            re.findall(
                r'```python\s+(.+?)```',
                file_in.read().decode('UTF-8'),
                flags=re.S,
            ),
        )
    assert doctest_str
    parser = DocTestParser()
    runner = DocTestRunner()
    runner.run(
        parser.get_doctest(
            doctest_str,
            dict(globals(), json=json, pickle=pickle),
            'README.md',
            'README.md',
            0,
        ),
    )
    assert runner.failures == 0

def get_parts(string):
    import ast
    from ast import Expr, Str, Assign
    from doctest import DocTestParser, Example

    ret = []
    dtp = DocTestParser()
    lines = string.split("\n")
    m_ast = ast.parse(string)
    str_linestarts = [x.lineno for x in m_ast.body
                      if isinstance(x, Expr) and isinstance(x.value, Str)]
    for i, node in enumerate(m_ast.body):
        lineno = node.lineno
        if isinstance(node, Assign) and node.targets[0].id in metadata_attrs:
            continue
        elif isinstance(node, Expr) and isinstance(node.value, Str):
            for s in dtp.parse(node.value.s):
                if isinstance(s, Example):
                    if ret and isinstance(ret[-1], DocTestPart):
                        ret[-1].add(s)
                    else:
                        ret.append(DocTestPart(s))
                elif len(s.strip()) > 0:
                    ret.append(TextPart(s.strip()))
                else:
                    continue
        else:
            last_line = 0
            for subnode in ast.walk(node):
                last_line = max(getattr(subnode, 'lineno', 0), last_line)
            code_str = '\n'.join(lines[lineno - 1:last_line]).strip()
            if ret and isinstance(ret[-1], CodePart):
                ret[-1].add(code_str)
            else:
                ret.append(CodePart(code_str))
    return ret

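# Hedged usage sketch for get_parts() above: classify a module's source into
# text/doctest/code parts. The path is illustrative, and DocTestPart,
# TextPart, CodePart and metadata_attrs are assumed to be defined by the
# surrounding module.
with open('mymodule.py') as f:  # illustrative path
    for part in get_parts(f.read()):
        print(type(part).__name__)
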
def get_parts(self):
    parser = DocTestParser()
    try:
        return parser.parse(self.doctest, self.file_path)
    except ValueError as error:
        self._print_message(str(error), 0)
        return []

def _test_docstr(docstr, verbose=True, optionflags=0, raise_on_error=True):
    parser = DocTestParser()
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    test = parser.get_doctest(docstr, {}, __name__, __file__, 0)
    runner.run(test)

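# Hedged usage sketch for _test_docstr() above: run an inline docstring's
# examples; with raise_on_error=True the DebugRunner raises on the first
# failure instead of collecting results.
_test_docstr(">>> sorted([3, 1, 2])\n[1, 2, 3]\n", verbose=False)
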
def remove_doctests(docstring):
    """Parse a docstring and remove doctest examples."""
    parser = DocTestParser()
    try:
        examples_and_strings = parser.parse(docstring)
        return ''.join(s for s in examples_and_strings
                       if not isinstance(s, Example))
    except ValueError as e:
        logging.exception('\nValueError when removing doctest from:\n---\n%s\n---\n%s\n'
                          % (docstring, e))
        return docstring

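# Hedged usage sketch for remove_doctests() above: the prose survives and the
# '>>>' example block is dropped (modulo surrounding whitespace).
_doc = "Add two numbers.\n\n>>> add(1, 2)\n3\n\nReturns an int."
print(remove_doctests(_doc))
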
def get_parts(self):
    parser = DocTestParser()
    try:
        return parser.parse(self.doctest, self.file_path)
    except ValueError as error:
        # Output code without unicode literals needs to be normalised,
        # largely for the test suite, and somewhat for the person reading
        # the message.
        message = str(error).replace("u'", "'")
        self._print_message(message, 0)
        return []

def test_readme_examples():
    readme_file_path = path.join(path.dirname(__file__), '..', 'README.md')
    with open(readme_file_path, 'rt', encoding='UTF-8') as file_in:
        all_blocks = re.findall(r'```(\w+)\s+(.+?)```', file_in.read(), flags=re.S)
    with TemporaryDirectory() as temp_dir:
        chdir(temp_dir)
        for syntax, block in all_blocks:
            if syntax == 'console':
                command_match = re.search(r'^\$ (\w+) (.+)\s+', block)
                if not command_match:
                    raise ValueError(block)
                print(command_match.group().rstrip())
                command, args = command_match.groups()
                block = block[command_match.end():]
                if command == 'cat':
                    # save the sample file to an actual file
                    file_name = args
                    with open(path.join(temp_dir, file_name), 'wt',
                              encoding='UTF-8') as file_out:
                        file_out.write(block)
                else:
                    # check that the command output is as expected
                    actual_output = check_output(
                        '%s %s' % (command, args),
                        shell=True,
                        cwd=temp_dir,
                        encoding='UTF-8',
                        env={
                            **environ,
                            # `canif --help` reads this, and it can vary in the
                            # CI environment, so make it fixed
                            'COLUMNS': '71',
                        },
                    )
                    print(actual_output)
                    assert actual_output == block
            elif syntax == 'python':
                parser = DocTestParser()
                test = parser.get_doctest(block, {'canif': canif},
                                          'README.md', 'README.md', 0)
                runner = DocTestRunner()
                runner.run(test)
                assert not runner.failures

def get_doctests(string):
    import ast
    from ast import Expr, Str, Assign
    from doctest import DocTestParser, Example

    ret = []
    dtp = DocTestParser()
    m_ast = ast.parse(string)
    for node in m_ast.body:
        if isinstance(node, Expr) and isinstance(node.value, Str):
            for s in dtp.parse(node.value.s):
                if isinstance(s, Example):
                    ret.append(s)
    return ret

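# Hedged usage sketch for get_doctests() above. Note it only inspects
# top-level string expressions (e.g. the module docstring), and the
# ast.Str / node.value.s accesses target Python versions before 3.12.
_src = '"""Module docstring.\n\n>>> 1 + 1\n2\n"""\nx = 5\n'
for ex in get_doctests(_src):
    print(ex.source.strip(), "->", ex.want.strip())
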
def _patched_DocFileTest(path, module_relative=True, package=None,
                         globs=None, parser=DocTestParser(), **options):
    if globs is None:
        globs = {}
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path.
    if module_relative:
        package = _normalize_module(package)
        path = _module_relative_path(package, path)
    # Find the file and read it.
    name = os.path.basename(path)
    doc = open(path, 'U').read()
    # Convert it to a test, and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)

def parse(self, *args, **kwargs):
    examples = DocTestParser.parse(self, *args, **kwargs)
    for example in examples:
        if not isinstance(example, Example):
            continue
        if any(flag in example.options for flag in skipflags):
            example.options[SKIP] = True
    return examples

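# A hedged sketch of the wiring this parse() override assumes: `skipflags`
# holds custom option flags registered via doctest.register_optionflag(), so
# that e.g. '# doctest: +SKIP_ON_CI' in an example marks it with the standard
# SKIP flag. The names SKIP_ON_CI and skipflags are illustrative.
import doctest

SKIP_ON_CI = doctest.register_optionflag('SKIP_ON_CI')
skipflags = [SKIP_ON_CI]
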
def __init__(self, title, raw_code, globs, locs):
    self.title = title
    self.raw_code = raw_code
    self.globs = globs
    self.locs = locs
    dt_parser = DocTestParser()
    doctests = dt_parser.get_examples(raw_code)
    if any(doctests):
        self.code = DocTest(examples=doctests, globs=self.globs,
                            name=title, filename=None, lineno=None,
                            docstring=None)
    else:
        self.code = compile(raw_code, title, "exec")

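# Hedged dispatch sketch for the constructor above: self.code is either a
# DocTest (when the raw code contained '>>>' examples) or a compiled code
# object. `CodeBlock` is a hypothetical name for the enclosing class.
from doctest import DocTest, DocTestRunner

unit = CodeBlock("demo", ">>> 2 + 2\n4\n", globs={}, locs={})  # hypothetical class
if isinstance(unit.code, DocTest):
    DocTestRunner().run(unit.code)          # doctest branch
else:
    exec(unit.code, unit.globs, unit.locs)  # plain-code branch
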
def parse(self, *args, **kwargs):
    examples = DocTestParser.parse(self, *args, **kwargs)
    for example in examples:
        if not isinstance(example, Example):
            continue
        if not mpc_version_110 and SKIP_MPC_LESS_THAN_110 in example.options:
            example.options[SKIP] = True
        if debug and SKIP_IN_DEBUG_MODE in example.options:
            example.options[SKIP] = True
    return examples

def _patched_testfile(filename, module_relative=True, name=None, package=None,
                      globs=None, verbose=None, report=True, optionflags=0,
                      extraglobs=None, raise_on_error=False,
                      parser=DocTestParser()):
    global master
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path
    if module_relative:
        package = _normalize_module(package)
        filename = _module_relative_path(package, filename)
    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)
    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    # Read the file, convert it to a test, and run it.
    s = open(filename, 'U').read()
    test = parser.get_doctest(s, globs, name, filename, 0)
    runner.run(test)
    if report:
        runner.summarize()
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return runner.failures, runner.tries

def _import_docstring(documenter):
    code_content = _import_docstring_code_content(documenter)
    if code_content:
        # noinspection PyBroadException
        try:
            code, content = code_content
            parser = DocTestParser()
            runner = DocTestRunner(verbose=0,
                                   optionflags=NORMALIZE_WHITESPACE | ELLIPSIS)
            glob = {}
            if documenter.modname:
                exec('from %s import *\n' % documenter.modname, glob)
            tests = parser.get_doctest(code, glob, '', '', 0)
            runner.run(tests, clear_globs=False)
            documenter.object = tests.globs[documenter.name]
            documenter.code = content
            documenter.is_doctest = True
            return True
        except Exception:
            pass

def _import_docstring(documenter):
    if getattr(documenter.directive, 'content', None):
        # noinspection PyBroadException
        try:
            import textwrap

            content = documenter.directive.content

            def get_code(source, c=''):
                s = "\n%s" % c
                return textwrap.dedent(s.join(map(str, source)))

            is_doctest = contains_doctest(get_code(content))
            offset = documenter.directive.content_offset
            if is_doctest:
                parent, parent_offset = get_grandfather_content(content)
                parent = parent[:offset + len(content) - parent_offset]
                code = get_code(parent)
            else:
                code = get_code(content, '>>> ')
            parser = DocTestParser()
            runner = DocTestRunner(verbose=0,
                                   optionflags=NORMALIZE_WHITESPACE | ELLIPSIS)
            glob = {}
            exec('import %s as mdl\n' % documenter.modname, glob)
            glob = glob['mdl'].__dict__
            tests = parser.get_doctest(code, glob, '', '', 0)
            runner.run(tests, clear_globs=False)
            documenter.object = tests.globs[documenter.name]
            documenter.code = content
            documenter.is_doctest = True
            return True
        except Exception:
            return False

def get_doc_test_cases_from_string(self, string, name='<string>',
                                   filename='<string>', globs=None):
    if globs is None:
        globs = {}
    # Make sure __name__ == '__main__' checks fail:
    globs = dict(globs)
    globs['__name__'] = None
    parser = DocTestParser()
    test = parser.get_doctest(
        string,
        globs=globs,
        name=name,
        filename=filename,
        lineno=0,
    )
    test_case = self.make_doc_test_case(test)
    return [test_case]

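# Hedged usage sketch: the returned cases are standard unittest test cases
# (make_doc_test_case presumably wraps doctest.DocTestCase), so they can be
# collected into a suite. `factory` stands in for an instance of the
# enclosing class.
import unittest

suite = unittest.TestSuite(
    factory.get_doc_test_cases_from_string(">>> 1 + 1\n2\n"))
unittest.TextTestRunner().run(suite)
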
def main():
    global args
    args = parse_args()
    logging.basicConfig(level=args.loglevel)
    parser = DocTestParser()
    with open(args.input) as fd:
        chunks = parser.parse(fd.read())
    excount = 0
    ctx = {}
    for chunk in chunks:
        if isinstance(chunk, Example):
            exec(chunk.source, ctx)
            if args.var in ctx:
                excount += 1
                graph(excount, ctx, args.var, args.output,
                      width=args.width, height=args.height)
                del ctx[args.var]

def _run_scripted_test(self, test):
    """Helper function to run a scripted doctest."""
    script = (XML(p) for p in DocTestParser().parse(test._dt_test.docstring)
              if isinstance(p, str) and p.strip())
    self.proxy.photos_recentlyUpdated.mock_returns = None
    self.proxy.photos_recentlyUpdated.mock_returns_iter = script
    _ = self.index.refresh()
    test._dt_test.globs['_'] = _
    test._dt_test.globs['index'] = self.index
    test._dt_test.globs['proxy'] = self.proxy
    test.runTest()

def print_deprecated_uses(paths):
    dep_names = set()
    dep_files = set()
    for path in sorted(paths):
        if os.path.isdir(path):
            dep_names.update(print_deprecated_uses(
                [os.path.join(path, f) for f in os.listdir(path)]))
        elif path.endswith('.py'):
            print_deprecated_uses_in(open(path).readline, path,
                                     dep_files, dep_names, 0)
        elif path.endswith('.doctest') or path.endswith('.txt'):
            for example in DocTestParser().get_examples(open(path).read()):
                ex = StringIO(example.source)
                try:
                    print_deprecated_uses_in(ex.readline, path, dep_files,
                                             dep_names, example.lineno)
                except tokenize.TokenError:
                    print(term.RED + 'Caught TokenError -- '
                          'malformatted doctest?' + term.NORMAL)
    return dep_names

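# Hedged usage sketch for print_deprecated_uses() above: walk a source tree
# (the path is illustrative) and report which deprecated names were hit.
_names = print_deprecated_uses(['src/'])
print('deprecated names used:', ', '.join(sorted(_names)))
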
def _copy_doctests(
    src_path: pathlib.Path,
    dst_f: TextIO,
    dp: doctest.DocTestParser = doctest.DocTestParser(),
):
    with src_path.open() as src_f:
        src_p = str(src_path.resolve())
        dt = dp.get_doctest(src_f.read(), {"__name__": "__main__"},
                            src_p, src_p, 0)
    cur_lineno = 0
    if not dt.examples:
        logging.debug("no doctests found in %s", src_path)
    for example in dt.examples:
        assert cur_lineno <= example.lineno
        while cur_lineno < example.lineno:
            dst_f.write("# skipped line {}\n".format(cur_lineno))
            cur_lineno += 1
        dst_f.write(example.source)
        cur_lineno += sum(1 for c in example.source if c == "\n")

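# Hedged usage sketch for _copy_doctests() above: extract the examples from a
# doctest file into an executable script, preserving line numbers via
# placeholder comments. The input path is illustrative.
import io
import pathlib

_buf = io.StringIO()
_copy_doctests(pathlib.Path('README.txt'), _buf)
print(_buf.getvalue())
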
def doctestfile(filename, module_relative=True, name=None, package=None,
                globs=None, verbose=None, report=True, optionflags=0,
                extraglobs=None, raise_on_error=False,
                parser=DocTestParser(), encoding=None, compileflags=None):
    text = open(filename, encoding=encoding).read()
    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)
    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    defects = []

    class CustomDocTestRunner(DocTestRunner):

        def report_failure(self, out, test, example, got):
            text = "doctest for %s failed, want %s got %s" % (
                example.source.strip(), example.want.split()[0], got)
            lineno = example.lineno
            error = dict(summary=text, type=60, filename=filename,
                         lineno=lineno + 1, offset=1)
            defects.append(error)

        def report_unexpected_exception(self, out, test, example, exc_info):
            text = "doctest for %s failed, exception %s" % (
                example.source.strip(), repr(exc_info[1]))
            lineno = example.lineno
            error = dict(summary=text, type=80, filename=filename,
                         lineno=lineno + 1, offset=1)
            defects.append(error)

    runner = CustomDocTestRunner(verbose=verbose, optionflags=optionflags)
    # compile and execute the file to get the global definitions
    exec(compile(text, filename, "exec"), globs)
    # Read the file, convert it to a test, and run it.
    tests = parser.get_doctest(text, globs, name, filename, 0)
    # DocTestRunner.run returns (failed, attempted)
    failures, tries = runner.run(tests)
    if not tries:
        wx.MessageBox("No doc tests could be run from: %s module" % filename,
                      "No doc tests found", wx.ICON_ERROR)
    for defect in defects:
        yield defect

from cognite.client.experimental import CogniteClient


def collect_apis(obj, done):
    if done.get(obj.__class__):
        return []
    done[obj.__class__] = True
    apis = inspect.getmembers(
        obj, lambda m: isinstance(m, APIClient) and not done.get(m.__class__))
    sub = [(n + "." + sn, sa)
           for n, c in apis for sn, sa in collect_apis(c, done)]
    return apis + sub


client = CogniteClient(project="_", api_key="_", client_name="_")
parser = DocTestParser()
apis = collect_apis(client, {})

snippets = {
    "language": "Python",
    "label": "Python SDK",
    "operations": defaultdict(str),
}
filter_out = [
    "from cognite.client import CogniteClient",
    "c = CogniteClient()",
    "",
]
duplicate_operations = {
    "listAssets": "getAssets",
    "advancedListEvents": "listEvents",

class ReST2IPyNB(object):
    ipynb_template = {
        "metadata": {"name": ""},
        "nbformat": 3,
        "nbformat_minor": 0,
        "worksheets": [{"cells": [], "metadata": {}}]
    }
    codecell_template = {
        "cell_type": "code",
        "collapsed": False,
        "input": [],
        "language": "python",
        "metadata": {},
        "outputs": []
    }
    markdowncell_template = {
        "cell_type": "markdown",
        "metadata": {},
        "source": []
    }
    headingcell_template = {
        "cell_type": "heading",
        "level": None,
        "metadata": {},
        "source": []
    }

    def __init__(self, baseurl='', apiref_baseurl='', glossary_baseurl='',
                 doctree_parser_settings=None):
        self._doctree_parser_settings = doctree_parser_settings
        if doctree_parser_settings is None:
            # silence docutils messages below "severe"
            self._doctree_parser_settings = {'report_level': 5}
        self._baseurl = baseurl
        self._apiref_baseurl = apiref_baseurl
        self._glossary_baseurl = glossary_baseurl
        self._doctest_parser = DocTestParser()
        self._reset_state()

    def _reset_state(self):
        self._state = {'sec_depth': 1,
                       'indent': 0,
                       'in_markdowncell': False,
                       'need_new_codecell': False,
                       'need_hanging_indent': False}
        self._currcell = None
        self._buffer = ''
        self._notebook = None
        self._filename = None

    def __call__(self, filename):
        self._filename = filename
        rest = open(filename).read()
        doc = prep_rest(rest)
        self._notebook = deepcopy(ReST2IPyNB.ipynb_template)
        doctree = publish_doctree(
            doc, settings_overrides=self._doctree_parser_settings)
        self._currcells = self._notebook['worksheets'][0]['cells']
        self._parse(doctree)
        self._store_currcell()
        notebook = self._notebook
        self._reset_state()
        return notebook

    def _ref2apiref(self, reftext):
        apiref_baseurl = self._apiref_baseurl
        # try to determine what kind of ref we got
        if reftext.startswith(':'):
            rtype, ref = re.match(':([a-z]+):(.*)', reftext).groups()
        else:
            rtype = None
            ref = reftext
        if rtype is None:
            # function?
            if ref.endswith('()'):
                rtype = 'func'
                ref = ref[:-2]
        refid = ref.lstrip('~').rstrip('()')
        if rtype == 'meth':
            ref_url = '%s/%s.html#%s' % (apiref_baseurl,
                                         '.'.join(refid.split('.')[:-1]),
                                         refid)
        else:
            ref_url = '%s/%s.html#%s' % (
                apiref_baseurl, refid,
                refid.replace('.', '-').replace('_', '-').lower())
        ref_label = None
        if ref.startswith('~'):
            if rtype == 'meth':
                ref_label = '%s()' % '.'.join(refid.split('.')[-2:])
            elif rtype == 'func':
                ref_label = '%s()' % refid.split('.')[-1]
            else:
                ref_label = '%s' % refid.split('.')[-1]
        return '[%s](%s)' % (ref_label, ref_url)

    def _parse(self, doctree):
        for child in doctree.children:
            tag = child.tagname
            if tag == 'title':
                self._add_headingcell(self._state['sec_depth'])
                self._parse(child)
                if not len(self._notebook['metadata']['name']):
                    self._notebook['metadata']['name'] = self._buffer
            elif tag == '#text':
                self._add2buffer(child.astext())
            elif tag == 'paragraph':
                self._add_markdowncell()
                if self._state['need_hanging_indent']:
                    self._state['need_hanging_indent'] = False
                else:
                    self._add2buffer('', newline=True, paragraph=True)
                self._flush_buffer()
                self._parse(child)
            # FIXME: literal_block likely needs better handling
            elif tag == 'literal_block':
                self._add_markdowncell()
                if self._state['need_hanging_indent']:
                    self._state['need_hanging_indent'] = False
                else:
                    self._add2buffer('', newline=True, paragraph=True)
                self._flush_buffer()
                self._parse(child)
            elif tag == 'inline':
                print("warning, no idea how to handle ``inline``")  # FIXME:
            elif tag == 'raw':
                self._add_markdowncell()
                self._flush_buffer()
                self._currcell['source'].insert(0, child.astext())
                self._store_currcell()
            elif tag == 'doctest_block':
                self._add_codecell()
                needs_new_codecell = False
                for ex in self._doctest_parser.get_examples(child.rawsource):
                    if needs_new_codecell:
                        self._add_codecell()
                    self._add2buffer('%s%s' % (' ' * ex.indent, ex.source),
                                     newline=False)
                    self._flush_buffer(startnew=True)
                    needs_new_codecell = len(ex.want) > 0
            elif tag == 'section':
                self._state['sec_depth'] += 1
                self._parse(child)
                self._state['sec_depth'] -= 1
            elif tag == 'note':
                self._add_markdowncell(force=True)
                self._parse(child)
                self._flush_buffer()
                self._currcell['source'].insert(0, '- - -\n*Note*')
                self._currcell['source'].append('- - -\n')
                self._store_currcell()
            elif tag == 'title_reference':
                self._flush_buffer()
                self._parse(child)
                if self._buffer.startswith(':term:'):
                    # link to glossary
                    term = re.match('.*<(.*)>', self._buffer)
                    if term is None:
                        term = re.match(':term:(.*)', self._buffer).groups()[0]
                        term_text = term
                    else:
                        term = term.groups()[0]
                        term_text = re.match(':term:(.*) <',
                                             self._buffer).groups()[0]
                    self._buffer = '[%s](%s#term-%s)' % (
                        term_text, self._glossary_baseurl,
                        term.lower().replace(' ', '-'))
                elif self._buffer.startswith('~mvpa') \
                        or self._buffer.startswith('mvpa') \
                        or self._buffer.startswith(':meth:') \
                        or self._buffer.startswith(':mod:') \
                        or self._buffer.startswith(':class:') \
                        or self._buffer.startswith(':func:'):
                    # various API reference link variants
                    self._buffer = self._ref2apiref(self._buffer)
                # XXX for the rest I have no idea how to link them without
                # huge effort
                elif self._buffer.startswith(':ref:'):
                    self._buffer = '*%s*' % [
                        m for m in re.match(':ref:(.*) <|:ref:(.*)',
                                            self._buffer).groups()
                        if m is not None][0]
                elif self._buffer.startswith(':math:'):
                    self._buffer = '$$%s$$' % self._buffer
                elif re.match(':([a-z]+):', self._buffer):
                    # catch other ref types we should handle, but do not yet
                    raise RuntimeError("unhandled reference type '%s'"
                                       % self._buffer)
                else:
                    # plain refs seem to be mostly used for external APIs
                    self._buffer = '`%s`' % self._buffer
            elif tag == 'emphasis':
                self._flush_buffer()
                self._parse(child)
                self._buffer = '*%s*' % self._buffer
            elif tag == 'strong':
                self._flush_buffer()
                self._parse(child)
                self._buffer = '**%s**' % self._buffer
            elif tag == 'literal':
                # strip one layer of backticks
                self._add2buffer(child.rawsource[1:-1])
            elif tag == 'problematic':
                print('PROBLEMATIC: %s' % child)
                self._parse(child)
            elif tag == 'reference':
                self._flush_buffer()
                self._parse(child)
                self._buffer = '[%s][%s]' % (self._buffer,
                                             child.attributes['name'])
            elif tag in ['comment', 'target']:
                pass
            elif tag == 'definition_list':
                self._add_markdowncell()
                for item in child.children:
                    self._flush_buffer()
                    self._parse(item.children[0])
                    term = self._buffer
                    self._buffer = ''
                    self._parse(item.children[1])
                    self._buffer = '\n%s: %s' % (term, self._buffer)
            elif tag in ['enumerated_list', 'bullet_list']:
                self._add_markdowncell()
                for i, item in enumerate(child.children):
                    if tag == 'enumerated_list':
                        prefix = '%i.' % (i + 1,)
                    else:
                        prefix = '*'
                    self._flush_buffer()
                    self._add2buffer('%s ' % prefix, newline=True,
                                     paragraph=True)
                    self._state['indent'] += 4
                    self._state['need_hanging_indent'] = True
                    self._parse(item)
                    self._state['indent'] -= 4
                self._flush_buffer()
            elif tag == 'list_item':
                for c in child.children:
                    self._parse(c)
            elif tag == 'term':
                self._parse(child.children[0])
            elif tag == 'figure':
                # this can't be expressed in markdown
                self._flush_buffer()
                file_url = '%s/%s.html' % (
                    self._baseurl,
                    os.path.splitext(os.path.basename(self._filename))[0])
                self._add2buffer('\\[Visit [%s](%s) to view this figure\\]'
                                 % (file_url, file_url),
                                 newline=True, paragraph=True)
            elif tag == 'block_quote':
                self._flush_buffer()
                first_line = len(self._currcell['source'])
                # skip the wrapping paragraph
                self._parse(child.children[0])
                self._flush_buffer()
                self._currcell['source'][first_line] = \
                    '\n\n> %s' % self._currcell['source'][first_line]
            elif tag == 'system_message':
                if child.attributes['type'] == 'INFO':
                    pass
                elif child.children[0].astext() == 'Unknown directive type "exercise".':
                    exercise_text = '\n'.join(
                        [l.strip() for l in
                         child.children[1][0].astext().split('\n')][2:])
                    self._add_markdowncell(force=True)
                    self._parse(publish_doctree(
                        exercise_text,
                        settings_overrides=self._doctree_parser_settings))
                    self._flush_buffer()
                    self._currcell['source'].insert(0, '- - -\n*Exercise*')
                    self._add_codecell()
                    self._add2buffer('# you can use this cell for this exercise\n')
                    self._add_markdowncell()
                    self._currcell['source'].append('- - -\n')
                elif child.children[0].astext() == 'Unknown directive type "todo".':
                    pass
                elif child.children[0].astext() == 'Unknown directive type "tikz".':
                    pass
                elif child.children[0].astext() == 'Unknown directive type "ipython".':
                    python_code = '\n'.join(
                        [l.strip() for l in
                         child.children[1][0].astext().split('\n')][2:])
                    self._flush_buffer()
                    self._add_codecell()
                    self._currcell['input'].insert(0, python_code)
                    self._store_currcell()
                else:
                    raise RuntimeError("cannot handle system message '%s'"
                                       % child.astext())
            else:
                if hasattr(child, 'line') and child.line:
                    line = ' on line %i' % child.line
                else:
                    line = ''
                raise RuntimeError("Unknown tag '%s'%s" % (tag, line))

    def _store_currcell(self):
        if self._currcell is not None:
            self._flush_buffer()
            if self._currcell['cell_type'] == 'code':
                # remove last newline to save on vertical space
                self._currcell['input'][-1] = \
                    self._currcell['input'][-1].rstrip('\n')
            self._currcells.append(self._currcell)
            self._currcell = None

    def _add_headingcell(self, level):
        self._store_currcell()
        self._currcell = deepcopy(ReST2IPyNB.headingcell_template)
        self._currcell['level'] = level

    def _add_codecell(self):
        self._store_currcell()
        self._currcell = deepcopy(ReST2IPyNB.codecell_template)

    def _add_markdowncell(self, force=False):
        if self._currcell is None \
                or not self._currcell['cell_type'] == 'markdown' \
                or force:
            # we need a new cell
            self._store_currcell()
            self._currcell = deepcopy(ReST2IPyNB.markdowncell_template)

    def _add2buffer(self, value, newline=False, paragraph=False):
        if paragraph:
            nl = '\n\n'
        else:
            nl = '\n'
        if newline:
            self._buffer += '%s%s%s' % (nl, ' ' * self._state['indent'], value)
        else:
            self._buffer += value

    def _flush_buffer(self, startnew=True):
        if not len(self._buffer):
            return
        if self._currcell['cell_type'] == 'code':
            target_field = 'input'
        else:
            target_field = 'source'
        if startnew or not len(self._currcell[target_field]):
            self._currcell[target_field].append(self._buffer)
        else:
            self._currcell[target_field][-1] += self._buffer
        self._buffer = ''

class TestingProtocol(models.Protocol):
    """A Protocol that executes doctests as lists of Example objects, supports
    suite/case specificity, alternate file testing, and provides users with
    details such as cases passed and test coverage.
    """

    def __init__(self, args, assignment):
        super().__init__(args, assignment)
        # The environment in which the doctests are run (global vars)
        self.good_env = {}
        self.verb = self.args.verbose
        # Initialize the doctest module objects that will do the testing/parse
        self.parser = DocTestParser()
        self.runner = DocTestRunner(verbose=self.verb, optionflags=FAIL_FAST)
        self.lines_exec = 0
        self.lines_total = 0

    def test(self, good_env={}, suite=None, case=None):
        test_results = {}
        # all examples to be run will be put in exs
        exs = collections.OrderedDict()
        # use regex to get raw strings organized into suite/case
        self.get_data()
        try:
            if suite:
                exs = self.get_suite_examples(suite, case)
            elif case:
                # No support for cases without their suite
                raise EarlyExit('python3 ok: error: '
                                'Please specify suite for given case ({}).'.format(case[0]))
            else:
                exs = self.get_all_examples()
            # gets analytics to be returned
            test_results[self.tstfile_name] = self.analyze(suite, case, exs)
        except KeyError as e:
            raise EarlyExit('python3 ok: error: '
                            'Suite/Case label must be valid. '
                            '(Suites: {}, Cases: {})'.format(self.num_suites,
                                                             self.num_cases))
        return test_results

    def analyze(self, suite, case, examples):
        failed, attempted = self.run_examples(examples)
        self.cov.stop()
        passed = attempted - failed
        format.print_test_progress_bar('{} summary'.format(self.tstfile_name),
                                       passed, failed, verbose=self.verb)
        # only support test coverage stats when running everything
        if not suite:
            self.print_coverage()
            if self.args.coverage:
                if self.lines_exec == self.lines_total:
                    print("Maximum coverage achieved! Great work!")
                else:
                    self.give_suggestions()
        return {'suites_total': self.num_suites, 'cases_total': self.num_cases,
                'exs_failed': failed, 'exs_passed': passed,
                'attempted': attempted, 'actual_cov': self.lines_exec,
                'total_cov': self.lines_total}

    def give_suggestions(self):
        print("Consider adding tests for the following:")
        for file in self.clean_src:
            file += '.py'
            cov_stats = self.cov.analysis2(file)
            missing_cov = cov_stats[3]
            if missing_cov:
                print(' File: {}'.format(file))
                missing_string = ' Line(s): ' + ','.join(map(str, missing_cov))
                print(missing_string)

    def get_suite_examples(self, suite, case):
        # suite/case specified, so only parse relevant text into Examples
        exs = collections.OrderedDict()
        case_ex = collections.OrderedDict()
        # get the shared lines that should impact all the cases in the suite.
        shrd_txt = self.shared_case_data[suite]
        if shrd_txt:
            parse_shared = self.parser.parse(shrd_txt.group(0),
                                             self.tstfile_name)
            shrd_ex = [i for i in parse_shared if isinstance(i, Example)]
            if shrd_ex:
                case_ex['shared'] = shrd_ex
        if case:
            if str(case[0]) not in self.data[suite]:
                raise KeyError
            parsed_temp_examples = self.parser.parse(self.data[suite][case[0]],
                                                     self.tstfile_name)
            case_examples = [i for i in parsed_temp_examples
                             if isinstance(i, Example)]
            case_ex[str(case[0])] = case_examples
        else:
            for itemcase in self.data[suite].keys():
                parsed_temp_examples = self.parser.parse(
                    self.data[suite][itemcase], self.tstfile_name)
                case_examples = [i for i in parsed_temp_examples
                                 if isinstance(i, Example)]
                case_ex[itemcase] = case_examples
        exs[suite] = case_ex
        return exs

    def get_all_examples(self):
        # no suite/case flag, so parses all text into Example objects
        exs = collections.OrderedDict()
        for sui in self.data.keys():
            case_ex = collections.OrderedDict()
            # get the shared lines that should impact all the cases in the
            # suite.
            shrd_txt = self.shared_case_data[sui]
            if shrd_txt:
                parse_shared = self.parser.parse(shrd_txt.group(0),
                                                 self.tstfile_name)
                shrd_ex = [i for i in parse_shared if isinstance(i, Example)]
                if shrd_ex:
                    case_ex['shared'] = shrd_ex
            for itemcase in self.data[sui].keys():
                parsed_temp_examples = self.parser.parse(
                    self.data[sui][itemcase], self.tstfile_name)
                case_examples = [i for i in parsed_temp_examples
                                 if isinstance(i, Example)]
                case_ex[itemcase] = case_examples
            exs[sui] = case_ex
        return exs

    # catch infinite loops / recursion errors
    @conditionally(timeout(10), os.name != 'nt')
    def run_examples(self, exs):
        # runs the Example objects, keeps track of right/wrong etc
        total_failed = 0
        total_attempted = 0
        case = 'shared'
        for sui in exs.keys():
            if not total_failed:
                final_env = dict(self.good_env)
                if 'shared' in exs[sui].keys():
                    dtest = DocTest(exs[sui]['shared'], self.good_env,
                                    'shared', None, None, None)
                    result = self.runner.run(dtest, clear_globs=False)
                    # take the env from shared dtest and save it for other exs
                    final_env = dict(self.good_env, **dtest.globs)
                    total_failed += result.failed
                    total_attempted += result.attempted
            for case in exs[sui].keys():
                if case != 'shared':
                    if not total_failed:
                        example_name = "Suite {}, Case {}".format(sui, case)
                        dtest = DocTest(exs[sui][case], final_env,
                                        example_name, None, None, None)
                        result = self.runner.run(dtest)
                        total_failed += result.failed
                        total_attempted += result.attempted
        return total_failed, total_attempted

    def get_data(self):
        # organizes data into suite/case strings to feed to the parser module
        self.tstfile_name, data_str = self.get_tstfile(self.testloc)
        self.data = collections.OrderedDict()
        self.shared_case_data = collections.OrderedDict()
        # chunk the file into suites
        data_suites = re.findall(r"(Suite\s*([\d\w]+))((?:(?!Suite)(.|\n))*)",
                                 data_str)
        self.num_suites = len(data_suites)
        self.num_cases = 0
        for curr_suite in data_suites:
            case_data = collections.OrderedDict()
            # chunk the suite into cases
            cases = re.findall(r"(Case\s*([\d\w]+))((?:(?!Case)(.|\n))*)",
                               curr_suite[2])
            self.num_cases += len(cases)
            self.shared_case_data[str(curr_suite[1])] = re.match(
                r"((?:(?!Case)(.|\n))*)", curr_suite[2])
            for curr_case in cases:
                case_data[curr_case[1]] = curr_case[2]
            self.data[curr_suite[1]] = case_data

    def get_tstfile(self, location):
        # return file, file as a string
        PATH = os.path.join(location, self.args.testing)
        name = self.args.testing
        if not name.endswith('.rst'):
            raise EarlyExit('python3 ok: error: '
                            'Only .rst files are supported at this time.')
        try:
            with open(PATH, "r") as testfile:
                data_str = testfile.read()
        except FileNotFoundError as e:
            raise EarlyExit('python3 ok: error: '
                            '{} test file ({}) cannot be found.'.format(
                                'Default' if DEFAULT_TST_FILE == name
                                else 'Specified', name))
        return name, data_str

    def print_coverage(self):
        # prints the coverage summary by diffing the two coverage trackers
        lines, executed = self.get_coverage(self.cov)
        self.lines_total = lines
        self.lines_exec = executed
        format.print_coverage_bar('Coverage summary',
                                  self.lines_exec, self.lines_total,
                                  verbose=self.verb)

    def get_coverage(self, cov):
        # returns executable lines, executed_lines
        lines_run = 0
        total_lines = 0
        for file in self.clean_src:
            file_cov = cov.analysis2(file + '.py')
            lines = len(file_cov[1])
            lines_not_run = len(file_cov[3])
            total_lines += lines
            lines_run += lines - lines_not_run
        return total_lines, lines_run

    def run(self, messages, testloc=CURR_DIR):
        if self.args.score or self.args.unlock or not self.args.testing:
            return
        # Note: All (and only) .py files given in the src will be tracked and
        # contribute to coverage statistics
        self.clean_src = [i[:-3] for i in self.assignment.src
                          if i.endswith('.py')]
        self.cov = coverage(source=[testloc],
                            include=[file + '.py' for file in self.clean_src])
        self.testloc = testloc
        self.cov.start()
        analytics = self.test(self.good_env, self.args.suite, self.args.case)
        messages['testing'] = analytics