def test_mod_lineno():
    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_mod_lineno.py')
        source = utils.codeblock(
            '''
            class Fun(object):  #1
                @property
                def test(self):
                    """
                        # 4
                        >>> a = 1
                        >>> 1 / 0
                    """
            ''')
        with open(modpath, 'w') as file:
            file.write(source)
        doctests = list(core.parse_doctestables(modpath, style='freeform'))
        assert len(doctests) == 1
        self = doctests[0]
        # print(self._parts[0])
        assert self.lineno == 5
        # print(self.format_src())
        self.config['colored'] = False
        assert self.format_src(offset_linenos=False).strip().startswith('1')
        assert self.format_src(offset_linenos=True).strip().startswith('5')
        with utils.PythonPathContext(dpath):
            status = self.run(verbose=10, on_error='return')
        assert not status['passed']
def test_collect_module_level_singleline():
    """
    pytest testing/test_core.py::test_collect_module_level_singleline

    Ignore:
        temp = utils.TempDir()
    """
    temp = utils.TempDir()
    dpath = temp.ensure()
    modpath = join(dpath, 'test_collect_module_level_singleline.py')
    source = utils.codeblock('''">>> pass"''')
    with open(modpath, 'w') as file:
        file.write(source)
    from xdoctest import core
    doctests = list(core.parse_doctestables(modpath, style='freeform'))
    assert len(doctests) == 1
    self = doctests[0]
    assert self.callname == '__doc__'
    self.config['colored'] = False
    assert self.format_src(offset_linenos=True).strip().startswith('1')
    assert self.format_src(offset_linenos=False).strip().startswith('1')
    with utils.PythonPathContext(dpath):
        status = self.run(verbose=0, on_error='return')
    assert status['passed']
    temp.cleanup()
def collect(self):
    from xdoctest import core
    modpath = str(self.fspath)

    style = self.config.getvalue('xdoctest_style')
    analysis = self.config.getvalue('xdoctest_analysis')
    self._prepare_internal_config()

    try:
        examples = list(
            core.parse_doctestables(modpath, style=style, analysis=analysis))
    except SyntaxError:
        if self.config.getvalue('xdoctest_ignore_syntax_errors'):
            pytest.skip('unable to import module %r' % self.fspath)
        else:
            raise

    for example in examples:
        example.config.update(self._examp_conf)
        name = example.unique_callname
        if hasattr(XDoctestItem, 'from_parent'):
            yield XDoctestItem.from_parent(self, name=name, example=example)
        else:
            # direct construction is deprecated
            yield XDoctestItem(name, self, example)
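# ---------------------------------------------------------------------
# Sketch (an addition, not part of the original source): the hasattr
# check above is the generic shim for pytest's node-construction
# deprecation; direct Node.__init__ calls were deprecated in favor of
# the from_parent classmethod in pytest 5.4. `MyItem`, `payload`, and
# `make_item` below are hypothetical names used only to illustrate the
# pattern.
import pytest


class MyItem(pytest.Item):
    def __init__(self, name, parent, payload=None):
        super().__init__(name, parent)
        self.payload = payload

    def runtest(self):
        # trivial body so the item is runnable
        assert self.payload is not None


def make_item(parent, name, payload):
    # Prefer from_parent when available (pytest >= 5.4); extra keyword
    # arguments are forwarded to __init__.
    if hasattr(MyItem, 'from_parent'):
        return MyItem.from_parent(parent, name=name, payload=payload)
    else:
        # Older pytest: direct construction is the only option.
        return MyItem(name, parent, payload=payload)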
def test_show_entire():
    """
    pytest testing/test_core.py::test_show_entire
    """
    temp = utils.TempDir()
    dpath = temp.ensure()
    modpath = join(dpath, 'test_show_entire.py')
    source = utils.codeblock(
        '''
        def foo():
            """
            Prefix

            Example:
                >>> x = 4
                >>> x = 5 + x
                >>> x = 6 + x
                >>> x = 7 + x
                >>> x
                22
                >>> x = 8 + x
                >>> x = 9 + x
                >>> x = 10 + x
                >>> x = 11 + x
                >>> x = 12 + x
                >>> x
                42

            text-line-after
            """
        ''')
    with open(modpath, 'w') as file:
        file.write(source)
    from xdoctest import core

    # calldefs = core.module_calldefs(modpath)
    # docline = calldefs['foo'].doclineno
    # docstr = calldefs['foo'].docstr
    # all_parts = parser.DoctestParser().parse(docstr)
    # assert docline == 2

    doctests = list(core.parse_doctestables(modpath, style='freeform'))
    assert len(doctests) == 1
    self = doctests[0]
    self.config['colored'] = False
    print(self.lineno)
    print(self._parts[0].line_offset)
    print(self.format_src())
    src_offset = self.format_src(offset_linenos=True).strip()
    src_nooffset = self.format_src(offset_linenos=False).strip()
    assert src_offset[:4].startswith('6')
    assert src_nooffset[:4].startswith('1')
    with utils.PythonPathContext(dpath):
        status = self.run(verbose=0, on_error='return')
    assert not status['passed']
    temp.cleanup()
def _test_status(docstr):
    docstr = utils.codeblock(docstr)
    try:
        temp = utils.util_misc.TempDoctest(docstr=docstr)
    except Exception:
        # pytest seems to load an older version of xdoctest for some reason
        import xdoctest
        import inspect
        print('xdoctest.__version__ = {!r}'.format(xdoctest.__version__))
        print('utils = {!r}'.format(utils))
        print('utils.util_misc = {!r}'.format(utils.util_misc))
        print('utils.TempDoctest = {!r}'.format(utils.TempDoctest))
        # inspect.getargspec was removed in Python 3.11; use signature
        print(inspect.signature(utils.TempDoctest))
        raise
    doctests = list(core.parse_doctestables(temp.modpath))
    status = doctests[0].run(verbose=0, on_error='return')
    return status
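# ---------------------------------------------------------------------
# Usage sketch (an addition, not part of the original source): how the
# helper above is meant to be called from a test in this module. The
# 'passed' key matches the status dicts asserted on elsewhere in this
# file.
def test_status_helper_smoke():
    status = _test_status(
        '''
        >>> x = 1 + 1
        >>> assert x == 2
        ''')
    assert status['passed']

    # on_error='return' means a failing doctest is reported in the
    # status dict rather than raised.
    status = _test_status(
        '''
        >>> assert False
        ''')
    assert not status['passed']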
def test_no_docstr():
    """
    CommandLine:
        python -m test_core test_no_docstr
    """
    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_no_docstr.py')
        source = utils.codeblock(
            '''
            def get_scales(kpts):
                """ Gets average scale (does not take into account elliptical shape """
                _scales = np.sqrt(get_sqrd_scales(kpts))
                return _scales
            ''')
        with open(modpath, 'w') as file:
            file.write(source)
        from xdoctest import core
        doctests = list(core.parse_doctestables(modpath, style='freeform'))
        assert len(doctests) == 0
def collect(self):
    from xdoctest import core
    modpath = str(self.fspath)

    style = self.config.getvalue('xdoctest_style')
    self._prepare_internal_config()

    try:
        examples = list(core.parse_doctestables(modpath, style=style))
    except SyntaxError:
        if self.config.getvalue('xdoctest_ignore_syntax_errors'):
            pytest.skip('unable to import module %r' % self.fspath)
        else:
            raise

    for example in examples:
        example.config.update(self._examp_conf)
        name = example.unique_callname
        yield XDoctestItem(name, self, example)
def test_oneliner():
    """
    python ~/code/xdoctest/testing/test_core.py test_oneliner
    """
    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_oneliner.py')
        source = utils.codeblock(
            '''
            def foo():
                """
                >>> assert False, 'should fail'
                """
            ''')
        with open(modpath, 'w') as file:
            file.write(source)
        doctests = list(core.parse_doctestables(modpath))
        assert len(doctests) == 1
        print('doctests = {!r}'.format(doctests))
        import pytest
        with pytest.raises(AssertionError, match='should fail'):
            doctests[0].run()
def test_mod_globals():
    with utils.TempDir() as temp:
        dpath = temp.dpath
        modpath = join(dpath, 'test_mod_globals.py')
        source = utils.codeblock(
            '''
            X = 10
            def test(self):
                """
                >>> X
                10
                """
            ''')
        with open(modpath, 'w') as file:
            file.write(source)
        from xdoctest import core
        doctests = list(core.parse_doctestables(modpath, style='freeform'))
        assert len(doctests) == 1
        self = doctests[0]
        with utils.PythonPathContext(dpath):
            status = self.run(verbose=0, on_error='return')
        assert status['passed']
        assert self.logged_evals[0] == 10
def doctest_module(modpath_or_name=None, command=None, argv=None, exclude=[],
                   style='auto', verbose=None, config=None, durations=None,
                   analysis='static'):
    """
    Executes requested google-style doctests in a package or module.
    Main entry point into the testing framework.

    Args:
        modpath_or_name (str): name of or path to the module.

        command (str): determines which doctests to run.
            if command is None, this is determined by parsing sys.argv
            Valid values are
                'all' - find and run all tests in a module
                'list' - list the tests in a module
                'dump' - dumps tests to stdout

        argv (List[str], default=None): if specified, command line flags that
            might influence behavior. if None uses sys.argv.
            SeeAlso :func:`_update_argparse_cli`
            SeeAlso :func:`doctest_example.Config._update_argparse_cli`

        verbose (int, default=None): Verbosity level.
            0 - disables all text
            1 - minimal printing
            3 - verbose printing

        exclude (List[str]): ignores any modname matching any of these
            glob-like patterns

        config (Dict[str, object]): modifies each example's configuration

        durations (int, default=None): if specified report top N slowest tests

        analysis (str): determines if doctests are found using static or
            dynamic analysis.

    Returns:
        Dict: run_summary

    Example:
        >>> modname = 'xdoctest.dynamic_analysis'
        >>> result = doctest_module(modname, 'list', argv=[''])
    """
    _log = partial(log, verbose=DEBUG)
    _log('------+ DEBUG +------')
    _log('CALLED doctest_module')
    _log('exclude = {!r}'.format(exclude))
    _log('argv = {!r}'.format(argv))
    _log('command = {!r}'.format(command))
    _log('modpath_or_name = {!r}'.format(modpath_or_name))
    _log('durations = {!r}'.format(durations))
    _log('config = {!r}'.format(config))
    _log('verbose = {!r}'.format(verbose))
    _log('style = {!r}'.format(style))
    _log('------+ /DEBUG +------')

    # Determine package name via caller if not specified
    if modpath_or_name is None:
        frame_parent = dynamic_analysis.get_parent_frame()
        modpath = frame_parent.f_globals['__file__']
    else:
        if command is None:
            # Allow the modname to contain the name of the test to be run
            if '::' in modpath_or_name:
                modpath_or_name, command = modpath_or_name.split('::')
        modpath = core._rectify_to_modpath(modpath_or_name)

    if config is None:
        config = doctest_example.Config()

    command, style, verbose = _parse_commandline(command, style, verbose, argv)

    _log = partial(log, verbose=verbose)
    _log('Start doctest_module({!r})'.format(modpath_or_name))
    _log('Listing tests')

    if command is None:
        # Display help if command is not specified
        _log('No testname given. Use `all` to run everything or'
             ' pick from a list of valid choices:')
        command = 'list'

    # TODO: command should not be allowed to be the requested doctest name in
    # case it conflicts with an existing command. This probably requires an
    # API change to this function.
    gather_all = (command == 'all' or command == 'dump')

    tic = time.time()

    # Parse all valid examples
    with warnings.catch_warnings(record=True) as parse_warnlist:
        examples = list(
            core.parse_doctestables(modpath, exclude=exclude, style=style,
                                    analysis=analysis))
        # Set each example mode to native to signal that we are using the
        # native xdoctest runner instead of the pytest runner
        for example in examples:
            example.mode = 'native'

    if command == 'list':
        if len(examples) == 0:
            _log('... no docstrings with examples found')
        else:
            _log('    ' + '\n    '.join([
                example.cmdline  # + ' @ ' + str(example.lineno)
                for example in examples
            ]))
        run_summary = {'action': 'list'}
        enabled_examples = examples  # so the summary call below is defined
    else:
        _log('gathering tests')
        enabled_examples = []
        for example in examples:
            if gather_all or command in example.valid_testnames:
                if gather_all and example.is_disabled():
                    continue
                enabled_examples.append(example)

        if len(enabled_examples) == 0:
            # Check for zero-arg funcs
            for example in _gather_zero_arg_examples(modpath):
                if command in example.valid_testnames:
                    enabled_examples.append(example)
                elif command in ['zero-all', 'zero', 'zero_all', 'zero-args']:
                    enabled_examples.append(example)

        if config:
            for example in enabled_examples:
                example.config.update(config)

        if command == 'dump':
            # format the doctests as normal unit tests
            _log('dumping tests to stdout')
            module_text = _convert_to_test_module(enabled_examples)
            _log(module_text)
            run_summary = {'action': 'dump'}
        else:
            # Run the gathered doctest examples
            RANDOMIZE_ORDER = False
            if RANDOMIZE_ORDER:
                # randomize the order in which tests are run
                import random
                random.shuffle(enabled_examples)
            run_summary = _run_examples(enabled_examples, verbose, config,
                                        _log=_log)

    toc = time.time()
    n_seconds = toc - tic

    # Print final summary info in a style similar to pytest
    if verbose >= 0 and run_summary:
        _print_summary_report(run_summary, parse_warnlist, n_seconds,
                              enabled_examples, durations, config=config,
                              _log=_log)

    return run_summary
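# ---------------------------------------------------------------------
# Usage sketch (an addition, not part of the original source): the
# docstring example above lists tests; the same entry point with
# command='all' executes them and returns the summary dict assembled by
# _run_examples. The module name follows the docstring's own example;
# `_demo_doctest_module_run` is a hypothetical helper name.
def _demo_doctest_module_run():
    run_summary = doctest_module('xdoctest.dynamic_analysis',
                                 command='all', argv=[''])
    print(run_summary)
    return run_summary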
def doctest_module(modpath_or_name=None, command=None, argv=None, exclude=[],
                   style='auto', verbose=None, config=None, durations=None):
    """
    Executes requested google-style doctests in a package or module.
    Main entry point into the testing framework.

    Args:
        modpath_or_name (str): name of or path to the module.
        command (str): determines which doctests to run.
            if command is None, this is determined by parsing sys.argv
        argv (list): if None uses sys.argv
        verbose (bool): verbosity flag
        exclude (list): ignores any modname matching any of these
            glob-like patterns
        config (dict): modifies each example's configuration

    Returns:
        Dict: run_summary

    Example:
        >>> modname = 'xdoctest.dynamic_analysis'
        >>> result = doctest_module(modname, 'list', argv=[''])
    """
    print('Start doctest_module({!r})'.format(modpath_or_name))
    # Determine package name via caller if not specified
    if modpath_or_name is None:
        frame_parent = dynamic.get_parent_frame()
        modpath = frame_parent.f_globals['__file__']
    else:
        modpath = core._rectify_to_modpath(modpath_or_name)

    if config is None:
        config = doctest_example.Config()

    command, style, verbose = _parse_commandline(command, style, verbose, argv)

    if command == 'list':
        print('Listing tests')

    if command is None:
        # Display help if command is not specified
        print('No testname given. Use `all` to run everything or'
              ' pick from a list of valid choices:')
        command = 'list'

    # TODO: command should not be allowed to be the requested doctest name in
    # case it conflicts with an existing command. This probably requires an
    # API change to this function.
    gather_all = (command == 'all' or command == 'dump')

    tic = time.time()

    # Parse all valid examples
    with warnings.catch_warnings(record=True) as parse_warnlist:
        examples = list(
            core.parse_doctestables(modpath, exclude=exclude, style=style))
        # Set each example mode to native to signal that we are using the
        # native xdoctest runner instead of the pytest runner
        for example in examples:
            example.mode = 'native'

    if command == 'list':
        if len(examples) == 0:
            print('... no docstrings with examples found')
        else:
            print('    ' + '\n    '.join([
                example.cmdline  # + ' @ ' + str(example.lineno)
                for example in examples
            ]))
        run_summary = {'action': 'list'}
        enabled_examples = examples  # so the summary call below is defined
    else:
        print('gathering tests')
        enabled_examples = []
        for example in examples:
            if gather_all or command in example.valid_testnames:
                if gather_all and example.is_disabled():
                    continue
                enabled_examples.append(example)

        if len(enabled_examples) == 0:
            # Check for zero-arg funcs
            for example in _gather_zero_arg_examples(modpath):
                if command in example.valid_testnames:
                    enabled_examples.append(example)
                elif command in ['zero-all', 'zero', 'zero_all', 'zero-args']:
                    enabled_examples.append(example)

        if config:
            for example in enabled_examples:
                example.config.update(config)

        if command == 'dump':
            # format the doctests as normal unit tests
            print('dumping tests to stdout')
            _convert_to_test_module(enabled_examples)
            run_summary = {'action': 'dump'}
        else:
            # Run the gathered doctest examples
            RANDOMIZE_ORDER = False
            if RANDOMIZE_ORDER:
                # randomize the order in which tests are run
                import random
                random.shuffle(enabled_examples)
            run_summary = _run_examples(enabled_examples, verbose, config)

    toc = time.time()
    n_seconds = toc - tic

    # Print final summary info in a style similar to pytest
    if verbose >= 0 and run_summary:
        _print_summary_report(run_summary, parse_warnlist, n_seconds,
                              enabled_examples, durations, config=config)

    return run_summary
def doctest_module(module_identifier=None, command=None, argv=None,
                   exclude=[], style='auto', verbose=None, config=None,
                   durations=None, analysis='static'):
    """
    Executes requested google-style doctests in a package or module.
    Main entry point into the testing framework.

    Args:
        module_identifier (str | ModuleType | None):
            The name of / path to the module, or the live module itself.
            If not specified, dynamic analysis will be used to introspect the
            module that called this function and that module will be used.
            This can also contain the callname followed by the `::` token.

        command (str): determines which doctests to run.
            if command is None, this is determined by parsing sys.argv
            Valid values are
                'all' - find and run all tests in a module
                'list' - list the tests in a module
                'dump' - dumps tests to stdout

        argv (List[str], default=None): if specified, command line flags that
            might influence behavior. if None uses sys.argv.
            SeeAlso :func:`_update_argparse_cli`
            SeeAlso :func:`doctest_example.DoctestConfig._update_argparse_cli`

        verbose (int, default=None): Verbosity level.
            0 - disables all text
            1 - minimal printing
            3 - verbose printing

        exclude (List[str]): ignores any modname matching any of these
            glob-like patterns

        config (Dict[str, object]): modifies each example's configuration

        durations (int, default=None): if specified report top N slowest tests

        analysis (str): determines if doctests are found using static or
            dynamic analysis.

    Returns:
        Dict: run_summary

    Example:
        >>> modname = 'xdoctest.dynamic_analysis'
        >>> result = doctest_module(modname, 'list', argv=[''])

    Example:
        >>> # xdoctest: +SKIP
        >>> # Demonstrate different ways "module_identifier" can be specified
        >>> #
        >>> # Using a module name
        >>> result = doctest_module('xdoctest.static_analysis')
        >>> #
        >>> # Using a module path
        >>> result = doctest_module(os.path.expanduser('~/code/xdoctest/xdoctest/static_analysis.py'))
        >>> #
        >>> # Using a module itself
        >>> from xdoctest import runner
        >>> result = doctest_module(runner)
        >>> #
        >>> # Using a module name and a specific callname
        >>> from xdoctest import runner
        >>> result = doctest_module('xdoctest.static_analysis::parse_static_value')
    """
    _log = partial(log, verbose=DEBUG)
    _log('------+ DEBUG +------')
    _log('CALLED doctest_module')
    _log('exclude = {!r}'.format(exclude))
    _log('argv = {!r}'.format(argv))
    _log('command = {!r}'.format(command))
    _log('module_identifier = {!r}'.format(module_identifier))
    _log('durations = {!r}'.format(durations))
    _log('config = {!r}'.format(config))
    _log('verbose = {!r}'.format(verbose))
    _log('style = {!r}'.format(style))
    _log('------+ /DEBUG +------')

    modinfo = {
        'modpath': None,
        'modname': None,
        'module': None,
    }

    if module_identifier is None:
        # Determine package name via caller if not specified
        frame_parent = dynamic_analysis.get_parent_frame()
        if '__file__' in frame_parent.f_globals:
            modinfo['modpath'] = frame_parent.f_globals['__file__']
        else:
            # Module might not exist as a path on disk, we might be trying to
            # test an IPython session.
            modinfo['modname'] = frame_parent.f_globals['__name__']
            modinfo['module'] = sys.modules[modinfo['modname']]
    else:
        if isinstance(module_identifier, types.ModuleType):
            modinfo['module'] = module_identifier
            modinfo['modpath'] = modinfo['module'].__file__
        else:
            # Allow the modname to contain the name of the test to be run
            if '::' in module_identifier:
                if command is None:
                    modpath_or_name, command = module_identifier.split('::')
                    modinfo['modpath'] = core._rectify_to_modpath(
                        modpath_or_name)
                else:
                    raise ValueError(
                        'Command must be None if using :: syntax')
            else:
                modinfo['modpath'] = core._rectify_to_modpath(
                    module_identifier)

    if config is None:
        config = doctest_example.DoctestConfig()

    command, style, verbose = _parse_commandline(command, style, verbose, argv)

    _log = partial(log, verbose=verbose)

    # Usually the "parsable_identifier" (i.e. the object we will extract the
    # docstrings from) is a path to a module, but sometimes we will only be
    # given the live module itself, hence the abstraction.
    if modinfo['modpath'] is None:
        parsable_identifier = modinfo['module']
    else:
        parsable_identifier = modinfo['modpath']

    _log('Start doctest_module({!r})'.format(parsable_identifier))
    _log('Listing tests')

    if command is None:
        # Display help if command is not specified
        _log('No testname given. Use `all` to run everything or'
             ' pick from a list of valid choices:')
        command = 'list'

    # TODO: command should not be allowed to be the requested doctest name in
    # case it conflicts with an existing command. This probably requires an
    # API change to this function.
    gather_all = (command == 'all' or command == 'dump')

    tic = time.time()

    # Parse all valid examples
    with warnings.catch_warnings(record=True) as parse_warnlist:
        examples = list(
            core.parse_doctestables(parsable_identifier, exclude=exclude,
                                    style=style, analysis=analysis))
        # Set each example mode to native to signal that we are using the
        # native xdoctest runner instead of the pytest runner
        for example in examples:
            example.mode = 'native'

    if command == 'list':
        if len(examples) == 0:
            _log('... no docstrings with examples found')
        else:
            _log('    ' + '\n    '.join([
                example.cmdline  # + ' @ ' + str(example.lineno)
                for example in examples
            ]))
        run_summary = {'action': 'list'}
        enabled_examples = examples  # so the summary call below is defined
    else:
        _log('gathering tests')
        enabled_examples = []
        for example in examples:
            if gather_all or command in example.valid_testnames:
                if gather_all and example.is_disabled():
                    continue
                enabled_examples.append(example)

        if len(enabled_examples) == 0:
            # Check for zero-arg funcs
            for example in _gather_zero_arg_examples(parsable_identifier):
                if command in example.valid_testnames:
                    enabled_examples.append(example)
                elif command in ['zero-all', 'zero', 'zero_all', 'zero-args']:
                    enabled_examples.append(example)

        if config:
            for example in enabled_examples:
                example.config.update(config)

        if command == 'dump':
            # format the doctests as normal unit tests
            _log('dumping tests to stdout')
            module_text = _convert_to_test_module(enabled_examples)
            _log(module_text)
            run_summary = {'action': 'dump'}
        else:
            # Run the gathered doctest examples
            RANDOMIZE_ORDER = False
            if RANDOMIZE_ORDER:
                # randomize the order in which tests are run
                import random
                random.shuffle(enabled_examples)
            run_summary = _run_examples(enabled_examples, verbose, config,
                                        _log=_log)

    toc = time.time()
    n_seconds = toc - tic

    # Print final summary info in a style similar to pytest
    if verbose >= 0 and run_summary:
        _print_summary_report(run_summary, parse_warnlist, n_seconds,
                              enabled_examples, durations, config=config,
                              _log=_log)

    return run_summary
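# ---------------------------------------------------------------------
# Usage sketch (an addition, not part of the original source): the `::`
# branch above targets a single doctest without a separate `command`
# argument, mirroring the last docstring example; per that branch,
# combining `::` with a non-None command raises ValueError.
# `_demo_doctest_module_callname` is a hypothetical helper name.
def _demo_doctest_module_callname():
    run_summary = doctest_module(
        'xdoctest.static_analysis::parse_static_value', argv=[''])
    return run_summary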