def repr_failure(self, excinfo):
    if excinfo.errisinstance(doctest.DocTestFailure):
        doctestfailure = excinfo.value
        example = doctestfailure.example
        test = doctestfailure.test
        filename = test.filename
        lineno = test.lineno + example.lineno + 1
        message = excinfo.type.__name__
        reprlocation = ReprFileLocation(filename, lineno, message)
        checker = doctest.OutputChecker()
        REPORT_UDIFF = doctest.REPORT_UDIFF
        filelines = py.path.local(filename).readlines(cr=0)
        i = max(test.lineno, max(0, lineno - 10))  # XXX?
        lines = []
        for line in filelines[i:lineno]:
            lines.append("%03d %s" % (i + 1, line))
            i += 1
        lines += checker.output_difference(example, doctestfailure.got,
                                           REPORT_UDIFF).split("\n")
        return ReprFailDoctest(reprlocation, lines)
    elif excinfo.errisinstance(doctest.UnexpectedException):
        excinfo = py.code.ExceptionInfo(excinfo.value.exc_info)
        return super(DoctestItem, self).repr_failure(excinfo)
    else:
        return super(DoctestItem, self).repr_failure(excinfo)
def failUnlessOutputCheckerMatch(self, want, got, msg=None):
    """ Fail unless the specified string matches the expected.

        :param want: The desired output pattern.
        :param got: The actual text to match.
        :param msg: A message to prefix on the failure message.
        :return: ``None``.
        :raises self.failureException: If the text does not match.

        Fail the test unless ``want`` matches ``got``, as determined by
        a ``doctest.OutputChecker`` instance. This is not an equality
        check, but a pattern match according to the ``OutputChecker``
        rules.

        """
    checker = doctest.OutputChecker()
    want = textwrap.dedent(want)
    source = ""
    example = doctest.Example(source, want)
    got = textwrap.dedent(got)
    checker_optionflags = functools.reduce(operator.or_, [
        doctest.ELLIPSIS,
    ])
    if not checker.check_output(want, got, checker_optionflags):
        if msg is None:
            diff = checker.output_difference(
                example, got, checker_optionflags)
            msg = "\n".join([
                "Output received did not match expected output",
                "{diff}",
            ]).format(diff=diff)
        raise self.failureException(msg)
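As a point of reference, the pattern match described in the docstring above can be reproduced with the standard library alone; a minimal sketch (the strings are illustrative):

import doctest

checker = doctest.OutputChecker()
# Plain equality would fail here, but with ELLIPSIS the "..." in the
# expected text acts as a wildcard and matches "bar".
assert checker.check_output("foo ... baz\n", "foo bar baz\n", doctest.ELLIPSIS)
# The surrounding text must still match literally.
assert not checker.check_output("foo ... baz\n", "foo bar qux\n", doctest.ELLIPSIS)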
def unitTestWithOutput(testname, studentfilename, outputstr, input_str, feedback):
    """
    :param testname: test name for the feedback
    :param studentfilename: Must exist
    :param outputstr: to compare student execution output with
    :param input_str:
    :param feedback: feedback object
    :return: whether the test ran smoothly
    """
    xb, xo, xe = executefromfilename(studentfilename, input_str)
    if not xb:
        feedback.addTestError(
            testname, " Problèmes avec votre code \n " + xo + "\n" + xe, "")
        return False
    oc = doctest.OutputChecker()
    res = oc.check_output(outputstr, xo, 0)
    # print("input:", input_str, "expected:", outputstr)
    # print("  received:", xo)
    if res:
        feedback.addTestSuccess(testname, xo, outputstr)
    else:
        r = oc.output_difference(doctest.Example(" le test", outputstr), xo, 0)
        if r.startswith("Expected:") and "Got:" in r:
            want, got = r.split("Got:")
            want = want[9:]
        else:
            want = r
            got = ""
        feedback.addTestFailure(testname, got, want)
    return True
def __init__(self, filename):
    self.parser = doctest.DocTestParser()
    self.retval = 0
    self._filename = filename
    self._default_duration = 0.01
    self._prompt = False
    self._interact = False
    self._pos = 0
    self._windows = {}
    self._ns = {}
    self._source_id = -1
    self._stmts = self.parser.get_examples(open(filename).read())
    self._checker = doctest.OutputChecker()
    # Create a fake output target for capturing doctest output.
    self._fakeout = _SpoofOut()
    self._stdout = sys.stdout
    self._options = (doctest.ELLIPSIS |
                     doctest.REPORT_ONLY_FIRST_FAILURE |
                     doctest.REPORT_UDIFF)
    self._updateFile = False
    self._caughtExceptions = []  # list of (exception, traceback) pairs
    wi = WidgetIntrospecter()
    wi.register_event_handler()
    wi.connect('window-added', self._on_wi__window_added)
    wi.connect('window-removed', self._on_wi__window_removed)
def PageTestSuite(storydir, package=None, setUp=setUpGlobs):
    """Create a suite of page tests for files found in storydir.

    :param storydir: the directory containing the page tests.
    :param package: the package to resolve storydir relative to. Defaults
        to the caller's package.

    Each file is added as a separate DocFileTest.
    """
    # We need to normalise the package name here, because it
    # involves checking the parent stack frame. Otherwise the
    # files would be looked up relative to this module.
    package = doctest._normalize_module(package)
    abs_storydir = doctest._module_relative_path(package, storydir)
    filenames = set(filename
                    for filename in os.listdir(abs_storydir)
                    if filename.lower().endswith('.txt'))
    suite = unittest.TestSuite()
    # Add tests to the suite individually.
    if filenames:
        checker = doctest.OutputChecker()
        paths = [os.path.join(storydir, filename)
                 for filename in filenames]
        suite.addTest(
            LayeredDocFileSuite(
                paths=paths,
                package=package,
                checker=checker,
                stdout_logging=False,
                layer=PageTestLayer,
                setUp=setUp))
    return suite
def failUnlessOutputCheckerMatch(self, want, got, msg=None):
    """ Fail unless the specified string matches the expected.

        Fail the test unless ``want`` matches ``got``, as determined by
        a ``doctest.OutputChecker`` instance. This is not an equality
        check, but a pattern match according to the ``OutputChecker``
        rules.

        """
    checker = doctest.OutputChecker()
    want = textwrap.dedent(want)
    source = ""
    example = doctest.Example(source, want)
    got = textwrap.dedent(got)
    checker_optionflags = reduce(operator.or_, [
        doctest.ELLIPSIS,
    ])
    if not checker.check_output(want, got, checker_optionflags):
        if msg is None:
            diff = checker.output_difference(
                example, got, checker_optionflags)
            msg = "\n".join([
                "Output received did not match expected output",
                "%(diff)s",
            ]) % vars()
        raise self.failureException(msg)
def __init__(self, *args, **kw):
    unittest.TestCase.__init__(self, *args, **kw)
    self._checker = doctest.OutputChecker()
    self._optionflags = (
        doctest.NORMALIZE_WHITESPACE |
        doctest.ELLIPSIS |
        doctest.REPORT_ONLY_FIRST_FAILURE)
def _check_basestring(self, expected, actual, msg=None):
    flags = (doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE |
             doctest.REPORT_NDIFF)
    checker = doctest.OutputChecker()
    right = checker.check_output(expected, actual, flags)
    if not right:
        # doctest.Example requires both a source and the wanted output;
        # an empty source is fine since only `want` is used for the diff.
        diff = checker.output_difference(Example("", expected), actual, flags)
        raise self.failureException(diff)
    return right
def main(runOne: Union[str, bool] = False):
    if runOne is False:
        nbvalNotebook.runAll()

    totalTests = 0
    totalFailures = 0
    timeStart = time.time()

    unused_dtr = doctest.DocTestRunner(
        doctest.OutputChecker(),
        verbose=False,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)

    for mt in getDocumentationFiles(runOne):
        # if 'examples' in mt.module:
        #     continue
        print(mt.module + ": ", end="")
        try:
            if mt.autoGen is False:
                (failCount, testCount) = doctest.testfile(
                    mt.fullModulePath,
                    module_relative=False,
                    optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
            else:
                print('ipython/autogenerated; no tests')
                continue
                ### this was an attempt to run the ipynb through the doctest, but
                ### it required too many compromises in how we'd like to write a
                ### user's guide -- i.e., dicts can change order, etc. better just
                ### to monthly run through the User's Guide line by line and update.
                # examples = getDocumentationFromAutoGen(mt.fullModulePath)
                # dt = doctest.DocTest(
                #     [doctest.Example(e[0], e[1]) for e in examples], {},
                #     mt.moduleNoExtension, mt.fullModulePath, 0, None)
                # (failCount, testCount) = dtr.run(dt)

            if failCount > 0:
                print(f"{mt.module} had {failCount} failures in {testCount} tests")
            elif testCount == 0:
                print("no tests")
            else:
                print(f"all {testCount} tests ran successfully")
            totalTests += testCount
            totalFailures += failCount
        except Exception as e:  # pylint: disable=broad-except
            print(f"failed miserably! {str(e)}")
            import traceback
            tb = traceback.format_exc()
            print(f"Here's the traceback for the exception: \n{tb}")

    elapsedTime = time.time() - timeStart
    print(f"Ran {totalTests} tests ({totalFailures} failed) in {elapsedTime:.4f} seconds")
def compare_strings(self, expected, actual):
    example = argparse.Namespace()
    example.want = expected
    output_checker = doctest.OutputChecker()
    flags = (doctest.NORMALIZE_WHITESPACE |
             doctest.ELLIPSIS |
             doctest.REPORT_NDIFF)
    success = output_checker.check_output(expected, actual, flags)
    if not success:
        diff = output_checker.output_difference(example, actual, flags)
        raise Exception(diff)
def compare_strings(*, expected="", actual=""):
    actual = uqbar.strings.normalize(ansi_escape.sub("", actual))
    expected = uqbar.strings.normalize(ansi_escape.sub("", expected))
    example = types.SimpleNamespace()
    example.want = expected
    output_checker = doctest.OutputChecker()
    flags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS | doctest.REPORT_NDIFF
    success = output_checker.check_output(expected, actual, flags)
    if not success:
        diff = output_checker.output_difference(example, actual, flags)
        raise Exception(diff)
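Both compare_strings helpers above lean on a detail worth spelling out: output_difference() reads only the `want` attribute of the example it is given (true of the CPython implementation these snippets target), so a bare namespace object can stand in for a real doctest.Example. A minimal sketch:

import doctest
import types

checker = doctest.OutputChecker()
# Any object with a `want` attribute will do in place of doctest.Example.
example = types.SimpleNamespace(want="expected\n")
print(checker.output_difference(example, "actual\n", doctest.REPORT_NDIFF))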
def __str__(self):
    msg = """
%(connection)s
Failed example:
    %(cmd)s
%(diff)s
"""
    checker = doctest.OutputChecker()
    connection = self.example.connection
    cmd = self.example.cmd
    diff = checker.output_difference(self.example, self.got,
                                     self.optionflags)
    return msg % vars()
def __init__(self, example, flags=0):
    """Create a DocTestMatches to match example.

    :param example: The example to match e.g. 'foo bar baz'
    :param flags: doctest comparison flags to match on. e.g.
        doctest.ELLIPSIS.
    """
    if not example.endswith('\n'):
        example += '\n'
    self.want = example  # required variable name by doctest.
    self.flags = flags
    self._checker = doctest.OutputChecker()
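This constructor follows the testtools DocTestMatches convention. As a sketch of how such a matcher is typically completed and used, here is an illustrative, self-contained version; the class name and the matches() method are assumptions for demonstration, not the library's code:

import doctest

class DocTestMatchesSketch:
    """Illustrative matcher wrapping OutputChecker, testtools-style."""

    def __init__(self, example, flags=0):
        if not example.endswith('\n'):
            example += '\n'
        self.want = example  # attribute name expected by doctest internals
        self.flags = flags
        self._checker = doctest.OutputChecker()

    def matches(self, actual):
        # Normalize the trailing newline the same way as the wanted text.
        if not actual.endswith('\n'):
            actual += '\n'
        return self._checker.check_output(self.want, actual, self.flags)

assert DocTestMatchesSketch("foo ... baz", doctest.ELLIPSIS).matches("foo bar baz")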
def validate(self, command, args, expected, options):
    stdout, stderr = self.run_command(command, args)
    checker = doctest.OutputChecker()
    if checker.check_output(expected.rstrip(),
                            stdout.rstrip() + stderr.rstrip(),
                            options):
        return
    msg = 'Clatter test failed. {0} != {1}\n\n+ {0}\n- {1}'.format(
        stdout + stderr, expected.rstrip())
    raise ValueError(msg)
def assert_doctest_equal(got, want, options=('ELLIPSIS',)):
    import doctest
    assert isinstance(got, str), got
    optionflags = 0
    for o in options:
        optionflags |= doctest.OPTIONFLAGS_BY_NAME[o]
    checker = doctest.OutputChecker()
    checked = checker.check_output(want, got, optionflags)
    if not checked:
        raise AssertionError(checker.output_difference(
            doctest.Example('dummy', want),
            got + '\n',
            optionflags,
        ).rstrip('\n'))
def testTypeCheckedDocstringGetsFoundByDoctest(self):
    import doctest
    import doctests
    finder = doctest.DocTestFinder(verbose=True)
    tests = finder.find(doctests)
    self.assertEquals(3, len(tests))
    runner = doctest.DocTestRunner(doctest.OutputChecker())
    for test in tests:
        runner.run(test)
    self.assertEquals(7, runner.summarize()[1])
    self.assertEquals(0, runner.summarize()[0])
def check_text_ellipsis_doctest(reference, actual):
    """
    >>> check_text_ellipsis_doctest("user: ...\\nname: elevation",
    ...                             "user: some_user\\nname: elevation")
    True
    >>> check_text_ellipsis_doctest("user: ...\\nname: elevation",
    ...                             "user: \\nname: elevation")
    True

    This function uses doctest's checking function, so we will discuss
    here how the underlying function behaves.

    >>> checker = doctest.OutputChecker()
    >>> checker.check_output("user: some_user\\nname: elevation",
    ...                      "user: some_user\\nname: elevation",
    ...                      optionflags=None)
    True
    >>> checker.check_output("user: user1\\nname: elevation",
    ...                      "user: some_user\\nname: elevation",
    ...                      optionflags=doctest.ELLIPSIS)
    False
    >>> checker.check_output("user: ...\\nname: elevation",
    ...                      "user: some_user\\nname: elevation",
    ...                      optionflags=doctest.ELLIPSIS)
    True

    The ellipsis also matches an empty string, so the following matches:

    >>> checker.check_output("user: ...\\nname: elevation",
    ...                      "user: \\nname: elevation",
    ...                      optionflags=doctest.ELLIPSIS)
    True

    It is robust to a misspelled matching string, but does not allow an
    ellipsis followed by a dot, e.g. at the end of a sentence:

    >>> checker.check_output("user: ....\\nname: elevation",
    ...                      "user: some_user\\nname: elevation",
    ...                      optionflags=doctest.ELLIPSIS)
    False
    """
    # This could also be a global checker.
    checker = doctest.OutputChecker()
    return checker.check_output(reference, actual,
                                optionflags=doctest.ELLIPSIS)
def test_check_output_with_whitespace_normalization():
    # basically a unittest for a method in the doctest stdlib
    got = "{'a': 3, 'b': 44, 'c': 20}"
    want = textwrap.dedent("""
        {'a': 3, 'b': 44, 'c': 20}
        """)
    assert doctest.OutputChecker().check_output(
        want, got, optionflags=doctest.NORMALIZE_WHITESPACE)

    # check_output() with the NORMALIZE_WHITESPACE flag basically does the
    # following
    got = ' '.join(got.split())
    want = ' '.join(want.split())
    assert got == want
def validate(self, command, args, expected, options):
    stdout, stderr = self.run_command(command, args)
    checker = doctest.OutputChecker()
    options = self.options | options
    if checker.check_output(expected.rstrip(),
                            stdout.rstrip() + stderr.rstrip(),
                            options):
        return
    self.want = expected.rstrip()
    msg = checker.output_difference(self,
                                    stdout.rstrip() + stderr.rstrip(),
                                    options)
    raise ValueError(msg)
def doctest_compare_str(
        want,
        got,
        options=('ELLIPSIS',),
):
    """
    Allowed options: see doctest.OPTIONFLAGS_BY_NAME.keys()

        'DONT_ACCEPT_BLANKLINE',
        'DONT_ACCEPT_TRUE_FOR_1',
        'ELLIPSIS',
        'FAIL_FAST',
        'IGNORE_EXCEPTION_DETAIL',
        'NORMALIZE_WHITESPACE',
        'REPORT_CDIFF',
        'REPORT_NDIFF',
        'REPORT_ONLY_FIRST_FAILURE',
        'REPORT_UDIFF',
        'SKIP',

    >>> print(doctest_compare_str('bla...', 'blab'))
    None
    >>> print(doctest_compare_str('bla...', 'blub'))
    Expected:
        bla...
    Got:
        blub
    """
    optionflags = 0
    for o in options:
        optionflags |= doctest.OPTIONFLAGS_BY_NAME[o]
    checker = doctest.OutputChecker()
    checked = checker.check_output(want, got, optionflags)
    # print(checked, 'checked')
    if not checked:

        class Example:
            def __init__(self, want):
                self.want = want

        return checker.output_difference(
            Example(want + '\n'),
            got + '\n',
            optionflags,
        ).rstrip('\n')
def repr_failure(self, excinfo):
    import doctest
    if excinfo.errisinstance(
            (doctest.DocTestFailure, doctest.UnexpectedException)):
        doctestfailure = excinfo.value
        example = doctestfailure.example
        test = doctestfailure.test
        filename = test.filename
        if test.lineno is None:
            lineno = None
        else:
            lineno = test.lineno + example.lineno + 1
        message = excinfo.type.__name__
        reprlocation = ReprFileLocation(filename, lineno, message)
        checker = doctest.OutputChecker()
        REPORT_UDIFF = doctest.REPORT_UDIFF
        filelines = py.path.local(filename).readlines(cr=0)
        lines = []
        if lineno is not None:
            i = max(test.lineno, max(0, lineno - 10))  # XXX?
            for line in filelines[i:lineno]:
                lines.append("%03d %s" % (i + 1, line))
                i += 1
        else:
            lines.append(
                'EXAMPLE LOCATION UNKNOWN, not showing all tests of that example')
            indent = '>>>'
            for line in example.source.splitlines():
                lines.append('??? %s %s' % (indent, line))
                indent = '...'
        if excinfo.errisinstance(doctest.DocTestFailure):
            lines += checker.output_difference(example, doctestfailure.got,
                                               REPORT_UDIFF).split("\n")
        else:
            inner_excinfo = py.code.ExceptionInfo(excinfo.value.exc_info)
            lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
            lines += traceback.format_exception(*excinfo.value.exc_info)
        return ReprFailDoctest(reprlocation, lines)
    else:
        return super(DoctestItem, self).repr_failure(excinfo)
def __init__(self, filename):
    self.parser = doctest.DocTestParser()
    self.retval = 0
    self._filename = filename
    self._pos = 0
    self._windows = {}
    self._ns = {}
    self._source_id = -1
    self._stmts = self.parser.get_examples(open(filename).read())
    self._checker = doctest.OutputChecker()
    # Create a fake output target for capturing doctest output.
    self._fakeout = _SpoofOut()
    self._options = doctest.ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE
    wi = WidgetIntrospecter()
    wi.register_event_handler()
    wi.connect('window-added', self._on_wi__window_added)
    wi.connect('window-removed', self._on_wi__window_removed)
def assertOutput(self, connection, cmd, want, optionflags=None, timeout=None):
    optionflags = optionflags or 0
    got = connection(cmd, timeout=timeout)
    checker = doctest.OutputChecker()
    result = checker.check_output(want, got, optionflags)
    if result == True:
        return

    _connection, _cmd, _want = connection, cmd, want

    class Example(object):
        connection = _connection
        cmd = _cmd
        want = _want

    raise DocTestFailure(Example, got, optionflags)
def test_extract_floats(self, text, expected_floats):
    extract_floats = keras_doctest_lib._FloatExtractor()
    output_checker = keras_doctest_lib.KerasDoctestOutputChecker()

    (text_parts, extracted_floats) = extract_floats(text)
    text_with_wildcards = "...".join(text_parts)

    # Check that the lengths match before doing anything else.
    try:
        self.assertLen(extracted_floats, len(expected_floats))
    except AssertionError as e:
        msg = "\n\n expected: {}\n found: {}".format(
            expected_floats, extracted_floats)
        e.args = (e.args[0] + msg, )
        raise e

    # The floats should match according to allclose
    try:
        self.assertTrue(
            output_checker._allclose(expected_floats, extracted_floats))
    except AssertionError as e:
        msg = "\n\nexpected: {}\nfound: {}".format(
            expected_floats, extracted_floats)
        e.args = (e.args[0] + msg, )
        raise e

    # The wildcard text should match the input text, according to the
    # OutputChecker base class.
    try:
        self.assertTrue(doctest.OutputChecker().check_output(
            want=text_with_wildcards,
            got=text,
            optionflags=doctest.ELLIPSIS,
        ))
    except AssertionError as e:
        msg = "\n\n expected: {}\n found: {}".format(
            text_with_wildcards, text)
        e.args = (e.args[0] + msg, )
        raise e
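The technique exercised by this test, replacing literal floats with "..." so that the ELLIPSIS flag can match numerically volatile output, can be sketched with the standard library alone. The regex below is a simplified stand-in for the library's _FloatExtractor, and the strings are illustrative:

import doctest
import re

# Simplified float extractor: swap each literal float for a "..." wildcard.
float_re = re.compile(r"[-+]?\d+\.\d+(?:[eE][-+]?\d+)?")
want = float_re.sub("...", "loss: 0.12345 accuracy: 0.98")
# ELLIPSIS lets the wildcarded template match output with different digits.
assert doctest.OutputChecker().check_output(
    want, "loss: 0.12389 accuracy: 0.97", optionflags=doctest.ELLIPSIS)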
def test_readme_shell_cmds(ensure_doc_scratch, check):
    """Perform testing on README shell command examples."""
    text = Path("README.rst").read_text()

    chk = dt.OutputChecker()
    dt_flags = dt.ELLIPSIS | dt.NORMALIZE_WHITESPACE

    for i, mch in enumerate(p_shell.finditer(text)):
        cmd = mch.group("cmd")
        out = mch.group("out")

        proc = sp.run(  # noqa: S603
            shlex.split(cmd), stdout=sp.PIPE, stderr=sp.STDOUT, timeout=30
        )

        result = proc.stdout.decode("utf-8")
        msg = "\n\nExpected:\n" + out + "\n\nGot:\n" + result

        with check.check(msg=f"match_{i}"):
            assert chk.check_output(out, result, dt_flags), msg
def drop_create(name=DB_NAME, lang='en'):
    if db_exist(name):
        drop_db(name)
    create_db(name, lang)


def doctest_setup(test):
    return drop_create()


def doctest_teardown(test):
    return drop_db()


doctest_checker = doctest.OutputChecker()


class TestSuite(unittest.TestSuite):

    def run(self, *args, **kwargs):
        DatabaseOperationalError = backend.get('DatabaseOperationalError')
        while True:
            try:
                exist = db_exist()
                break
            except DatabaseOperationalError as err:
                # Retry on connection error
                sys.stderr.write(str(err))
                time.sleep(1)
        result = super(TestSuite, self).run(*args, **kwargs)
        if not exist:
class Checker(doctest.OutputChecker):
    obj_pattern = re.compile(r'at 0x[0-9a-fA-F]+>')
    vanilla = doctest.OutputChecker()
    rndm_markers = {'# random', '# Random', '#random', '#Random',
                    "# may vary"}
    stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
                 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
                 '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim',
                 'set_xlim', '# reformatted', '.set_xlabel(',
                 '.set_ylabel(', '.set_zlabel(', '.set(xlim=',
                 '.set(ylim=', '.set(xlabel=', '.set(ylabel='}

    def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
        self.parse_namedtuples = parse_namedtuples
        self.atol, self.rtol = atol, rtol
        if ns is None:
            self.ns = dict(CHECK_NAMESPACE)
        else:
            self.ns = ns

    def check_output(self, want, got, optionflags):
        # cut it short if they are equal
        if want == got:
            return True

        # skip stopwords in source
        if any(word in self._source for word in self.stopwords):
            return True

        # skip random stuff
        if any(word in want for word in self.rndm_markers):
            return True

        # skip function/object addresses
        if self.obj_pattern.search(got):
            return True

        # ignore comments (e.g. signal.freqresp)
        if want.lstrip().startswith("#"):
            return True

        # try the standard doctest
        try:
            if self.vanilla.check_output(want, got, optionflags):
                return True
        except Exception:
            pass

        # OK then, convert strings to objects
        try:
            a_want = eval(want, dict(self.ns))
            a_got = eval(got, dict(self.ns))
        except Exception:
            # Maybe we're printing a numpy array? This produces invalid python
            # code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
            # values. So, reinsert commas and retry.
            # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
            #       (2) n-dim arrays with n > 1
            s_want = want.strip()
            s_got = got.strip()
            cond = (s_want.startswith("[") and s_want.endswith("]") and
                    s_got.startswith("[") and s_got.endswith("]"))
            if cond:
                s_want = ", ".join(s_want[1:-1].split())
                s_got = ", ".join(s_got[1:-1].split())
                return self.check_output(s_want, s_got, optionflags)

            if not self.parse_namedtuples:
                return False
            # suppose that "want" is a tuple, and "got" is smth like
            # MoodResult(statistic=10, pvalue=0.1).
            # Then convert the latter to the tuple (10, 0.1),
            # and then compare the tuples.
            try:
                num = len(a_want)
                regex = (r'[\w\d_]+\(' +
                         ', '.join([r'[\w\d_]+=(.+)'] * num) +
                         r'\)')
                grp = re.findall(regex, got.replace('\n', ' '))
                if len(grp) > 1:  # no more than one for now
                    return False
                # fold it back to a tuple
                got_again = '(' + ', '.join(grp[0]) + ')'
                return self.check_output(want, got_again, optionflags)
            except Exception:
                return False

        # ... and defer to numpy
        try:
            return self._do_check(a_want, a_got)
        except Exception:
            # heterog tuple, eg (1, np.array([1., 2.]))
            try:
                return all(self._do_check(w, g)
                           for w, g in zip(a_want, a_got))
            except (TypeError, ValueError):
                return False

    def _do_check(self, want, got):
        # This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
        try:
            if want == got:
                return True
        except Exception:
            pass
        return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
class Checker(doctest.OutputChecker):
    obj_pattern = re.compile(r'at 0x[0-9a-fA-F]+>')
    vanilla = doctest.OutputChecker()
    rndm_markers = {'# random', '# Random', '#random', '#Random',
                    "# may vary"}
    stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
                 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
                 '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim',
                 'set_xlim', '# reformatted', '.set_xlabel(',
                 '.set_ylabel(', '.set_zlabel(', '.set(xlim=',
                 '.set(ylim=', '.set(xlabel=', '.set(ylabel='}

    def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
        self.parse_namedtuples = parse_namedtuples
        self.atol, self.rtol = atol, rtol
        if ns is None:
            self.ns = dict(CHECK_NAMESPACE)
        else:
            self.ns = ns

    def check_output(self, want, got, optionflags):
        # cut it short if they are equal
        if want == got:
            return True

        # skip stopwords in source
        if any(word in self._source for word in self.stopwords):
            return True

        # skip random stuff
        if any(word in want for word in self.rndm_markers):
            return True

        # skip function/object addresses
        if self.obj_pattern.search(got):
            return True

        # ignore comments (e.g. signal.freqresp)
        if want.lstrip().startswith("#"):
            return True

        # try the standard doctest
        try:
            if self.vanilla.check_output(want, got, optionflags):
                return True
        except Exception:
            pass

        # OK then, convert strings to objects
        try:
            a_want = eval(want, dict(self.ns))
            a_got = eval(got, dict(self.ns))
        except Exception:
            # Maybe we're printing a numpy array? This produces invalid python
            # code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
            # values. So, reinsert commas and retry.
            # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
            #       (2) n-dim arrays with n > 1
            s_want = want.strip()
            s_got = got.strip()
            cond = (s_want.startswith("[") and s_want.endswith("]") and
                    s_got.startswith("[") and s_got.endswith("]"))
            if cond:
                s_want = ", ".join(s_want[1:-1].split())
                s_got = ", ".join(s_got[1:-1].split())
                return self.check_output(s_want, s_got, optionflags)

            # maybe we are dealing with masked arrays?
            # their repr uses '--' for masked values and this is invalid syntax
            # If so, replace '--' by nans (they are masked anyway) and retry
            if 'masked_array' in want or 'masked_array' in got:
                s_want = want.replace('--', 'nan')
                s_got = got.replace('--', 'nan')
                return self.check_output(s_want, s_got, optionflags)

            if "=" not in want and "=" not in got:
                # if we're here, want and got cannot be eval-ed (hence cannot
                # be converted to numpy objects), they are not namedtuples
                # (those must have at least one '=' sign).
                # Thus they should have compared equal with vanilla doctest.
                # Since they did not, it's an error.
                return False

            if not self.parse_namedtuples:
                return False
            # suppose that "want" is a tuple, and "got" is smth like
            # MoodResult(statistic=10, pvalue=0.1).
            # Then convert the latter to the tuple (10, 0.1),
            # and then compare the tuples.
            try:
                got_again = try_convert_namedtuple(got)
                want_again = try_convert_namedtuple(want)
            except Exception:
                return False
            else:
                return self.check_output(want_again, got_again, optionflags)

        # ... and defer to numpy
        try:
            return self._do_check(a_want, a_got)
        except Exception:
            # heterog tuple, eg (1, np.array([1., 2.]))
            try:
                return all(self._do_check(w, g)
                           for w, g in zip(a_want, a_got))
            except (TypeError, ValueError):
                return False

    def _do_check(self, want, got):
        # This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
        try:
            if want == got:
                return True
        except Exception:
            pass
        return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
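Both variants of this Checker share one core idea: when the textual comparison fails, fall back to a numeric comparison within a tolerance. A minimal sketch of that subclassing pattern, with illustrative names and none of the scipy-specific machinery:

import doctest

import numpy as np

class AllcloseChecker(doctest.OutputChecker):
    """Illustrative subclass: fall back to numeric comparison on text mismatch."""

    def check_output(self, want, got, optionflags):
        if super().check_output(want, got, optionflags):
            return True
        try:
            # eval() of the output text is tolerable here only because doctest
            # output comes from code we already executed ourselves.
            return np.allclose(eval(want), eval(got), rtol=1e-2, atol=1e-8)
        except Exception:
            return False

checker = AllcloseChecker()
assert checker.check_output("0.3333333", "0.3333", 0)   # close enough numerically
assert not checker.check_output("0.3333333", "0.5", 0)  # outside the tolerance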
def update_doctests(filename, in_place, verbose):
    orig_text = pathlib.Path(filename).read_text()

    # New text assembled
    new_text = ''

    # Namespace used for executing examples
    context = dict(ts=ts, np=np)

    orig_lines = orig_text.splitlines()

    # DocTestParser skips examples that are blank or are entirely comments.
    # We need to add them back in.
    def add_comment_examples(start_line, end_line):
        nonlocal new_text
        for line in orig_lines[start_line:end_line]:
            if re.fullmatch(r'\s*>>>\s+#.*', line):
                new_text += line + '\n'

    prev_line = 0
    for example in doctest.DocTestParser().parse(orig_text, filename):
        if isinstance(example, str):
            new_text += example
            continue
        assert isinstance(example, doctest.Example)
        add_comment_examples(prev_line, example.lineno)
        prev_line = example.lineno
        # Prefix added to all examples to ensure `await` is parsed correctly.
        async_prefix = 'async def foo():\n'
        formatted, valid = yapf.yapflib.yapf_api.FormatCode(
            async_prefix + textwrap.indent(example.source, ' '),
            style_config={
                'based_on_style': 'google',
                # Add 2 due to extra `async def foo` wrapping.
                # Subtract 4 due to ">>> "
                'column_limit': 80 + 2 - example.indent - 4,
            })
        formatted = textwrap.dedent(formatted[len(async_prefix):])
        for i, line in enumerate(formatted.splitlines()):
            prompt = '>>> ' if i == 0 else '... '
            new_text += ' ' * example.indent + prompt + line + '\n'
        fakeout = io.StringIO()
        # Support top-level await
        # https://bugs.python.org/issue37006
        # https://github.com/python/cpython/compare/master...tirkarthi:asyncio-await-doctest
        loop = asyncio.get_event_loop()
        orig_stdout = sys.stdout
        success = True
        if verbose:
            print(example.source)
        # Execute the example
        try:
            sys.stdout = fakeout
            code = compile(source=example.source,
                           filename='fakefile.py',
                           mode='single',
                           flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
            if code.co_flags & inspect.CO_COROUTINE:
                loop.run_until_complete(eval(code, context))  # pylint: disable=eval-used
            else:
                exec(code, context)  # pylint: disable=exec-used
            actual_output = fakeout.getvalue()
            if actual_output and not actual_output.endswith('\n'):
                actual_output += '\n'
        except KeyboardInterrupt:
            raise
        except:  # pylint: disable=bare-except
            exc_type, exc_value, _ = sys.exc_info()
            success = False
            actual_output = (
                'Traceback (most recent call last):\n ...\n' +
                traceback.format_exception_only(exc_type, exc_value)[-1] + '\n')
        finally:
            sys.stdout = orig_stdout
        output = None
        if example.want:
            if doctest.OutputChecker().check_output(example.want, actual_output,
                                                    doctest.ELLIPSIS):
                # Preserve existing output if it matches (in case it contains
                # ellipses).
                output = example.want
            else:
                output = actual_output
        if not success and not example.want:
            output = actual_output
        if output:
            for line in output.rstrip('\n').splitlines():
                new_text += ' ' * example.indent + line + '\n'
    add_comment_examples(prev_line, None)
    if in_place:
        with open(filename, 'w') as f:
            f.write(new_text)
    else:
        print(new_text)
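The compile/eval dance in the snippet above is the documented way (bpo-37006, Python 3.8+) to execute snippets that use `await` outside a coroutine. A stripped-down sketch of just that mechanism, with an illustrative one-line example source:

import ast
import asyncio
import inspect

source = "await asyncio.sleep(0)"
# The flag makes compile() accept top-level await; the resulting code object
# is marked as a coroutine and must be driven by an event loop.
code = compile(source, "<snippet>", "single",
               flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
loop = asyncio.new_event_loop()
try:
    if code.co_flags & inspect.CO_COROUTINE:
        loop.run_until_complete(eval(code, {"asyncio": asyncio}))
    else:
        exec(code, {"asyncio": asyncio})
finally:
    loop.close()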
class Checker(doctest.OutputChecker):
    obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
    vanilla = doctest.OutputChecker()
    rndm_markers = {'# random', '# Random', '#random', '#Random',
                    "# may vary"}
    stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
                 'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
                 '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim',
                 'set_xlim'}

    def __init__(self, parse_namedtuples=True, atol=1e-8, rtol=1e-2):
        self.parse_namedtuples = parse_namedtuples
        self.atol, self.rtol = atol, rtol

    def check_output(self, want, got, optionflags):
        # cut it short if they are equal
        if want == got:
            return True

        # skip stopwords in source
        if any(word in self._source for word in self.stopwords):
            return True

        # skip random stuff
        if any(word in want for word in self.rndm_markers):
            return True

        # skip function/object addresses
        if self.obj_pattern.search(got):
            return True

        # ignore comments (e.g. signal.freqresp)
        if want.lstrip().startswith("#"):
            return True

        # try the standard doctest
        try:
            if self.vanilla.check_output(want, got, optionflags):
                return True
        except Exception:
            pass

        # OK then, convert strings to objects
        try:
            a_want = eval(want, dict(ns))
            a_got = eval(got, dict(ns))
        except:
            if not self.parse_namedtuples:
                return False
            # suppose that "want" is a tuple, and "got" is smth like
            # MoodResult(statistic=10, pvalue=0.1).
            # Then convert the latter to the tuple (10, 0.1),
            # and then compare the tuples.
            try:
                num = len(a_want)
                regex = (r'[\w\d_]+\(' +
                         ', '.join([r'[\w\d_]+=(.+)'] * num) +
                         r'\)')
                grp = re.findall(regex, got.replace('\n', ' '))
                if len(grp) > 1:  # no more than one for now
                    return False
                # fold it back to a tuple
                got_again = '(' + ', '.join(grp[0]) + ')'
                return self.check_output(want, got_again, optionflags)
            except Exception:
                return False

        # ... and defer to numpy
        try:
            return self._do_check(a_want, a_got)
        except Exception:
            # heterog tuple, eg (1, np.array([1., 2.]))
            try:
                return all(self._do_check(w, g)
                           for w, g in zip(a_want, a_got))
            except TypeError:
                return False

    def _do_check(self, want, got):
        # This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
        try:
            if want == got:
                return True
        except Exception:
            pass
        return np.allclose(want, got, atol=self.atol, rtol=self.rtol)