def step(part, scope, state):
    """
    Step through one part of the document.

    1. Prose: pass through
    2. Code fence: record
    3. Code: evaluate
    4. Code with html output: print source, end code block, print html,
       start code block

    Returns a ``(parts, scope, state)`` triple where ``parts`` is a list of
    output document pieces.
    """
    # A fence line toggles "inside a code block": entering records the fence
    # text (so it can be re-opened after HTML output), leaving forgets it.
    if isinstance(part, (str, unicode)) and iscodefence(part):
        if 'code' in state:
            del state['code']
        else:
            state['code'] = part
        return [part], scope, state
    # Plain prose passes through untouched.
    if isinstance(part, (str, unicode)):
        return [part], scope, state
    if isinstance(part, doctest.Example):
        # Prefer evaluating as an expression so the value can be captured in
        # '_last'; otherwise execute for side effects and capture stdout.
        if valid_statement('_last = ' + part.source):
            code = compile('_last = ' + part.source, '<pymarkdown>', 'single')
            exec(code, scope)
            result = scope.pop('_last')
        else:
            with swap_stdout() as s:
                code = compile(part.source, '<pymarkdown>', 'single')
                exec(code, scope)
            result = s.read().rstrip().strip("'")
        if isassignment(part.source):
            # Assignments produce no visible output.
            out = [doctest.Example(part.source, '')]
        elif type_key(result) in custom_renderers:
            # NOTE(review): the membership test uses type_key(result) while
            # the lookup uses type_key(type(result)); if those two keys ever
            # differ this raises KeyError — confirm which form type_key
            # expects.
            func = custom_renderers[type_key(type(result))]
            out = [doctest.Example(part.source, '')] + func(result, state)
        elif hasattr(result, '__repr_html__'):
            # Close the current fence, emit raw HTML, then re-open the fence.
            out = [doctest.Example(part.source, ''),
                   closing_fence(state['code']),
                   result.__repr_html__(),
                   state['code']]
        elif hasattr(result, 'to_html'):
            out = [doctest.Example(part.source, ''),
                   closing_fence(state['code']),
                   result.to_html(),
                   state['code']]
        else:
            if not isinstance(result, str):
                result = repr(result)
            out = [doctest.Example(part.source, result)]
        # exec() injects __builtins__ into the namespace; drop it again so
        # the scope stays comparable to what the caller passed in.
        del scope['__builtins__']
        return out, scope, state
    raise NotImplementedError()
def output_difference(self, example, got, optionflags):
    """Render a diff, normalising want/got through ``self.fix`` first."""
    want, got = self.fix(example.want, got)
    if want == example.want:
        # Normalisation changed nothing; report against the original example.
        return super().output_difference(example, got, optionflags)
    fixed_example = doctest.Example(example.source, want, example.exc_msg,
                                    example.lineno, example.indent,
                                    example.options)
    return super().output_difference(fixed_example, got, optionflags)
def unitTestWithOutput(testname, studentfilename, outputstr, input_str, feedback):
    """Run the student's file and compare its output with the expected text.

    :param testname: test name used in the feedback
    :param studentfilename: path to the student's file; must exist
    :param outputstr: expected output to compare the student's output with
    :param input_str: stdin text fed to the student's program
    :param feedback: feedback object collecting success/failure/error results
    :return: True if the test ran through (pass or fail), False if the
        student's code could not be executed at all
    """
    xb, xo, xe = executefromfilename(studentfilename, input_str)
    if not xb:
        # Execution itself failed: report stdout + stderr as an error.
        feedback.addTestError(
            testname, " Problèmes avec votre code \n " + xo + "\n" + xe, "")
        return False
    oc = doctest.OutputChecker()
    res = oc.check_output(outputstr, xo, 0)
    #print("inputstr:", input_str,"attendu:", outputstr)
    #print(" recu:",xo)
    if res:
        feedback.addTestSuccess(testname, xo, outputstr)
    else:
        r = oc.output_difference(doctest.Example(" le test", outputstr), xo, 0)
        # Split doctest's report into "expected" and "got" parts when it has
        # the standard "Expected: ... Got: ..." shape.
        # NOTE(review): split("Got:") unpacks into exactly two names, so this
        # assumes "Got:" occurs only once in the report — verify.
        if r.startswith("Expected:") and "Got:" in r:
            want, got = r.split("Got:")
            want = want[9:]  # drop the 9-character "Expected:" prefix
        else:
            want = r
            got = ""
        feedback.addTestFailure(testname, got, want)
    return True
def collect(self):
    """Collect doctest items from the module behind ``self.fspath``.

    Yields pytest ``DoctestItem`` objects for every non-empty doctest found,
    after applying IGNORE_WARNINGS wrapping and REMOTE_DATA skipping.
    """
    # When running directly from pytest we need to make sure that we
    # don't accidentally import setup.py!
    if self.fspath.basename == "setup.py":
        return
    elif self.fspath.basename == "conftest.py":
        module = self.config.pluginmanager._importconftest(self.fspath)
    else:
        try:
            module = self.fspath.pyimport()
        except ImportError:
            pytest.skip("unable to import module %r" % self.fspath)
            # NOT USED: While correct, this breaks existing behavior.
            # if self.config.getvalue("doctest_ignore_import_errors"):
            #     pytest.skip("unable to import module %r" % self.fspath)
            # else:
            #     raise

    options = get_optionflags(self) | FIX

    # uses internal doctest module parsing mechanism
    finder = DocTestFinderPlus()
    runner = doctest.DebugRunner(verbose=False, optionflags=options,
                                 checker=OutputChecker())

    for test in finder.find(module):
        if test.examples:  # skip empty doctests
            # NOTE(review): this reads the bare name `config` while the rest
            # of the method uses `self.config` — presumably a module-level
            # pytest config; confirm it is in scope.
            if config.getoption('remote_data', 'none') != 'any':
                ignore_warnings_context_needed = False
                for example in test.examples:
                    # If warnings are to be ignored we need to catch them by
                    # wrapping the source in a context manager.
                    if example.options.get(IGNORE_WARNINGS, False):
                        example.source = (
                            "with _doctestplus_ignore_all_warnings():\n"
                            + indent(example.source, ' '))
                        ignore_warnings_context_needed = True
                    if example.options.get(REMOTE_DATA):
                        example.options[doctest.SKIP] = True

                # We insert the definition of the context manager to ignore
                # warnings at the start of the file if needed.
                if ignore_warnings_context_needed:
                    test.examples.insert(
                        0, doctest.Example(source=IGNORE_WARNINGS_CONTEXT,
                                           want=''))

            try:
                yield doctest_plugin.DoctestItem.from_parent(
                    self, name=test.name, runner=runner, dtest=test)
            except AttributeError:
                # pytest < 5.4
                yield doctest_plugin.DoctestItem(test.name, self, runner, test)
def failUnlessOutputCheckerMatch(self, want, got, msg=None):
    """ Fail unless the specified string matches the expected.

        :param want: The desired output pattern.
        :param got: The actual text to match.
        :param msg: An optional message to use on failure; a default diff
            message is built when omitted.
        :return: ``None``.
        :raises self.failureException: If the text does not match.

        Fail the test unless ``want`` matches ``got``, as determined by
        a ``doctest.OutputChecker`` instance. This is not an equality
        check, but a pattern match according to the ``OutputChecker``
        rules.
        """
    checker = doctest.OutputChecker()
    want = textwrap.dedent(want)
    source = ""
    example = doctest.Example(source, want)
    got = textwrap.dedent(got)
    # Bug fix: the original called the bare name `reduce`, which is not a
    # builtin on Python 3 and raised NameError. Fold the option flags with
    # an explicit loop instead (no new import required).
    checker_optionflags = 0
    for flag in [doctest.ELLIPSIS]:
        checker_optionflags |= flag
    if not checker.check_output(want, got, checker_optionflags):
        if msg is None:
            diff = checker.output_difference(example, got, checker_optionflags)
            msg = "\n".join([
                "Output received did not match expected output",
                "%(diff)s",
            ]) % vars()
        raise self.failureException(msg)
def failUnlessOutputCheckerMatch(self, want, got, msg=None):
    """ Fail unless the specified string matches the expected.

        :param want: The desired output pattern.
        :param got: The actual text to match.
        :param msg: A message to prefix on the failure message.
        :return: ``None``.
        :raises self.failureException: If the text does not match.

        Fail the test unless ``want`` matches ``got``, as determined by
        a ``doctest.OutputChecker`` instance. This is not an equality
        check, but a pattern match according to the ``OutputChecker``
        rules.
        """
    want = textwrap.dedent(want)
    got = textwrap.dedent(got)
    flags = functools.reduce(operator.or_, [
        doctest.ELLIPSIS,
    ])
    checker = doctest.OutputChecker()
    if checker.check_output(want, got, flags):
        return
    if msg is None:
        example = doctest.Example("", want)
        diff = checker.output_difference(example, got, flags)
        msg = "\n".join([
            "Output received did not match expected output",
            "{diff}",
        ]).format(diff=diff)
    raise self.failureException(msg)
def modify_example(example):
    """Rewrite a README doctest example into a form doctest can check.

    Returns a new ``doctest.Example`` with normalised source and want text;
    the original example's position/option metadata is preserved.
    """
    new_source = example.source
    new_want = example.want
    # README is formatted without "..." before multi-line input to make code easy to copy-paste
    if new_source.endswith('"""\n'):
        new_source += new_want + '\n"""'
        new_want = ""
    # doctest sometimes incorrectly includes markdown in returned example
    if new_want.endswith("```\n"):
        new_want = new_want[:new_want.index("```")]
    # README's serialize() has spaces instead of tabs to make output easier to read
    if new_want.startswith("# text"):
        new_want = re.sub(r" {2,}", "\t", new_want)
        new_want = new_want.rstrip() + "\n\n"
    # README cheats and prints return value without quotes
    new_want = repr(new_want)
    # README has examples with lists formatted in multiple lines to make them easier to read
    # NOTE(review): at this point new_want is a repr() string and therefore
    # starts with a quote character, so this startswith check appears to be
    # unreachable — confirm whether it was meant to run before the repr().
    if new_want.startswith(("[", "Token([", "Metadata([")):
        new_want = ReadmeTestParser.normalize_whitespace(new_want)
    example = doctest.Example(source=new_source, want=new_want,
                              exc_msg=example.exc_msg,
                              lineno=example.lineno,
                              indent=example.indent,
                              options=example.options)
    return example
def diff(self, want):
    r"""
    Analyse differences between observed MiniMock usage and that which
    we expected, if any.

    :param want: the :class:`Printer` output that results from expected
        usage of mocked objects
    :type want: string
    :rtype: a string summary of differences between the observed usage and
        the ``want`` parameter

    Example::

        >>> tt = TraceTracker()
        >>> m = Mock('mock_obj', tracker=tt)
        >>> m.some_meth('dummy argument')
        >>> tt.diff("does not match")
        "Expected:\n    does not match\nGot:\n    Called mock_obj.some_meth('dummy argument')\n"
        >>> tt.diff("Called mock_obj.some_meth('dummy argument')")
        ''
    """
    if not self.check(want):
        return self.checker.output_difference(doctest.Example("", want),
                                              self.dump(),
                                              optionflags=self.options)
    # doctest's output_difference always returns a diff, even if
    # there's no difference: short circuit that feature.
    return ''
def run_setup_cleanup(runner, testcodes, what):
    # type: (Any, List[TestCode], Any) -> bool
    """Execute setup/cleanup snippets as one simulated doctest; True on success."""
    examples = [doctest.Example(testcode.code, "", lineno=testcode.lineno)
                for testcode in testcodes]
    if not examples:
        return True
    # simulate a doctest with the code
    sim_doctest = doctest.DocTest(
        examples,
        {},
        "%s (%s code)" % (group.name, what),
        testcodes[0].filename,
        0,
        None,
    )
    sim_doctest.globs = ns
    failures_before = runner.failures
    self.type = "exec"  # the snippet may contain multiple statements
    runner.run(sim_doctest, out=self._warn_out, clear_globs=False)
    return runner.failures <= failures_before
def run_setup_cleanup(runner, testcodes, what):
    # type: (Any, List[TestCode], Any) -> bool
    """Run the given setup or cleanup code blocks as a single fake doctest."""
    examples = [
        doctest.Example(  # type: ignore
            doctest_encode(testcode.code, self.env.config.source_encoding),
            '',  # type: ignore  # NOQA
            lineno=testcode.lineno)
        for testcode in testcodes
    ]
    if not examples:
        return True
    # simulate a doctest with the code
    sim_doctest = doctest.DocTest(examples, {},  # type: ignore
                                  '%s (%s code)' % (group.name, what),
                                  filename_str, 0, None)
    sim_doctest.globs = ns
    failures_before = runner.failures
    self.type = 'exec'  # the snippet may contain multiple statements
    runner.run(sim_doctest, out=self._warn_out, clear_globs=False)
    return runner.failures <= failures_before
def parse(self, string, name='<string>'):
    """Parse doctests, neutralising examples unsuited to this Python version."""
    pieces = doctest.DocTestParser.parse(self, string, name)
    for index, piece in enumerate(pieces):
        if not isinstance(piece, doctest.Example):
            continue
        if not self.py_version_suitable(piece):
            # Replace with a trivially-true example so it always passes.
            pieces[index] = doctest.Example('1', '1')
    return pieces
def assert_xml_equal(expected_xml, got_xml):
    """Raise AssertionError with an XML diff unless the two documents match."""
    checker = LXMLOutputChecker()
    if checker.check_output(expected_xml, got_xml, 0):
        return
    diff = checker.output_difference(
        doctest.Example("", expected_xml), got_xml, 0)
    raise AssertionError(diff)
def assert_xml_equal(expected_xml, got_xml, context_explanation=""):
    """Raise AssertionError (with optional context prefix) unless XML matches."""
    checker = LXMLOutputChecker()
    if checker.check_output(expected_xml, got_xml, 0):
        return
    context = ("" if not context_explanation
               else "\n{0}\n".format(context_explanation))
    diff = checker.output_difference(
        doctest.Example("", expected_xml), got_xml, 0)
    raise AssertionError("{context_explanation}{xml_diff}".format(
        context_explanation=context,
        xml_diff=diff))
def output_difference(self, example, got, optionflags):
    """Produce a diff after normalising expected/actual text via ``self.fix``.

    Bug fix: the original reassigned ``want = example.want`` immediately
    after computing the fixed ``want``, which made the following
    ``want != example.want`` test always false and silently discarded the
    normalisation of the expected output. The stray reassignment is removed
    so the fixed ``want`` is actually used, matching the sibling
    OutputChecker implementation in this file.
    """
    want, got = self.fix(example.want, got)
    if want != example.want:
        example = doctest.Example(example.source, want, example.exc_msg,
                                  example.lineno, example.indent,
                                  example.options)
    return super(_DeferrredDataframeOutputChecker,
                 self).output_difference(example, got, optionflags)
def parse(self, string, name='<string>'):
    """Parse doctests, disabling examples gated on newer Python versions.

    Bug fix: the original compared ``sys.version[:2]`` — a *string* such as
    ``"2."`` — against tuples like ``(2, 4)``, so the intended version check
    never worked. Use ``sys.version_info[:2]`` instead, as the companion
    implementation elsewhere in this file already does.
    """
    pieces = doctest.DocTestParser.parse(self, string, name)
    for i, val in enumerate(pieces):
        if (isinstance(val, doctest.Example) and
                ((val.options.get(PY24, False) and
                  sys.version_info[:2] < (2, 4)) or
                 (val.options.get(PY25, False) and
                  sys.version_info[:2] < (2, 5)))):
            # Replace with a trivially-passing example.
            pieces[i] = doctest.Example('1', '1')
    return pieces
def test_warning_messages(self, want, got):
    """Check that a mismatch report includes the float-comparison warning."""
    flags = doctest.ELLIPSIS
    output_checker = tf_doctest_lib.TfDoctestOutputChecker()
    output_checker.check_output(want=want, got=got, optionflags=flags)
    example = doctest.Example('None', want=want)
    diff_report = output_checker.output_difference(
        example=example, got=got, optionflags=flags)
    self.assertIn("doesn't work if *some* of the", diff_report)
def modify_example(example):
    """Return a copy of *example* whose want text has u-prefixes added."""
    adjusted_want = Python2DocTestParser.add_u_before_strings(example.want)
    return doctest.Example(source=example.source,
                           want=adjusted_want,
                           exc_msg=example.exc_msg,
                           lineno=example.lineno,
                           indent=example.indent,
                           options=example.options)
def assertXmlEquivalent(self, got, expect):
    """Asserts both xml parse to the same results

    `got` may be an XML string or lxml Element
    """
    if isinstance(got, etree._Element):
        got = etree.tostring(got)
    checker = LXMLOutputChecker()
    if checker.check_output(expect, got, PARSE_XML):
        return
    message = checker.output_difference(
        doctest.Example("", expect), got, PARSE_XML)
    self.fail(message)
def assert_doctest_equal(got, want, options=('ELLIPSIS',)):
    """Assert that *got* matches *want* under doctest comparison rules.

    ``options`` names doctest option flags (e.g. ``'ELLIPSIS'``) that are
    OR-ed together before comparing. Raises AssertionError with doctest's
    difference report on mismatch.
    """
    import doctest
    assert isinstance(got, str), got
    flags = 0
    for option_name in options:
        flags |= doctest.OPTIONFLAGS_BY_NAME[option_name]
    checker = doctest.OutputChecker()
    if checker.check_output(want, got, flags):
        return
    report = checker.output_difference(
        doctest.Example('dummy', want),
        got + '\n',
        flags,
    )
    raise AssertionError(report.rstrip('\n'))
def run_setup_cleanup(runner, testcodes, what):
    """Run setup/cleanup snippets as one simulated doctest; True on success."""
    examples = [doctest.Example(testcode.code, '', lineno=testcode.lineno)
                for testcode in testcodes]
    if not examples:
        return True
    # simulate a doctest with the code
    sim_doctest = doctest.DocTest(examples, {},
                                  '%s (%s code)' % (group.name, what),
                                  filename, 0, None)
    failures_before = runner.failures
    self.type = 'exec'  # the snippet may contain multiple statements
    runner.run(sim_doctest, out=self._warn_out)
    return runner.failures <= failures_before
def parse(self, string, name='<string>'):
    """
    Divide the given string into examples and intervening text,
    and return them as a list of alternating Examples and strings.
    Line numbers for the Examples are 0-based.  The optional
    argument `name` is a name identifying this string, and is only
    used for error messages.

    This is a near-copy of ``doctest.DocTestParser.parse`` that differs only
    in how the Example's ``indent`` is computed (see the marked line below).
    """
    string = string.expandtabs()
    # If all lines begin with the same indentation, then strip it.
    min_indent = self._min_indent(string)
    if min_indent > 0:
        string = '\n'.join([l[min_indent:] for l in string.split('\n')])
    output = []
    charno, lineno = 0, 0
    # Find all doctest examples in the string:
    for m in self._EXAMPLE_RE.finditer(string):
        # Add the pre-example text to `output`.
        output.append(string[charno:m.start()])
        # Update lineno (lines before this example)
        lineno += string.count('\n', charno, m.start())
        # Extract info from the regexp match.
        (source, options, want, exc_msg) = \
            self._parse_example(m, name, lineno)
        # Create an Example, and add it to the list.
        if not self._IS_BLANK_OR_COMMENT(source):
            # @@: Erg, this is the only line I need to change...
            # The custom regex may match either an 'indent' or a 'runindent'
            # group; whichever matched contributes to the stored indent.
            output.append(doctest.Example(
                source, want, exc_msg,
                lineno=lineno,
                indent=min_indent + len(m.group('indent') or
                                        m.group('runindent')),
                options=options))
        # Update lineno (lines inside this example)
        lineno += string.count('\n', m.start(), m.end())
        # Update charno.
        charno = m.end()
    # Add any remaining post-example text to `output`.
    output.append(string[charno:])
    return output
def parse(self, string, name='<string>'):
    """Parse doctests, patching Py2-isms and disabling version-gated examples."""
    if sys.version_info > (3, 0):
        # HACK: Convert <u'string'> to <'strings'>
        string = string.replace("u'", "'")
        # HACK: Convert exception class names
        string = string.replace(
            'StructuringError:',
            'epydoc.markup.epytext.StructuringError:')
        string = string.replace(
            'InvalidDottedName:',
            'epydoc.apidoc.DottedName.InvalidDottedName:')
    pieces = doctest.DocTestParser.parse(self, string, name)
    for i, val in enumerate(pieces):
        if not isinstance(val, doctest.Example):
            continue
        version = sys.version_info[:2]
        gated_out = (
            (val.options.get(PY24, False) and version < (2, 4)) or
            (val.options.get(PY25, False) and version < (2, 5)) or
            (val.options.get(PY2ONLY, False) and version >= (3, 0)))
        if gated_out:
            # Neutralise the example so it trivially passes.
            pieces[i] = doctest.Example('1', '1')
    return pieces
def get_examples(self, string: str, name: str = '<string>') -> Iterable[doctest.Example]:
    """Yield doctest examples found in fenced cells, honouring skip comments."""
    # A file-level "doctest skip all" HTML comment disables everything.
    if re.search('<!--.*?doctest.*?skip.*?all.*?-->', string, re.IGNORECASE):
        return
    for match in self.fence_cell_re.finditer(string):
        # Per-cell skip comment.
        if re.search('doctest.*skip', match.group(0), re.IGNORECASE):
            continue
        parts = match.groupdict()
        source_text = textwrap.dedent(parts['doctest'])
        expected = parts['output']
        if expected is not None:
            expected = textwrap.dedent(expected)
        start_line = string[:match.start()].count('\n') + 1
        yield doctest.Example(lineno=start_line,
                              source=source_text,
                              want=expected)
def separate_fence(part, endl='\n'):
    """ Separate code fences from prose or example sections

    >> separate_fence('Hello\n```python')
    ['Hello', '```python']
    >> separate_fence(doctest.Example('1 + 1', '2\n```'))
    [Example('1 + 1', '2'), '```']
    """
    if isinstance(part, (str, unicode)):
        # Group consecutive fence/non-fence lines and rejoin each run.
        grouped = itertools.groupby(part.split('\n'), iscodefence)
        return ['\n'.join(run) for _, run in grouped]
    if isinstance(part, doctest.Example):
        want_lines = part.want.rstrip().split('\n')
        fence_flags = list(map(iscodefence, want_lines))
        if not any(fence_flags):
            return [part]
        cut = fence_flags.index(True)
        return [
            doctest.Example(part.source, '\n'.join(want_lines[:cut])),
            want_lines[cut],
            '\n'.join(want_lines[cut + 1:])
        ]
def test_step():
    """Exercise step() on prose, fences, expressions, assignments, HTML and print."""
    # Prose passes through unchanged.
    out, scope, state = step("prose", {'x': 1}, {})
    assert (out, scope, state) == (["prose"], {'x': 1}, {})

    # An opening fence records itself under state['code'].
    out, scope, state = step("```Python", {'x': 1}, {})
    assert (out, scope, state) == (["```Python"], {
        'x': 1
    }, {
        'code': '```Python'
    })

    # Remove code state
    out, scope, state = step("```", {'x': 1}, {'code': '```Python'})
    assert (out, scope, state) == (["```"], {'x': 1}, {})

    # Expressions are evaluated; the want text is replaced by the result.
    a = doctest.Example("x + 1", "3")
    b = doctest.Example("x + 1", "2")
    out, scope, state = step(a, {'x': 1}, {'code': '```Python'})
    assert (out, scope, state) == ([b], {'x': 1}, {'code': '```Python'})

    # Assignments update the scope and produce no output.
    a = doctest.Example("y = x + 1", "")
    out, scope, state = step(a, {'x': 1}, {'code': '```Python'})
    assert (out, scope, state) == ([a], {
        'x': 1,
        'y': 2
    }, {
        'code': '```Python'
    })

    # HTML-capable results close the fence, emit HTML, then re-open it.
    a = doctest.Example("Shout('Hello!')", '')
    out, scope, state = step(a, {'Shout': Shout}, {'code': '```Python'})
    assert out == [a, '```', Shout('Hello!').__repr_html__(), '```Python']
    assert state == {'code': '```Python'}

    # Printed output is captured and becomes the want text.
    a = doctest.Example("print(5)", '')
    b = doctest.Example("print(5)", '5')
    out, scope, state = step(a, {}, {'code': '```Python'})
    assert (out, scope, state) == ([b], {}, {'code': '```Python'})
def parse(self, s, name=None):
    """Parse doctests and apply skip/requires directives found in the text.

    Scans the intervening prose for ``doctest-skip``, ``doctest-skip-all``
    and ``doctest-requires`` comments (using the comment character for the
    file's extension) and marks the affected examples with ``doctest.SKIP``.
    Also wraps IGNORE_WARNINGS examples in a warning-suppressing context
    manager and honours the remote-data option.
    """
    result = doctest.DocTestParser.parse(self, s, name=name)

    # result is a sequence of alternating text chunks and
    # doctest.Example objects.  We need to look in the text
    # chunks for the special directives that help us determine
    # whether the following examples should be skipped.

    required = []
    skip_next = False
    skip_all = False

    ext = os.path.splitext(name)[1] if name else '.rst'
    if ext not in comment_characters:
        warnings.warn("file format '{}' is not recognized, assuming "
                      "'{}' as the comment character.".format(
                          ext, comment_characters['.rst']))
        ext = '.rst'
    comment_char = comment_characters[ext]

    ignore_warnings_context_needed = False

    for entry in result:

        if isinstance(entry, str) and entry:
            # A fresh text chunk resets the per-example directives.
            required = []
            skip_next = False
            lines = entry.strip().splitlines()

            if any([re.match(
                    '{} doctest-skip-all'.format(comment_char),
                    x.strip()) for x in lines]):
                skip_all = True
                continue

            if not len(lines):
                continue

            # We allow last and second to last lines to match to allow
            # special environment to be in between, e.g. \begin{python}
            last_lines = lines[-2:]
            matches = [re.match(
                r'{}\s+doctest-skip\s*::(\s+.*)?'.format(comment_char),
                last_line) for last_line in last_lines]

            if len(matches) > 1:
                match = matches[0] or matches[1]
            else:
                match = matches[0]

            if match:
                # An optional marker restricts the skip to a platform.
                marker = match.group(1)
                if (marker is None or
                        (marker.strip() == 'win32' and
                         sys.platform == 'win32')):
                    skip_next = True
                    continue

            matches = [re.match(
                r'{}\s+doctest-requires\s*::\s+(.*)'.format(comment_char),
                last_line) for last_line in last_lines]

            if len(matches) > 1:
                match = matches[0] or matches[1]
            else:
                match = matches[0]

            if match:
                # 'a a' or 'a,a' or 'a, a'-> [a, a]
                required = re.split(r'\s*[,\s]\s*', match.group(1))

        elif isinstance(entry, doctest.Example):
            # If warnings are to be ignored we need to catch them by
            # wrapping the source in a context manager.
            if entry.options.get(IGNORE_WARNINGS, False):
                entry.source = ("with _doctestplus_ignore_all_warnings():\n"
                                + indent(entry.source, ' '))
                ignore_warnings_context_needed = True

            has_required_modules = DocTestFinderPlus.check_required_modules(
                required)
            if skip_all or skip_next or not has_required_modules:
                entry.options[doctest.SKIP] = True

            if config.getoption('remote_data', 'none') != 'any' and \
                    entry.options.get(REMOTE_DATA):
                entry.options[doctest.SKIP] = True

    # We insert the definition of the context manager to ignore
    # warnings at the start of the file if needed.
    if ignore_warnings_context_needed:
        result.insert(0, doctest.Example(source=IGNORE_WARNINGS_CONTEXT,
                                         want=''))

    return result
def assertXMLEqual(self, s1, s2, entity=None):
    """Assert that the two XML fragments are equal, tolerating the following
    variations:

    * whitespace outside of element content and attribute values.
    * order of attributes.
    * order of certain child elements (see `sort_elements` in this
      function).

    Parameters:

    * s1 and s2 are string representations of an XML fragment. The
      strings may be Unicode strings or UTF-8 encoded byte strings.
      The strings may contain an encoding declaration even when
      they are Unicode strings.

      Note: An encoding declaration is the `encoding` attribute in the
      XML declaration (aka XML processing statement), e.g.:
          <?xml version="1.0" encoding="utf-8" ?>

    * entity: human-readable identification used in the failure message.
    """
    # Ensure Unicode strings and remove encoding from XML declaration
    encoding_pattern = re.compile(
        r'^<\?xml +(([a-zA-Z0-9_]+=".*")?) +' +
        r'encoding="utf-8" +(([a-zA-Z0-9_]+=".*")?) *\?>')
    encoding_repl = r'<?xml \1 \3 ?>'
    s1 = re.sub(encoding_pattern, encoding_repl, _ensure_unicode(s1))
    s2 = re.sub(encoding_pattern, encoding_repl, _ensure_unicode(s2))

    parser = etree.XMLParser(remove_blank_text=True)
    x1 = etree.XML(s1, parser=parser)
    x2 = etree.XML(s2, parser=parser)

    # Sort certain elements
    def sort_children(root, sort_elements):
        # Reorder each group of same-tag siblings by the given attribute so
        # element order does not affect the comparison.
        for tag, attr in sort_elements:
            # elems is a list of elements with this tag name
            elems = root.xpath("//*[local-name() = $tag]", tag=tag)
            if len(elems) > 0:
                parent = elems[0].getparent()
                first = None
                after = None
                for i in range(0, len(parent)):
                    if parent[i].tag == tag and first is None:
                        first = i
                    if parent[i].tag != tag and first is not None:
                        after = i
                # The following changes the original XML tree:
                # The following pylint warning can safely be disabled, see
                # http://stackoverflow.com/a/25314665
                # pylint: disable=cell-var-from-loop
                parent[first:after] = sorted(elems,
                                             key=lambda e: e.attrib[attr])

    sort_elements = [
        # Sort sibling elements with <first> tag by its <second> attribute
        ("IPARAMVALUE", "NAME"),
        ("PROPERTY", "NAME"),
        ("PARAMETER", "NAME"),
    ]
    sort_children(x1, sort_elements)
    sort_children(x2, sort_elements)

    ns1 = _ensure_unicode(etree.tostring(x1))
    ns2 = _ensure_unicode(etree.tostring(x2))

    checker = doctestcompare.LXMLOutputChecker()

    # This tolerates differences in whitespace and attribute order
    if not checker.check_output(ns1, ns2, 0):
        diff = checker.output_difference(doctest.Example("", ns1), ns2, 0)
        raise AssertionError("XML is not as expected in %s: %s"%\
                             (entity, diff))
def test_group(self, group):
    # type: (TestGroup) -> None
    """Run one doctest group: its setup code, its tests, then its cleanup.

    All snippets in the group share the namespace ``ns`` so state carries
    across setup, tests and cleanup.
    """
    ns = {}  # type: Dict

    def run_setup_cleanup(runner, testcodes, what):
        # type: (Any, List[TestCode], Any) -> bool
        # Run setup/cleanup snippets as one simulated doctest; True on success.
        examples = []
        for testcode in testcodes:
            examples.append(
                doctest.Example(  # type: ignore
                    doctest_encode(testcode.code,
                                   self.env.config.source_encoding),
                    '',  # type: ignore  # NOQA
                    lineno=testcode.lineno))
        if not examples:
            return True
        # simulate a doctest with the code
        sim_doctest = doctest.DocTest(examples, {},
                                      '%s (%s code)' % (group.name, what),
                                      testcodes[0].filename, 0, None)
        sim_doctest.globs = ns
        old_f = runner.failures
        self.type = 'exec'  # the snippet may contain multiple statements
        runner.run(sim_doctest, out=self._warn_out, clear_globs=False)
        if runner.failures > old_f:
            return False
        return True

    # run the setup code
    if not run_setup_cleanup(self.setup_runner, group.setup, 'setup'):
        # if setup failed, don't run the group
        return

    # run the tests
    for code in group.tests:
        if len(code) == 1:
            # ordinary doctests (code/output interleaved)
            try:
                test = parser.get_doctest(  # type: ignore
                    doctest_encode(code[0].code,
                                   self.env.config.source_encoding),
                    {},  # type: ignore  # NOQA
                    group.name, code[0].filename, code[0].lineno)
            except Exception:
                logger.warning(__('ignoring invalid doctest code: %r'),
                               code[0].code,
                               location=(code[0].filename, code[0].lineno))
                continue
            if not test.examples:
                continue
            for example in test.examples:
                # apply directive's comparison options
                new_opt = code[0].options.copy()
                new_opt.update(example.options)
                example.options = new_opt
            self.type = 'single'  # as for ordinary doctests
        else:
            # testcode and output separate
            output = code[1] and code[1].code or ''
            options = code[1] and code[1].options or {}
            # disable <BLANKLINE> processing as it is not needed
            options[doctest.DONT_ACCEPT_BLANKLINE] = True
            # find out if we're testing an exception
            m = parser._EXCEPTION_RE.match(output)  # type: ignore
            if m:
                exc_msg = m.group('msg')
            else:
                exc_msg = None
            example = doctest.Example(  # type: ignore
                doctest_encode(code[0].code,
                               self.env.config.source_encoding),
                output,  # type: ignore  # NOQA
                exc_msg=exc_msg, lineno=code[0].lineno,
                options=options)
            test = doctest.DocTest([example], {}, group.name,  # type: ignore
                                   code[0].filename, code[0].lineno, None)
            self.type = 'exec'  # multiple statements again
        # DocTest.__init__ copies the globs namespace, which we don't want
        test.globs = ns
        # also don't clear the globs namespace after running the doctest
        self.test_runner.run(test, out=self._warn_out, clear_globs=False)

    # run the cleanup
    run_setup_cleanup(self.cleanup_runner, group.cleanup, 'cleanup')
def assertXMLEqual(s_act, s_exp, entity):
    """
    Assert that the two XML fragments are equal, tolerating the following
    variations:

    * whitespace outside of element content and attribute values.
    * order of attributes.
    * order of certain child elements (see `sort_elements` in this
      function).

    Parameters:

    * s_act and s_exp are string representations of an XML fragment. The
      strings may be Unicode strings or UTF-8 encoded byte strings.
      The strings may contain an encoding declaration even when
      they are Unicode strings.

      Note: An encoding declaration is the `encoding` attribute in the
      XML declaration (aka XML processing statement), e.g.:
          <?xml version="1.0" encoding="utf-8" ?>

    * entity (string): A human readable identification for what is compared.
    """
    # Make sure that None values are already excluded by the caller
    assert isinstance(s_act, (six.text_type, six.binary_type))
    assert isinstance(s_exp, (six.text_type, six.binary_type))

    # Ensure Unicode strings and remove encoding from XML declaration
    encoding_pattern = re.compile(
        r'^<\?xml +(([a-zA-Z0-9_]+=".*")?) +' +
        r'encoding="utf-8" +(([a-zA-Z0-9_]+=".*")?) *\?>')
    encoding_repl = r'<?xml \1 \3 ?>'
    s_act = re.sub(encoding_pattern, encoding_repl, _ensure_unicode(s_act))
    s_exp = re.sub(encoding_pattern, encoding_repl, _ensure_unicode(s_exp))

    parser = etree.XMLParser(remove_blank_text=True)
    try:
        # Note: lxml.etree.XML() has issues with unicode strings as input,
        # so we pass UTF-8 encoded byte strings. See lxml bug
        # https://bugs.launchpad.net/lxml/+bug/1902364 for a similar issue
        # with lxml.etree.fromstring().
        x_act = etree.XML(_ensure_bytes(s_act), parser=parser)
        x_exp = etree.XML(_ensure_bytes(s_exp), parser=parser)
    except etree.XMLSyntaxError as exc:
        raise AssertionError("XML cannot be validated for %s: %s" %
                             (entity, exc))

    def sort_embedded(root, sort_elements):
        """
        Helper function for `sort_children()`, in support of embedded
        objects.

        This function invokes sort_children() on each embedded object in
        `root`, after unembedding the embedded object.

        Parameters:
          root (etree.Element): XML tree of the CIM-XML representation of
            the CIM element that contains an embedded CIM object (e.g. the
            CIM element may be an INSTANCE XML element and one of its
            PROPERTY child elements has a value that is an embedded CIM
            instance).
        """
        emb_elems = root.xpath("//*[@EmbeddedObject or @EMBEDDEDOBJECT]"
                               "/*[local-name() = 'VALUE' or "
                               "local-name() = 'VALUE.ARRAY']")
        for emb_elem in emb_elems:
            elem = xml_unembed(emb_elem.text)
            sort_children(elem, sort_elements)
            emb_elem.text = xml_embed(elem)

    def sort_children(root, sort_elements):
        """
        Sort certain elements in the `root` parameter to facilitate
        comparison of two XML documents.

        In addition, make sure this is also applied to any embedded
        objects (in their unembedded state).
        """
        sort_embedded(root, sort_elements)
        for tag, attr in sort_elements:
            # elems is a list of elements with this tag name
            elems = root.xpath("//*[local-name() = $tag]", tag=tag)
            if elems:
                parent = elems[0].getparent()
                first = None
                after = None
                for i, p in enumerate(parent):
                    # TODO 6/18 AM: Loop above should probably be on elems
                    if p.tag == tag and first is None:
                        first = i
                    if p.tag != tag and first is not None:
                        after = i
                # The following changes the original XML tree:
                # The following pylint warning can safely be disabled, see
                # https://stackoverflow.com/a/25314665
                # pylint: disable=cell-var-from-loop
                parent[first:after] = sorted(elems,
                                             key=lambda e: e.attrib[attr])

    sort_elements = [
        # Sort sibling elements with <first> tag by its <second> attribute
        ("IPARAMVALUE", "NAME"),
        ("PROPERTY", "NAME"),
        ("PROPERTY.ARRAY", "NAME"),
        ("PARAMETER", "NAME"),
        ("KEYBINDING", "NAME"),
    ]
    sort_children(x_act, sort_elements)
    sort_children(x_exp, sort_elements)

    ns_act = _ensure_unicode(etree.tostring(x_act))
    ns_exp = _ensure_unicode(etree.tostring(x_exp))

    checker = doctestcompare.LXMLOutputChecker()

    # This tolerates differences in whitespace and attribute order
    if not checker.check_output(ns_act, ns_exp, 0):
        diff = checker.output_difference(doctest.Example("", ns_exp),
                                         ns_act, 0)
        raise AssertionError("XML is not as expected in %s: %s" %
                             (entity, diff))
def parse_rst_ipython_tests(rst, name, extraglobs=None, optionflags=None):
    """Extracts examples from an rst file and produce a test suite by running
    them through pandas to get the expected outputs.

    Returns a ``doctest.DocTest`` whose examples pair each extracted source
    snippet with the output actually produced by an IPython session.
    """

    # Optional dependency.
    import IPython
    from traitlets.config import Config

    def get_indent(line):
        # Number of leading whitespace characters.
        return len(line) - len(line.lstrip())

    def is_example_line(line):
        # Keep only lines that are neither comments nor ':option:' markers.
        line = line.strip()
        return line and not line.startswith(
            '#') and not line[0] == line[-1] == ':'

    IMPORT_PANDAS = 'import pandas as pd'

    example_srcs = []
    # Sentinel (None, 'END') terminates the scan below.
    lines = iter([(lineno, line.rstrip())
                  for lineno, line in enumerate(rst.split('\n'))
                  if is_example_line(line)] + [(None, 'END')])

    # https://ipython.readthedocs.io/en/stable/sphinxext.html
    lineno, line = next(lines)
    while True:
        if line == 'END':
            break
        if line.startswith('.. ipython::'):
            lineno, line = next(lines)
            indent = get_indent(line)
            example = []
            example_srcs.append((lineno, example))
            while get_indent(line) >= indent:
                # Verbatim/savefig blocks cannot be replayed; drop them.
                if '@verbatim' in line or ':verbatim:' in line or '@savefig' in line:
                    example_srcs.pop()
                    break
                # Strip the IPython prompt and continuation markers.
                line = re.sub(r'In \[\d+\]: ', '', line)
                line = re.sub(r'\.\.\.+:', '', line)
                example.append(line[indent:])
                lineno, line = next(lines)
                # A new statement at the same indent starts a new example
                # (unless it is just a closing bracket continuation).
                if get_indent(line) == indent and line[indent] not in ')]}':
                    example = []
                    example_srcs.append((lineno, example))
        else:
            lineno, line = next(lines)

    # TODO(robertwb): Would it be better to try and detect/compare the actual
    # objects in two parallel sessions than make (stringified) doctests?
    examples = []

    config = Config()
    config.HistoryManager.hist_file = ':memory:'
    config.InteractiveShell.autocall = False
    config.InteractiveShell.autoindent = False
    config.InteractiveShell.colors = 'NoColor'

    set_pandas_options()
    IP = IPython.InteractiveShell.instance(config=config)
    IP.run_cell(IMPORT_PANDAS + '\n')
    IP.run_cell('import numpy as np\n')
    try:
        stdout = sys.stdout
        for lineno, src in example_srcs:
            # Capture the output of each snippet via a redirected stdout.
            sys.stdout = cout = StringIO()
            src = '\n'.join(src)
            if src == IMPORT_PANDAS:
                continue
            IP.run_cell(src + '\n')
            output = cout.getvalue()
            if output:
                # Strip the prompt.
                # TODO(robertwb): Figure out how to suppress this.
                output = re.sub(r'^Out\[\d+\]:\s*', '', output)
            examples.append(doctest.Example(src, output, lineno=lineno))
    finally:
        sys.stdout = stdout

    return doctest.DocTest(
        examples, dict(extraglobs or {}, np=np), name, name, None, None)