def transform_document(self, document):
    """Apply this object's transforms to the given ``document``.

    Args:
        document: Document tree to transform in place.

    See Also:
        :attr:`transforms`.
    """
    # Nothing to do when no transforms are configured.
    if not self.transforms:
        return
    runner = Transformer(document)
    runner.add_transforms(self.transforms)
    runner.apply_transforms()
def process_text(self, input_text):
    """Parse *input_text* as reStructuredText and harvest its docinfo.

    The text is parsed with docutils, the title/subtitle and the
    bibliographic fields are collected into a dict, and that dict is
    passed to :meth:`update_all_args`.  The text itself is returned
    unchanged.

    Args:
        input_text: reStructuredText source to inspect.

    Returns:
        ``input_text``, unmodified.

    Raises:
        dexy.exceptions.InternalDexyProblem: if a title/subtitle node
            does not have exactly one child.
    """
    warning_stream = io.StringIO()

    # Parse the input text using default settings.
    settings = OptionParser(components=(Parser,)).get_default_values()
    # BUG FIX: the original code put ``warning_stream`` into a
    # ``settings_overrides`` dict that was never applied anywhere, so
    # the stream stayed empty and docutils warnings went to stderr.
    # Assign it directly onto the settings object instead.
    settings.warning_stream = warning_stream
    parser = Parser()
    document = new_document('rstinfo', settings)
    parser.parse(input_text, document)

    # Transform the parse tree so that the bibliographic data is
    # promoted from a mere field list to a `docinfo` node.
    t = Transformer(document)
    t.add_transforms([frontmatter.DocTitle, frontmatter.DocInfo])
    t.apply_transforms()

    info = {}

    # Process individual nodes which are not part of docinfo.
    single_nodes = [
        docutils.nodes.title,
        docutils.nodes.subtitle,
    ]
    for node in single_nodes:
        for doc in document.traverse(node):
            if len(doc.children) != 1:
                msg = "Expected node %s to only have 1 child."
                raise dexy.exceptions.InternalDexyProblem(msg % node)
            info[doc.tagname] = doc.children[0].astext()

    # Find the `docinfo` node and extract its children. Non-standard
    # bibliographic fields will have the `tagname` 'field' and two
    # children, the name and the value. Standard fields simply keep
    # the name as the `tagname`.
    for doc in document.traverse(docutils.nodes.docinfo):
        for element in doc.children:
            if element.tagname == 'field':
                name, value = element.children
                name, value = name.astext(), value.astext()
            else:
                name, value = element.tagname, element.astext()
            info[name] = value

    self.log_debug("found info:\n%s\n" % info)
    self.update_all_args(info)
    self.log_debug("docutils warnings:\n%s\n" % warning_stream.getvalue())
    return input_text
def test_parser(sphinx_run, file_regression):
    """Build the project, regression-check the doctree, and verify that
    the glue cache contains the expected keys and empties on clear_doc."""
    sphinx_run.build()
    assert sphinx_run.warnings() == ""

    document = sphinx_run.get_doctree()
    runner = Transformer(document)
    runner.add_transforms([CellOutputsToNodes, transform.PasteNodesToDocutils])
    runner.apply_transforms()
    file_regression.check(document.pformat(), extension=".xml")

    glue_domain = NbGlueDomain.from_env(sphinx_run.app.env)
    expected_keys = {
        "key_text1",
        "key_float",
        "key_undisplayed",
        "key_df",
        "key_plt",
        "sym_eq",
    }
    assert set(glue_domain.cache) == expected_keys

    glue_domain.clear_doc("with_glue")
    assert glue_domain.cache == {}
    assert glue_domain.docmap == {}
def test_parser(mock_document, get_notebook, file_regression):
    """Parse the glue notebook into *mock_document*, regression-check the
    transformed doctree, and verify glue-cache bookkeeping."""
    parser = NotebookParser()
    parser.parse(get_notebook("with_glue.ipynb").read_text(), mock_document)

    transformer = Transformer(mock_document)
    transformer.add_transforms(
        [CellOutputsToNodes, transform.PasteNodesToDocutils])
    transformer.apply_transforms()
    file_regression.check(mock_document.pformat(), extension=".xml")

    # CONSISTENCY FIX: the original reached the Sphinx env via
    # ``mock_document.document.settings.env`` here but via
    # ``mock_document.settings.env`` in the ``clear_doc`` call below.
    # Bind it once (keeping the first, originally-working access path)
    # and reuse it for both.
    env = mock_document.document.settings.env
    glue_domain = NbGlueDomain.from_env(env)
    assert set(glue_domain.cache) == {
        "key_text1",
        "key_float",
        "key_undisplayed",
        "key_df",
        "key_plt",
        "sym_eq",
    }

    glue_domain.clear_doc(env.docname)
    assert glue_domain.cache == {}
    assert glue_domain.docmap == {}
def apply_transforms(self):
    """Apply transforms on current document tree."""
    # Skip the Transformer machinery entirely when there is nothing to run.
    if not self.transforms:
        return
    runner = Transformer(self.definition.doc_block.document)
    runner.add_transforms(self.transforms)
    runner.apply_transforms()