Example #1
    def transform_document(self, document):
        """Apply transforms to the given ``document``.

        Args:
            document: Document tree.

        See Also:
            :attr:`transforms`.
        """
        if self.transforms:
            transformer = Transformer(document)
            transformer.add_transforms(self.transforms)
            transformer.apply_transforms()
Example #2
File: rst.py Project: dexy/dexy
    def process_text(self, input_text):
        warning_stream = io.StringIO()
        settings_overrides = {}
        settings_overrides['warning_stream'] = warning_stream

        # Parse the input text using default settings
        settings = OptionParser(components=(Parser,)).get_default_values()
        parser = Parser()
        document = new_document('rstinfo', settings)
        parser.parse(input_text, document)

        # Transform the parse tree so that the bibliographic data is
        # promoted from a mere field list to a `docinfo` node.
        t = Transformer(document)
        t.add_transforms([frontmatter.DocTitle, frontmatter.DocInfo])
        t.apply_transforms()

        info = {}

        # Process individual nodes which are not part of docinfo.
        single_nodes = [
                docutils.nodes.title,
                docutils.nodes.subtitle,
                ]
        for node in single_nodes:
            for doc in document.traverse(node):
                if len(doc.children) != 1:
                    msg = "Expected node %s to only have 1 child."
                    raise dexy.exceptions.InternalDexyProblem(msg % node)
                info[doc.tagname] = doc.children[0].astext()

        # Find the `docinfo` node and extract its children. Non-standard
        # bibliographic fields will have the `tagname` 'field' and two
        # children, the name and the value.  Standard fields simply keep
        # the name as the `tagname`.
        for doc in document.traverse(docutils.nodes.docinfo):
            for element in doc.children:
                if element.tagname == 'field':
                    name, value = element.children
                    name, value = name.astext(), value.astext()
                else:
                    name, value = element.tagname, element.astext()
                info[name] = value

        self.log_debug("found info:\n%s\n" % info)
        self.update_all_args(info)
        self.log_debug("docutils warnings:\n%s\n" % warning_stream.getvalue())

        return input_text
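
The promotion step above can also be exercised on its own. The following is a minimal, self-contained sketch (not taken from the dexy filter) that parses a small reStructuredText snippet, applies the same DocTitle and DocInfo transforms, and walks the resulting `docinfo` node; the sample source text is an assumption chosen purely for illustration.

import docutils.nodes
from docutils.frontend import OptionParser
from docutils.parsers.rst import Parser
from docutils.transforms import Transformer, frontmatter
from docutils.utils import new_document

source = "Title\n=====\n\n:Author: Ada\n:Custom-Field: some value\n\nBody text.\n"

# Parse with default settings, as in the filter above.
settings = OptionParser(components=(Parser,)).get_default_values()
document = new_document('<docinfo-sketch>', settings)
Parser().parse(source, document)

# DocTitle promotes the lone section title to the document title;
# DocInfo then promotes the leading field list to a `docinfo` node.
t = Transformer(document)
t.add_transforms([frontmatter.DocTitle, frontmatter.DocInfo])
t.apply_transforms()

for docinfo in document.traverse(docutils.nodes.docinfo):
    for element in docinfo.children:
        if element.tagname == 'field':
            name, value = element.children
            print(name.astext(), value.astext())      # non-standard field, e.g. Custom-Field
        else:
            print(element.tagname, element.astext())  # standard field, e.g. author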
Example #3
def transform_doctree(unid,
                      doctree,
                      transforms,
                      pickle_receiver=None,
                      report_level=1):
    """
    Run the transforms on the document tree.  This may modify the tree,
    which will have an effect later on if using that stored document tree as
    a source for rendering.
    """
    # Create transformer.
    doctree.transformer = Transformer(doctree)

    # Add a transform to remove system messages.
    doctree.transformer.add_transform(FilterMessages, priority=1)

    # Populate with transforms.
    for tclass, storage in transforms:
        assert issubclass(tclass, Extractor)
        doctree.transformer.add_transform(tclass,
                                          unid=unid,
                                          storage=storage,
                                          pickle_receiver=pickle_receiver)

    # Create an appropriate reporter.
    fend = docutils.frontend.OptionParser()
    settings = fend.get_default_values()
    errstream = io.StringIO()
    settings.update(
        {
            'warning_stream': errstream,
            'error_encoding': 'UTF-8',
            'halt_level': 100,  # never halt
            'report_level': report_level,
        },
        fend)
    doctree.reporter = docutils.utils.new_reporter('', settings)

    # Apply the transforms.
    try:
        doctree.transformer.apply_transforms()
    except Exception:
        traceback.print_exc(file=sys.stderr)
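
For orientation, a hedged sketch of how this helper might be invoked; `LinkExtractor` and `link_storage` are hypothetical placeholders for the project-specific Extractor subclasses and their storage backends, which are not shown here.

# Hypothetical call; every name below except transform_doctree stands in for
# project-specific objects that the excerpt above does not define.
transform_doctree(
    unid='doc-0001',                              # unique id for this document
    doctree=doctree,                              # a previously parsed document tree
    transforms=[(LinkExtractor, link_storage)],   # (Extractor subclass, storage) pairs
    pickle_receiver=None,
    report_level=1,
)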
Example #4
def test_parser(sphinx_run, file_regression):
    sphinx_run.build()
    # print(sphinx_run.status())
    assert sphinx_run.warnings() == ""
    document = sphinx_run.get_doctree()
    transformer = Transformer(document)
    transformer.add_transforms([CellOutputsToNodes, transform.PasteNodesToDocutils])
    transformer.apply_transforms()
    file_regression.check(document.pformat(), extension=".xml")
    glue_domain = NbGlueDomain.from_env(sphinx_run.app.env)
    assert set(glue_domain.cache) == {
        "key_text1",
        "key_float",
        "key_undisplayed",
        "key_df",
        "key_plt",
        "sym_eq",
    }
    glue_domain.clear_doc("with_glue")
    assert glue_domain.cache == {}
    assert glue_domain.docmap == {}
Example #5
def test_parser(mock_document, get_notebook, file_regression):
    parser = NotebookParser()
    parser.parse(get_notebook("with_glue.ipynb").read_text(), mock_document)

    transformer = Transformer(mock_document)
    transformer.add_transforms(
        [CellOutputsToNodes, transform.PasteNodesToDocutils])
    transformer.apply_transforms()

    file_regression.check(mock_document.pformat(), extension=".xml")
    glue_domain = NbGlueDomain.from_env(mock_document.document.settings.env)
    assert set(glue_domain.cache) == {
        "key_text1",
        "key_float",
        "key_undisplayed",
        "key_df",
        "key_plt",
        "sym_eq",
    }
    glue_domain.clear_doc(mock_document.settings.env.docname)
    assert glue_domain.cache == {}
    assert glue_domain.docmap == {}
Example #6
def apply_transforms(self):
    """Apply transforms to the current document tree."""
    if self.transforms:
        transformer = Transformer(self.definition.doc_block.document)
        transformer.add_transforms(self.transforms)
        transformer.apply_transforms()