Example #1
    def apply(self):
        glue_domain = NbGlueDomain.from_env(self.app.env)  # type: NbGlueDomain
        for paste_node in self.document.traverse(PasteNode):

            if paste_node.key not in glue_domain:
                SPHINX_LOGGER.warning(
                    (f"Couldn't find key `{paste_node.key}` "
                     "in keys defined across all pages."),
                    location=(paste_node.source, paste_node.line),
                )
                continue

            # Grab the output for this key
            output = glue_domain.get(paste_node.key)

            out_node = paste_node.create_node(output=output,
                                              document=self.document,
                                              env=self.app.env)
            if out_node is None:
                SPHINX_LOGGER.warning(
                    ("Couldn't find compatible output format for key "
                     f"`{paste_node.key}`"),
                    location=(paste_node.source, paste_node.line),
                )
            else:
                paste_node.replace_self(out_node)
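Below is a hedged sketch of how a transform with an apply() method like this one could be registered in a Sphinx extension. The class name PasteNodesToDocutils is borrowed from the test examples further down, and the priority value is an arbitrary choice; SphinxTransform supplies the self.app attribute the method above relies on.

# Hypothetical registration sketch, not taken from the example above.
from sphinx.application import Sphinx
from sphinx.transforms import SphinxTransform


class PasteNodesToDocutils(SphinxTransform):
    """Replace glue paste placeholders with their stored outputs."""

    default_priority = 699  # arbitrary value for this sketch

    def apply(self):
        ...  # body as in the example above


def setup(app: Sphinx):
    app.add_transform(PasteNodesToDocutils)
    return {"parallel_read_safe": True}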
Example #2
    def parse(self, inputstring: str, document: nodes.document):

        self.reporter = document.reporter
        self.env = document.settings.env  # type: BuildEnvironment

        converter = get_nb_converter(
            self.env.doc2path(self.env.docname, False),
            self.env,
            inputstring.splitlines(keepends=True),
        )

        if converter is None:
            # Read the notebook as a text-document
            super().parse(inputstring, document=document)
            return

        try:
            ntbk = converter.func(inputstring)
        except Exception as error:
            SPHINX_LOGGER.error(
                "MyST-NB: Conversion to notebook failed: %s",
                error,
                # exc_info=True,
                location=(self.env.docname, 1),
            )
            return

        # add outputs to notebook from the cache
        if self.env.config["jupyter_execute_notebooks"] != "off":
            ntbk = generate_notebook_outputs(
                self.env,
                ntbk,
                show_traceback=self.env.config["execution_show_tb"])

        # Parse the notebook content to a list of syntax tokens and an env
        # containing global data like reference definitions
        md_parser, env, tokens = nb_to_tokens(
            ntbk,
            converter.config,  # converter is guaranteed non-None at this point
            self.env.config["nb_render_plugin"],
        )

        # Write the notebook's output to disk
        path_doc = nb_output_to_disc(ntbk, document)

        # Update our glue key list with new ones defined in this page
        glue_domain = NbGlueDomain.from_env(self.env)
        glue_domain.add_notebook(ntbk, path_doc)

        # Render the Markdown tokens to docutils AST.
        tokens_to_docutils(md_parser, env, tokens, document)
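For context, a minimal conf.py sketch showing the two configuration values this parse() method reads. The execution modes "off", "auto", "force", and "cache" are from the MyST-NB documentation of this era; the specific values chosen here are assumptions.

# conf.py sketch -- values are illustrative assumptions
extensions = ["myst_nb"]

# read above: any value other than "off" triggers generate_notebook_outputs()
jupyter_execute_notebooks = "auto"

# read above: whether execution errors include the full traceback
execution_show_tb = True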
Example #3
def test_parser(sphinx_run, clean_doctree, file_regression):
    sphinx_run.build()
    # print(sphinx_run.status())
    assert sphinx_run.warnings() == ""
    doctree = clean_doctree(sphinx_run.get_resolved_doctree("with_glue"))
    file_regression.check(doctree.pformat(), extension=".xml", encoding="utf8")
    glue_domain = NbGlueDomain.from_env(sphinx_run.app.env)
    assert set(glue_domain.cache) == {
        "key_text1",
        "key_float",
        "key_undisplayed",
        "key_df",
        "key_plt",
        "sym_eq",
    }
    glue_domain.clear_doc("with_glue")
    assert glue_domain.cache == {}
    assert glue_domain.docmap == {}
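For reference, the keys asserted above would be produced inside with_glue.ipynb by myst_nb's glue() helper. A sketch of such a notebook cell follows; the glued values are invented, and only the key names match the test.

# Sketch of a cell in with_glue.ipynb that populates the glue cache
from myst_nb import glue

glue("key_text1", "text1")
glue("key_float", 3.14159)
glue("key_undisplayed", "undisplayed", display=False)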
Example #4
    def parse(self, inputstring: str, document: nodes.document):

        self.reporter = document.reporter
        self.env = document.settings.env
        self.config = self.default_config.copy()
        try:
            new_cfg = document.settings.env.config.myst_config
            self.config.update(new_cfg)
        except AttributeError:
            pass

        try:
            ntbk = string_to_notebook(inputstring, self.env)
        except Exception as err:
            SPHINX_LOGGER.error("Notebook load failed for %s: %s",
                                self.env.docname, err)
            return
        if not ntbk:
            # Read the notebook as a text-document
            to_docutils(inputstring, options=self.config, document=document)
            return

        # add outputs to notebook from the cache
        if self.env.config["jupyter_execute_notebooks"] != "off":
            ntbk = add_notebook_outputs(
                self.env,
                ntbk,
                show_traceback=self.env.config["execution_show_tb"])

        # Parse the notebook content to a list of syntax tokens and an env
        # containing global data like reference definitions
        md_parser, env, tokens = nb_to_tokens(ntbk)

        # Write the notebook's output to disk
        path_doc = nb_output_to_disc(ntbk, document)

        # Update our glue key list with new ones defined in this page
        glue_domain = NbGlueDomain.from_env(self.env)
        glue_domain.add_notebook(ntbk, path_doc)

        # Render the Markdown tokens to docutils AST.
        tokens_to_docutils(md_parser, env, tokens, document)
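A sketch of how a parser like this is typically wired into the extension's setup() hook. The ".ipynb" suffix mapping, the "myst-nb" filetype string, and the import path are assumptions; NotebookParser is the class name used in Example #6.

# Hypothetical setup() wiring for this parser.
from sphinx.application import Sphinx

from myst_nb.parser import NotebookParser  # import path assumed


def setup(app: Sphinx):
    app.add_source_suffix(".ipynb", "myst-nb")  # filetype name assumed
    app.add_source_parser(NotebookParser)
    return {"parallel_read_safe": True}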
Example #5
def test_parser(sphinx_run, file_regression):
    sphinx_run.build()
    # print(sphinx_run.status())
    assert sphinx_run.warnings() == ""
    document = sphinx_run.get_doctree()
    transformer = Transformer(document)
    transformer.add_transforms([CellOutputsToNodes, transform.PasteNodesToDocutils])
    transformer.apply_transforms()
    file_regression.check(document.pformat(), extension=".xml")
    glue_domain = NbGlueDomain.from_env(sphinx_run.app.env)
    assert set(glue_domain.cache) == {
        "key_text1",
        "key_float",
        "key_undisplayed",
        "key_df",
        "key_plt",
        "sym_eq",
    }
    glue_domain.clear_doc("with_glue")
    assert glue_domain.cache == {}
    assert glue_domain.docmap == {}
Example #6
def test_parser(mock_document, get_notebook, file_regression):
    parser = NotebookParser()
    parser.parse(get_notebook("with_glue.ipynb").read_text(), mock_document)

    transformer = Transformer(mock_document)
    transformer.add_transforms(
        [CellOutputsToNodes, transform.PasteNodesToDocutils])
    transformer.apply_transforms()

    file_regression.check(mock_document.pformat(), extension=".xml")
    glue_domain = NbGlueDomain.from_env(mock_document.document.settings.env)
    assert set(glue_domain.cache) == {
        "key_text1",
        "key_float",
        "key_undisplayed",
        "key_df",
        "key_plt",
        "sym_eq",
    }
    glue_domain.clear_doc(mock_document.settings.env.docname)
    assert glue_domain.cache == {}
    assert glue_domain.docmap == {}
Example #7
    def __init__(self, tmp_path):
        self.docname = "source/nb"
        self.dependencies = defaultdict(set)
        self.domaindata = {}
        self.domains = {
            NbGlueDomain.name: NbGlueDomain(self),
            MathDomain.name: MathDomain(self),
        }
        self._tmp_path = tmp_path

        # minimal stand-ins for the Sphinx application, builder, and config
        class app:
            class builder:
                name = "html"

            class config:
                language = None

            env = self
            srcdir = tmp_path / "source"
            outdir = tmp_path / "build" / "outdir"

        self.app = app
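A possible pytest fixture around this mock environment; MockEnvironment is an assumed name for the class this __init__ belongs to.

# Hypothetical fixture wiring for the mock environment above.
import pytest


@pytest.fixture
def mock_env(tmp_path):
    # tmp_path is pytest's built-in per-test temporary directory
    return MockEnvironment(tmp_path)  # class name assumed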
Example #8
    def parse(self, inputstring, document):

        # de-serialize the notebook
        ntbk = nbf.reads(inputstring, nbf.NO_CONVERT)

        # This is a container for top level markdown tokens
        # which we will add to as we walk the document
        mkdown_tokens = []  # type: list[BlockToken]

        # First we ensure that we are using a 'clean' global context
        # for parsing, which is set up with the MyST parsing tokens;
        # the logger will report on duplicate link/footnote definitions, etc.
        parse_context = ParseContext(
            find_blocks=SphinxNBRenderer.default_block_tokens,
            find_spans=SphinxNBRenderer.default_span_tokens,
            logger=SPHINX_LOGGER,
        )
        set_parse_context(parse_context)

        for cell_index, nb_cell in enumerate(ntbk.cells):

            # Skip empty cells
            if len(nb_cell["source"].strip()) == 0:
                continue

            # skip cells tagged for removal
            tags = nb_cell.metadata.get("tags", [])
            if "remove_cell" in tags:
                continue

            if nb_cell["cell_type"] == "markdown":

                # we add the document path and cell index
                # to the source lines, so they can be included in the error logging
                # NOTE: currently the logic to report metadata is not written
                # into SphinxRenderer, but this will be introduced in a later update
                lines = SourceLines(
                    nb_cell["source"],
                    uri=document["source"],
                    metadata={"cell_index": cell_index},
                    standardize_ends=True,
                )

                # parse the source markdown text;
                # at this point span/inline level tokens are not yet processed, but
                # link/footnote definitions are collected/stored in the global context
                mkdown_tokens.extend(tokenize_block(lines))

                # TODO for md cells, think of a way to implement the previous
                # `if "hide_input" in tags:` logic

            elif nb_cell["cell_type"] == "code":
                # here we do nothing but store the cell as a custom token
                mkdown_tokens.append(
                    NbCodeCell(
                        cell=nb_cell,
                        position=Position(
                            line_start=0,
                            uri=document["source"],
                            data={"cell_index": cell_index},
                        ),
                    ))

        # Now all definitions have been gathered, we walk the tokens and
        # process any inline text
        for token in mkdown_tokens + list(
                get_parse_context().foot_definitions.values()):
            token.expand_spans()

        # If there are widgets, this will embed the state of all widgets in a script
        if contains_widgets(ntbk):
            mkdown_tokens.insert(0,
                                 JupyterWidgetState(state=get_widgets(ntbk)))

        # create the front matter token
        front_matter = FrontMatter(content=ntbk.metadata, position=None)

        # Finally, we create the top-level markdown document
        markdown_doc = Document(
            children=mkdown_tokens,
            front_matter=front_matter,
            link_definitions=parse_context.link_definitions,
            footnotes=parse_context.foot_definitions,
            footref_order=parse_context.foot_references,
        )

        self.reporter = document.reporter
        self.config = self.default_config.copy()
        try:
            new_cfg = document.settings.env.config.myst_config
            self.config.update(new_cfg)
        except AttributeError:
            pass

        # Remove all the mime prefixes from the "glue" outputs.
        # This way, writing the notebook to disk properly captures the glued images
        replace_mime = []
        for cell in ntbk.cells:
            if hasattr(cell, "outputs"):
                for out in cell.outputs:
                    if "data" in out:
                        # Only do the mimebundle replacing for the scrapbook outputs
                        mime_prefix = (
                            out.get("metadata", {})
                            .get("scrapbook", {})
                            .get("mime_prefix")
                        )
                        if mime_prefix:
                            out["data"] = {
                                key.replace(mime_prefix, ""): val
                                for key, val in out["data"].items()
                            }
                            replace_mime.append(out)

        # Write the notebook's output to disk. This changes metadata in notebook cells
        path_doc = Path(document.settings.env.docname)
        doc_relpath = path_doc.parent
        doc_filename = path_doc.name
        build_dir = Path(document.settings.env.app.outdir).parent
        output_dir = build_dir.joinpath("jupyter_execute", doc_relpath)
        write_notebook_output(ntbk, str(output_dir), doc_filename)

        # Now add back the mime prefixes to the right outputs so they aren't rendered
        # until called from the role/directive
        for out in replace_mime:
            out["data"] = {
                f"{GLUE_PREFIX}{key}": val
                for key, val in out["data"].items()
            }

        # Update our glue key list with new ones defined in this page
        glue_domain = NbGlueDomain.from_env(document.settings.env)
        glue_domain.add_notebook(ntbk, path_doc)

        # render the Markdown AST to docutils AST
        renderer = SphinxNBRenderer(parse_context=parse_context,
                                    document=document,
                                    current_node=None)
        renderer.render(markdown_doc)
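To make the mime-prefix round trip above concrete, here is a small self-contained illustration. The bundle contents are invented, and the prefix value is an assumption based on scrapbook's record format.

# Illustration of the strip/restore cycle performed above.
# GLUE_PREFIX is assumed to equal scrapbook's record prefix.
GLUE_PREFIX = "application/papermill.record/"

out_data = {GLUE_PREFIX + "text/plain": "'hello'"}  # invented bundle

# strip the prefix before writing, so the output serializes normally
stripped = {k.replace(GLUE_PREFIX, ""): v for k, v in out_data.items()}
assert stripped == {"text/plain": "'hello'"}

# restore the prefix afterwards, so the output is only rendered
# when requested through the glue role/directive
restored = {GLUE_PREFIX + k: v for k, v in stripped.items()}
assert restored == out_data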