# Shared imports assumed by the test snippets below; the module paths follow
# the mistletoe-ebp layout and may differ in other versions.
from textwrap import dedent, indent

import pytest

from mistletoe import block_tokens, block_tokens_ext
from mistletoe.base_elements import serialize_tokens
from mistletoe.block_tokenizer import tokenize_main
from mistletoe.parse_context import get_parse_context


def test_repeated_footnote(caplog):
    get_parse_context().block_tokens.insert_before(
        block_tokens_ext.Footnote, block_tokens.LinkDefinition
    )
    tokenize_main(["[^1]: value1\n", "[^1]: value2\n"])
    assert "ignoring duplicate footnote definition" in caplog.text
    assert len(get_parse_context().foot_definitions) == 1
def test_sphinx_directives(sphinx_renderer, name, directive):
    """See https://docutils.sourceforge.io/docs/ref/rst/directives.html"""
    # TODO make sure all directives from domains are added (std and rst are done)
    # (some were erroneously added to roles)
    if name in ["include", "literalinclude"]:
        # this is tested in the sphinx build level tests
        return
    if name in [
            "meta",
            # TODO to parse properly, this requires that a directive with no
            # content and no options can have its argument treated as the body
            "productionlist",
    ]:
        # TODO fix skips
        pytest.skip("awaiting fix")
    arguments = " ".join(directive["args"])
    sphinx_renderer.render(
        tokenize_main([
            "```{{{}}} {}\n".format(name, arguments),
            directive.get("content", "") + "\n",
            "```\n",
        ])[0])
    print(
        repr(sphinx_renderer.document.pformat()).replace(" " * 8,
                                                         "    ").replace(
                                                             '"', '\\"'))
    assert sphinx_renderer.document.pformat() == (
        directive.get("doc_tag", '<document source="notset">') + "\n" +
        indent(directive["output"], "    ") +
        ("\n" if directive["output"] else ""))
def test_docutils_directives(renderer, name, directive):
    """See https://docutils.sourceforge.io/docs/ref/rst/directives.html"""
    # TODO add domain directives
    if name in [
            "role",
            "rst-class",
            "cssclass",
            "line-block",
            "block_quote",  # this is only used as a base class
    ]:
        # TODO fix skips
        pytest.skip("awaiting fix")
    arguments = " ".join(directive["args"])
    renderer.render(
        tokenize_main([
            "```{{{}}} {}\n".format(name, arguments),
            directive.get("content", "") + "\n",
            "```\n",
        ])[0])
    print(
        repr(renderer.document.pformat()).replace(" " * 8,
                                                  "    ").replace('"', '\\"'))
    assert renderer.document.pformat() == (
        directive.get("doc_tag", '<document source="notset">') + "\n" +
        indent(directive["output"], "    ") +
        ("\n" if directive["output"] else ""))
def test_fenced_code(name, source, data_regression):
    from mistletoe.base_elements import SourceLines

    print(SourceLines(source).lines)
    data_regression.check(
        serialize_tokens(tokenize_main(source), as_dict=True),
        basename=f"test_fenced_code_{name}",
    )
def test_target_block(renderer_mock):
    renderer_mock.render(tokenize_main(["(target)="])[0])
    assert renderer_mock.document.pformat() == dedent(
        """\
    <document source="notset">
        <target ids="target" names="target">
    """
    )
def test_directive(renderer):
    output = renderer.render(
        tokenize_main(["```{name} arg\n", "foo\n", "```\n"])[0])
    assert output == dedent("""\
        <div class="myst-directive">
        <pre><code>{name} arg
        foo
        </code></pre>
        </div>""")
def test_link_definitions(name, source, data_regression):
    tree = serialize_tokens(tokenize_main(source), as_dict=True)
    data_regression.check(
        {
            "tree": tree,
            "link_definitions": get_parse_context().link_definitions
        },
        basename=f"test_link_definitions_{name}",
    )
def test_block_break(renderer_mock):
    renderer_mock.render(tokenize_main(["+++ string"])[0])
    assert renderer_mock.document.pformat() == dedent(
        """\
    <document source="notset">
        <comment classes="block_break" xml:space="preserve">
            string
    """
    )
def test_image_with_alt(renderer_mock):
    renderer_mock.render(tokenize_main([r"![alt](path/to/image.jpeg)"])[0])
    assert renderer_mock.document.pformat() == dedent(
        """\
    <document source="notset">
        <paragraph>
            <image alt="alt" uri="path/to/image.jpeg">
    """
    )
def test_block_code_no_language(renderer_mock):
    renderer_mock.render(tokenize_main(["```\n", "foo\n", "```\n"])[0])
    assert renderer_mock.document.pformat() == dedent(
        """\
    <document source="notset">
        <literal_block language="" xml:space="preserve">
            foo
    """
    )
def test_target_inline(renderer_mock):
    renderer_mock.render(tokenize_main(["A b(target)="])[0])
    assert renderer_mock.document.pformat() == dedent(
        """\
    <document source="notset">
        <paragraph>
            A b
            <target ids="target" names="target">
    """
    )
def test_foot_definition(name, source, data_regression):
    get_parse_context().block_tokens.insert_before(
        block_tokens_ext.Footnote, block_tokens.LinkDefinition
    )
    tree = serialize_tokens(tokenize_main(source), as_dict=True)
    footnotes = serialize_tokens(get_parse_context().foot_definitions, as_dict=True)
    data_regression.check(
        {
            "tree": tree,
            "footnotes": footnotes,
            "link_definitions": get_parse_context().link_definitions,
        },
        basename=f"test_foot_definitions_{name}",
    )
def test_docutils_roles(renderer, name, role_data):
    """"""
    if name in ["raw"]:
        # TODO fix skips
        pytest.skip("awaiting fix")
    text = "{{{0}}}`{1}`".format(name, role_data.get("content", " "))
    print(text)
    renderer.render(tokenize_main([text])[0])
    print(
        repr(renderer.document.pformat()).replace(" " * 8,
                                                  "    ").replace('"', '\\"'))
    assert renderer.document.pformat() == (
        role_data.get("doc_tag", '<document source="notset">') + "\n" +
        indent(role_data["output"], "    ") +
        ("\n" if role_data["output"] else ""))
def test_sphinx_roles(sphinx_renderer, name, role_data):
    """"""
    # note, I think most of these have are actually directives rather than roles,
    # that I've erroneously picked up in my gather function.
    if name in ["abbr"
                ]:  # adding class="<function class_option at 0x102260290>" ??
        # TODO fix skips
        pytest.skip("awaiting fix")
    sphinx_renderer.render(
        tokenize_main(
            ["{{{}}}`{}`".format(name, role_data.get("content", "a"))])[0])
    print(
        repr(sphinx_renderer.document.pformat()).replace(" " * 8,
                                                         "    ").replace(
                                                             '"', '\\"'))
    assert sphinx_renderer.document.pformat() == (
        role_data.get("doc_tag", '<document source="notset">') + "\n" +
        indent(role_data["output"], "    ") +
        ("\n" if role_data["output"] else ""))
    @classmethod
    def read(
        cls,
        lines: Union[str, ListType[str], SourceLines],
        reset_definitions: bool = True,
        skip_tokens: list = ("LinkDefinition", "Footnote"),
        front_matter: bool = False,
    ):
        """Read a document

        :param lines: Lines to parse
        :param reset_definitions: remove any previously stored definitions
            in the global context (see ``ParseContext.reset_definitions()``).
        :param skip_tokens: do not store tokens with these ``token.name`` values
            in the syntax tree. These are usually tokens that store themselves
            in the global context.
        :param front_matter: search for an initial YAML front-matter block
            (note this is not strictly CommonMark compliant)
        """
        if reset_definitions:
            get_parse_context().reset_definitions()

        if not isinstance(lines, SourceLines):
            lines = SourceLines(lines, standardize_ends=True)

        # TODO can we do this in a way where we are checking
        # FrontMatter in get_parse_context().block_tokens?
        # then it would be easier to add/remove it in the renderers
        front_matter_token = None
        if front_matter and lines.peek() and lines.peek().startswith("---"):
            front_matter_token = FrontMatter.read(lines)

        children = tokenizer.tokenize_main(lines=lines, skip_tokens=skip_tokens)
        foot_defs = get_parse_context().foot_definitions
        return cls(
            children=children,
            front_matter=front_matter_token,
            link_definitions=get_parse_context().link_definitions,
            footnotes=foot_defs,
            footref_order=[
                t for t in get_parse_context().foot_references if t in foot_defs
            ],
        )
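# A minimal usage sketch for the ``read`` classmethod above (not part of the
# original test suite). It assumes the mistletoe-ebp layout, where ``Document``
# is exposed at the package top level; adjust the import if your version
# differs.
from mistletoe import Document

doc = Document.read(
    ["---\n", "title: demo\n", "---\n", "# Heading\n", "[a]: value1\n"],
    front_matter=True,  # opt in to the (non-CommonMark) YAML front matter
)
print(doc.front_matter)      # FrontMatter token parsed from the YAML block
print(doc.link_definitions)  # definitions harvested from the parse context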
def test_block_code(name, source, data_regression):
    data_regression.check(
        serialize_tokens(tokenize_main(source), as_dict=True),
        basename=f"test_block_code_{name}",
    )
def test_render(name, source, result, file_regression):
    with Scheme() as renderer:
        token = tokenize_main(source)[0]
        assert renderer.render(token) == result
def test_setext_heading(name, source, data_regression):
    data_regression.check(
        serialize_tokens(tokenize_main(source), as_dict=True),
        basename=f"test_setext_heading_{name}",
    )
def test_math(renderer):
    output = renderer.render(tokenize_main(["$a=1$"])[0])
    assert output == dedent("<p>$$a=1$$</p>")
def test_repeated_link_defs(caplog):
    tokenize_main(["[a]: value1\n", "[a]: value2\n"])
    assert "ignoring duplicate link definition" in caplog.text
    assert len(get_parse_context().link_definitions) == 1
def test_thematic_break(name, source, data_regression):
    data_regression.check(
        serialize_tokens(tokenize_main(source), as_dict=True),
        basename=f"test_thematic_break_{name}",
    )
def test_role(renderer):
    output = renderer.render(tokenize_main(["{name}`content`"])[0])
    assert output == (
        '<p><span class="myst-role"><code>{name}content</code></span></p>')
def test_list_item(name, source, data_regression):
    data_regression.check(
        serialize_tokens(tokenize_main(source), as_dict=True),
        basename=f"test_list_item_{name}",
    )
def test_line_comment(renderer):
    output = renderer.render(tokenize_main([r"% abc"])[0])
    assert output == "<!-- abc -->"