"""
Tests for pymarkdown's tokenizer and GFM transformer, one test function per
GFM/CommonMark specification example.
"""
# Import paths below are assumed from the pymarkdown project layout.
from pymarkdown.tokenized_markdown import TokenizedMarkdown
from pymarkdown.transform_to_gfm import TransformToGfm

from .utils import (
    assert_if_lists_different,
    assert_if_strings_different,
    assert_token_consistency,
)


def test_textual_content_673():
    """
    Test case 673:  Internal spaces are preserved verbatim:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """Multiple     spaces"""
    expected_tokens = [
        "[para(1,1):]", "[text:Multiple     spaces:]", "[end-para]"
    ]
    expected_gfm = """<p>Multiple     spaces</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_block_quotes_220():
    """
    Test case 220:  A blank line always separates block quotes:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """> foo

> bar"""
    expected_tokens = [
        "[block-quote(1,1):]",
        "[para(1,3):]",
        "[text:foo:]",
        "[end-para]",
        "[end-block-quote]",
        "[BLANK(2,1):]",
        "[block-quote(3,1):]",
        "[para(3,3):]",
        "[text:bar:]",
        "[end-para]",
        "[end-block-quote]",
    ]
    expected_gfm = """<blockquote>
<p>foo</p>
</blockquote>
<blockquote>
<p>bar</p>
</blockquote>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_emphasis_457():
    """
    Test case 457:  (part 1) Rule 12
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """foo ___"""
    expected_tokens = [
        "[para(1,1):]", "[text:foo :]", "[text:___:]", "[end-para]"
    ]
    expected_gfm = """<p>foo ___</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_raw_html_633():
    """
    Test case 633:  Empty elements:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """<a/><b2/>"""
    expected_tokens = [
        "[para(1,1):]", "[raw-html:a/]", "[raw-html:b2/]", "[end-para]"
    ]
    expected_gfm = """<p><a/><b2/></p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_paragraph_blocks_192():
    """
    Test case 192:  Leading spaces are skipped:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """  aaa
 bbb"""
    expected_tokens = ["[para(1,3):  \n ]", "[text:aaa\nbbb::\n]", "[end-para]"]
    expected_gfm = """<p>aaa
bbb</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_autolinks_608():
    """
    Test case 608:  (part 3) Note that many strings that count as absolute URIs for purposes of this spec are not valid URIs, because their schemes are not registered or because of other problems with their syntax:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """<http://../>"""
    expected_tokens = [
        "[para(1,1):]", "[uri-autolink:http://../]", "[end-para]"
    ]
    expected_gfm = """<p><a href="http://../">http://../</a></p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_autolinks_615():
    """
    Test case 615:  (part 1) These are not autolinks:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """<>"""
    expected_tokens = [
        "[para(1,1):]", "[text:\a<\a&lt;\a\a>\a&gt;\a:]", "[end-para]"
    ]
    expected_gfm = """<p>&lt;&gt;</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_thematic_breaks_020():
    """
    Test case 020:  More than three characters may be used:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """_____________________________________"""
    expected_tokens = [
        "[tbreak(1,1):_::_____________________________________]"
    ]
    expected_gfm = """<hr />"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_tables_extension_205():
    """
    Test case 205:  If there are no rows in the body, no <tbody> is generated in HTML output:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    source_markdown = """| abc | def |
| --- | --- |"""
    expected_tokens = [
        "[para(1,1):\n]",
        "[text:| abc | def |\n| --- | --- |::\n]",
        "[end-para]",
    ]

    # Act
    actual_tokens = tokenizer.transform(source_markdown)

    # Assert
    # TODO Expect this to fail when tables are implemented
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_token_consistency(source_markdown, actual_tokens)


def test_list_items_283():
    """
    Test case 283:  In CommonMark, a list can interrupt a paragraph. That is, no blank line is needed to separate a paragraph from a following list:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """Foo
- bar
- baz"""
    expected_tokens = [
        "[para(1,1):]",
        "[text:Foo:]",
        "[end-para]",
        "[ulist(2,1):-::2:]",
        "[para(2,3):]",
        "[text:bar:]",
        "[end-para]",
        "[li(3,1):2:]",
        "[para(3,3):]",
        "[text:baz:]",
        "[end-para]",
        "[end-ulist]",
    ]
    expected_gfm = """<p>Foo</p>
<ul>
<li>bar</li>
<li>baz</li>
</ul>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_autolinks_620():
    """
    Test case 620:  (part 6) These are not autolinks:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """*****@*****.**"""
    expected_tokens = [
        "[para(1,1):]", "[text:[email protected]:]", "[end-para]"
    ]
    expected_gfm = """<p>[email protected]</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_raw_html_642a():
    """
    Test case 642a:  closing tag character without a valid closing tag name
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """</>"""
    expected_tokens = [
        "[para(1,1):]", "[text:\a<\a&lt;\a/\a>\a&gt;\a:]", "[end-para]"
    ]
    expected_gfm = """<p>&lt;/&gt;</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_soft_line_breaks_669():
    """
    Test case 669:  A regular line break (not in a code span or HTML tag) that is not preceded by two or more spaces or a backslash is parsed as a softbreak.
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """foo
baz"""
    expected_tokens = ["[para(1,1):\n]", "[text:foo\nbaz::\n]", "[end-para]"]
    expected_gfm = """<p>foo
baz</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_fenced_code_blocks_098c():
    """
    Test case 098c:  Modified version of 098 with no ">" before the second line.
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """> ```
aaa

bbb"""
    expected_tokens = [
        "[block-quote(1,1):]",
        "[fcode-block(1,3):`:3::::::]",
        "[end-fcode-block]",
        "[end-block-quote]",
        "[para(2,1):]",
        "[text:aaa:]",
        "[end-para]",
        "[BLANK(3,1):]",
        "[para(4,1):]",
        "[text:bbb:]",
        "[end-para]",
    ]
    expected_gfm = """<blockquote>
<pre><code></code></pre>
</blockquote>
<p>aaa</p>
<p>bbb</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_paragraph_blocks_194():
    """
    Test case 194: (part a) However, the first line may be indented at most three spaces, or an indented code block will be triggered:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """   aaa
bbb"""
    expected_tokens = ["[para(1,4):   \n]", "[text:aaa\nbbb::\n]", "[end-para]"]
    expected_gfm = """<p>aaa
bbb</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_link_reference_definitions_182():
    """
    Test case 182:  A link reference definition cannot interrupt a paragraph.
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """Foo
[bar]: /baz

[bar]"""
    expected_tokens = [
        "[para(1,1):\n]",
        "[text:Foo\n::\n]",
        "[text:[:]",
        "[text:bar:]",
        "[text:]:]",
        "[text:: /baz:]",
        "[end-para]",
        "[BLANK(3,1):]",
        "[para(4,1):]",
        "[text:[:]",
        "[text:bar:]",
        "[text:]:]",
        "[end-para]",
    ]
    expected_gfm = """<p>Foo
[bar]: /baz</p>
<p>[bar]</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_reference_links_551():
    """
    Test case 551:  (part 2) No whitespace is allowed between the link text and the link label:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """[foo]
[bar]

[bar]: /url "title"
"""
    expected_tokens = [
        "[para(1,1):\n]",
        "[text:[:]",
        "[text:foo:]",
        "[text:]:]",
        "[text:\n::\n]",
        "[link:shortcut:/url:title::::bar]",
        "[text:bar:]",
        "[end-link::]",
        "[end-para]",
        "[BLANK(3,1):]",
        '[link-ref-def(4,1):True::bar:: :/url:: :title:"title":]',
        "[BLANK(5,1):]",
    ]
    expected_gfm = """<p>[foo]
<a href="/url" title="title">bar</a></p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_reference_links_538():
    """
    Test case 538:  (part 1) The link text may contain inline content:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """[link *foo **bar** `#`*][ref]

[ref]: /uri"""
    expected_tokens = [
        "[para(1,1):]",
        "[link:full:/uri::::ref:link *foo **bar** #*]",
        "[text:link :]",
        "[emphasis:1:*]",
        "[text:foo :]",
        "[emphasis:2:*]",
        "[text:bar:]",
        "[end-emphasis::2:*]",
        "[text: :]",
        "[icode-span:#:`::]",
        "[end-emphasis::1:*]",
        "[end-link::]",
        "[end-para]",
        "[BLANK(2,1):]",
        "[link-ref-def(3,1):True::ref:: :/uri:::::]",
    ]
    expected_gfm = """<p><a href="/uri">link <em>foo <strong>bar</strong> <code>#</code></em></a></p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_fenced_code_blocks_111():
    """
    Test case 111:  Other blocks can also occur before and after fenced code blocks without an intervening blank line:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """foo
---
~~~
bar
~~~
# baz"""
    expected_tokens = [
        "[setext(2,1):-:3::(1,1)]",
        "[text:foo:]",
        "[end-setext::]",
        "[fcode-block(3,1):~:3::::::]",
        "[text:bar:]",
        "[end-fcode-block::3]",
        "[atx(6,1):1:0:]",
        "[text:baz: ]",
        "[end-atx::]",
    ]
    expected_gfm = """<h2>foo</h2>
<pre><code>bar
</code></pre>
<h1>baz</h1>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_list_items_303():
    """
    Test case 303:  (part 2) A single-paragraph list is tight:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """- a
  - b"""
    expected_tokens = [
        "[ulist(1,1):-::2:]",
        "[para(1,3):]",
        "[text:a:]",
        "[end-para]",
        "[ulist(2,3):-::4:  ]",
        "[para(2,5):]",
        "[text:b:]",
        "[end-para]",
        "[end-ulist]",
        "[end-ulist]",
    ]
    expected_gfm = """<ul>
<li>a
<ul>
<li>b</li>
</ul>
</li>
</ul>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_tabs_004x():
    """
    Test case 004:  (part a) a continuation paragraph of a list item is indented with a tab; this has exactly the same effect as indentation with four spaces would
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """  - foo

\tbar"""  # noqa: E101,W191
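    # The tab before "bar" behaves as if expanded to the next 4-character
    # tab stop, so the line continues the list item like a 4-space indent.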
    expected_tokens = [
        "[ulist(1,3):-::4:  :\t]",
        "[para(1,5):]",
        "[text:foo:]",
        "[end-para]",
        "[BLANK(2,1):]",
        "[para(3,2):]",
        "[text:bar:]",
        "[end-para]",
        "[end-ulist]",
    ]
    expected_gfm = """<ul>
<li>
<p>foo</p>
<p>bar</p>
</li>
</ul>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_link_reference_definitions_165():
    """
    Test case 165:  The title may extend over multiple lines:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """[foo]: /url '
title
line1
line2
'

[foo]"""
    expected_tokens = [
        "[link-ref-def(1,1):True::foo:: :/url:: :\ntitle\nline1\nline2\n:'\ntitle\nline1\nline2\n':]",
        "[BLANK(6,1):]",
        "[para(7,1):]",
        "[link:shortcut:/url:\ntitle\nline1\nline2\n::::foo]",
        "[text:foo:]",
        "[end-link::]",
        "[end-para]",
    ]
    expected_gfm = """<p><a href="/url" title="
title
line1
line2
">foo</a></p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_link_reference_definitions_170():
    """
    Test case 170:  The title must be separated from the link destination by whitespace:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """[foo]: <bar>(baz)

[foo]"""
    expected_tokens = [
        "[para(1,1):]",
        "[text:[:]",
        "[text:foo:]",
        "[text:]:]",
        "[text:: :]",
        "[raw-html:bar]",
        "[text:(baz):]",
        "[end-para]",
        "[BLANK(2,1):]",
        "[para(3,1):]",
        "[text:[:]",
        "[text:foo:]",
        "[text:]:]",
        "[end-para]",
    ]
    expected_gfm = """<p>[foo]: <bar>(baz)</p>
<p>[foo]</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_setext_headings_073():
    """
    Test case 073:  Authors who want interpretation 1 can put a blank line after the first paragraph:
    https://github.github.com/gfm/#example-73
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """Foo

bar
---
baz"""
    expected_tokens = [
        "[para(1,1):]",
        "[text:Foo:]",
        "[end-para]",
        "[BLANK(2,1):]",
        "[setext(4,1):-:3::(3,1)]",
        "[text:bar:]",
        "[end-setext::]",
        "[para(5,1):]",
        "[text:baz:]",
        "[end-para]",
    ]
    expected_gfm = """<p>Foo</p>
<h2>bar</h2>
<p>baz</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_tabs_002b():
    """
    Test case 002b:  Variation of 002 tested against Babelmark
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """    a simple
      indented code block
---
      a simple
      indented code block"""
    expected_tokens = [
        "[icode-block(1,5):    :\n    ]",
        "[text:a simple\n  indented code block:]",
        "[end-icode-block]",
        "[tbreak(3,1):-::---]",
        "[icode-block(4,5):    :\n    ]",
        "[text:a simple\n  indented code block:  ]",
        "[end-icode-block]",
    ]
    expected_gfm = """<pre><code>a simple
  indented code block
</code></pre>
<hr />
<pre><code>  a simple
  indented code block
</code></pre>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_inline_links_513():
    """
    Test case 513:  Titles may be in single quotes, double quotes, or parentheses:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """[link](/url "title")
[link](/url 'title')
[link](/url (title))"""
    expected_tokens = [
        "[para(1,1):\n\n]",
        "[link:inline:/url:title::::link]",
        "[text:link:]",
        "[end-link::]",
        "[text:\n::\n]",
        "[link:inline:/url:title::::link]",
        "[text:link:]",
        "[end-link::]",
        "[text:\n::\n]",
        "[link:inline:/url:title::::link]",
        "[text:link:]",
        "[end-link::]",
        "[end-para]",
    ]
    expected_gfm = """<p><a href="/url" title="title">link</a>
<a href="/url" title="title">link</a>
<a href="/url" title="title">link</a></p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_tabs_004a():
    """
    Test case 004a:  Variation on 004
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """  - foo

    bar"""  # noqa: E101,W191
    expected_tokens = [
        "[ulist(1,3):-::4:  :    ]",
        "[para(1,5):]",
        "[text:foo:]",
        "[end-para]",
        "[BLANK(2,1):]",
        "[para(3,5):]",
        "[text:bar:]",
        "[end-para]",
        "[end-ulist]",
    ]
    expected_gfm = """<ul>
<li>
<p>foo</p>
<p>bar</p>
</li>
</ul>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_block_quotes_228():
    """
    Test case 228:  (part 1) It is a consequence of the Laziness rule that any number of initial >s may be omitted on a continuation line of a nested block quote:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """> > > foo
bar"""
    expected_tokens = [
        "[block-quote(1,1):]",
        "[block-quote(1,3):]",
        "[block-quote(1,5):]",
        "[para(1,7):\n]",
        "[text:foo\nbar::\n]",
        "[end-para]",
        "[end-block-quote]",
        "[end-block-quote]",
        "[end-block-quote]",
    ]
    expected_gfm = """<blockquote>
<blockquote>
<blockquote>
<p>foo
bar</p>
</blockquote>
</blockquote>
</blockquote>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_inline_links_527a():
    """
    Test case 527a:  (part 2) However, links may not contain other links, at any level of nesting.
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """[foo *[bar [baz](/uri1)](/uri2)*](/uri3)"""
    expected_tokens = [
        "[para(1,1):]",
        "[text:[:]",
        "[text:foo :]",
        "[emphasis:1:*]",
        "[text:[:]",
        "[text:bar :]",
        "[link:inline:/uri1:::::baz]",
        "[text:baz:]",
        "[end-link::]",
        "[text:]:]",
        "[text:(/uri2):]",
        "[end-emphasis::1:*]",
        "[text:]:]",
        "[text:(/uri3):]",
        "[end-para]",
    ]
    expected_gfm = (
        """<p>[foo <em>[bar <a href="/uri1">baz</a>](/uri2)</em>](/uri3)</p>"""
    )

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


def test_setext_headings_067():
    """
    Test case 067:  Setext headings cannot be empty:
    """

    # Arrange
    tokenizer = TokenizedMarkdown()
    transformer = TransformToGfm()
    source_markdown = """
===="""
    expected_tokens = [
        "[BLANK(1,1):]", "[para(2,1):]", "[text:====:]", "[end-para]"
    ]
    expected_gfm = """<p>====</p>"""

    # Act
    actual_tokens = tokenizer.transform(source_markdown)
    actual_gfm = transformer.transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)