Example 1

# BlockLexer and BlockGrammar come from the module under test; this import
# path is an assumption, so adjust it to the real location.
from block_lexer import BlockLexer, BlockGrammar

class TestBlockLexer:
    def setup_method(self):
        # Fresh lexer for every test (pytest's xunit-style hook; the
        # nose-style ``setup`` name is deprecated in modern pytest).
        self.bl = BlockLexer()

    def test_call(self):
        # Calling the lexer directly should tokenize like parse().
        result = self.bl("Words")
        expected = [{"type": "paragraph", "text": "Words"}]
        assert result == expected

    def test_parse(self):
        # The leading blank line yields no token; blocks come out in order.
        result = self.bl.parse("\n## Cat\nWords")
        expected = [
            {"type": "header", "hlevel": 2, "title": "Cat"},
            {"type": "paragraph", "text": "Words"},
        ]
        assert result == expected

    def test_tokenize_header(self):
        # Two leading hashes map to heading level 2.
        regex = BlockGrammar.header
        m = regex.match("## Cat")
        self.bl.tokenize_header(m)
        expected = [{"type": "header", "hlevel": 2, "title": "Cat"}]
        assert self.bl.tokens == expected

    def test_tokenize_unordered_list(self):
        regex = BlockGrammar.unordered_list
        m = regex.match("* Cat\n")
        self.bl.tokenize_unordered_list(m)
        expected = [{"type": "unordered_list", "text": "Cat"}]
        assert self.bl.tokens == expected

    def test_tokenize_table(self):
        # Cell whitespace is stripped from both the header and body rows.
        regex = BlockGrammar.table
        m = regex.match(
            "First|Second Col\n"
            "-------|--------\n"
            "foo 2  |  bar   \n"
            "baz    | cat    \n"
        )
        self.bl.tokenize_table(m)
        expected = [
            {
                "type": "table",
                "header": ["First", "Second Col"],
                "body": [["foo 2", "bar"], ["baz", "cat"]],
            }
        ]
        assert self.bl.tokens == expected

    def test_tokenize_text(self):
        regex = BlockGrammar.text
        m = regex.match("Words\n")
        self.bl.tokenize_text(m)
        expected = [{"type": "paragraph", "text": "Words"}]
        assert self.bl.tokens == expected
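
The tests above pin down the interface of a BlockLexer/BlockGrammar pair that is not shown here. Below is a minimal, self-contained sketch that would satisfy that interface, assuming simple line-oriented regexes; the patterns, rule order, and class layout are illustrative guesses, not the original implementation.

import re


class BlockGrammar:
    # Assumed patterns; the real grammar is not shown in the tests.
    header = re.compile(r"^(#+)[ \t]*(.+?)[ \t]*$", re.M)
    unordered_list = re.compile(r"^[*-][ \t]+(.+)\n")
    table = re.compile(r"^(.+\|.+)\n *[-|: ]+\n((?:.+\|.+\n)*)")
    text = re.compile(r"^(.+)\n?")


class BlockLexer:
    # Rules are tried in order; the first match wins.
    rules = ("header", "unordered_list", "table", "text")

    def __init__(self):
        self.tokens = []

    def __call__(self, text):
        return self.parse(text)

    def parse(self, text):
        self.tokens = []
        text = text.lstrip("\n")  # leading blank lines produce no token
        while text:
            for name in self.rules:
                m = getattr(BlockGrammar, name).match(text)
                if m:
                    getattr(self, "tokenize_" + name)(m)
                    text = text[m.end():].lstrip("\n")
                    break
            else:
                raise ValueError("no grammar rule matched: %r" % text[:20])
        return self.tokens

    def tokenize_header(self, m):
        # "## Cat" -> heading level 2, title "Cat"
        self.tokens.append(
            {"type": "header", "hlevel": len(m.group(1)), "title": m.group(2)}
        )

    def tokenize_unordered_list(self, m):
        self.tokens.append({"type": "unordered_list", "text": m.group(1)})

    def tokenize_table(self, m):
        def split(row):
            # Cells are stripped on both sides of each "|".
            return [cell.strip() for cell in row.split("|")]

        self.tokens.append(
            {
                "type": "table",
                "header": split(m.group(1)),
                "body": [split(row) for row in m.group(2).splitlines()],
            }
        )

    def tokenize_text(self, m):
        self.tokens.append({"type": "paragraph", "text": m.group(1)})

Under these assumptions, all six tests above pass against the sketch.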