def test_convert_text_and_a_tag(self):
    """Inline marks (strong, i) interleaved with text render inside one <p>."""
    # NOTE(review): name mentions "a_tag" but the fixture exercises
    # strong/i marks — confirm intent; left unchanged for test discovery.
    slate = [
        {
            "type": "p",
            "children": [
                {"text": "Hello "},
                {"type": "strong", "children": [{"text": "world"}]},
                {"text": " mixed "},
                {"type": "i", "children": [{"text": "content"}]},
                {"text": "."},
            ],
        }
    ]
    rendered = slate_to_html(slate)
    self.assertEqual(
        rendered,
        "<p>Hello <strong>world</strong> mixed <i>content</i>.</p>",
    )
def test_convert_slate_output_markup(self):
    """Converting fixture 5.json yields the reference markup in 5.html."""
    expected = read_data("5.html").strip()
    actual = slate_to_html(read_json("5.json")).strip()
    self.assertEqual(actual, expected)
def test_convert_simple_paragraph(self):
    """A lone text child of a p node becomes a plain <p> element."""
    rendered = slate_to_html(
        [{"type": "p", "children": [{"text": "Hello world"}]}]
    )
    self.assertEqual(rendered, "<p>Hello world</p>")
def test_convert_simple_paragraph_multi_breaks(self):
    """Newline characters inside a text node are rendered as <br> tags."""
    source_text = "Hello \nworld \n in a multi line \nparagraph"
    rendered = slate_to_html(
        [{"type": "p", "children": [{"text": source_text}]}]
    )
    self.assertEqual(
        rendered,
        "<p>Hello <br>world <br> in a multi line <br>paragraph</p>",
    )
def test_convert_case_multiple_p(self):
    """Fixture 2.json with several paragraphs converts to sibling <p> elements."""
    expected = (
        "<p>Since version 2.0, lxml comes with a dedicated Python package "
        "for dealing with HTML: lxml.html. <br>It is based on lxml's HTML "
        "parser, but provides a special Element API for HTML elements, as "
        "well as a number of utilities for common HTML processing tasks."
        "</p><p>The normal HTML parser is capable of handling broken HTML,"
        " but for pages that are far enough from HTML to call them "
        "'tag soup', it may still fail to parse the page in a useful way. "
        "A way to deal with this is ElementSoup, which deploys the "
        "well-known BeautifulSoup parser to build an lxml HTML tree.</p>"
        "<p>However, note that the most common problem with web pages is "
        "the lack of (or the existence of incorrect) encoding declarations."
        " It is therefore often sufficient to only use the encoding "
        "detection of BeautifulSoup, called UnicodeDammit, and to leave "
        "the rest to lxml's own HTML parser, which is several times faster."
        "</p>"
    )
    actual = slate_to_html(read_json("2.json"))
    self.assertEqual(actual, expected)
def test_slate_list(self):
    """List fixture 6.json converts to the reference markup in 6-1.html."""
    expected = read_data("6-1.html").strip()
    actual = slate_to_html(read_json("6.json")).strip()
    self.assertEqual(actual, expected)