def test_mutate_element_list():
    """Appending, inserting, replacing, and deleting on a descriptor-backed
    element list must be reflected both in Python-side reads and in the XML
    serialized from the document.
    """
    doc = TestDoc()
    # A fresh document exposes an empty (falsy) list that raises on indexing.
    assert not doc.multi_attr
    assert len(doc.multi_attr) == 0
    with raises(IndexError):
        doc.multi_attr[0]
    doc.multi_attr.append(TextElement(value='First element'))
    assert doc.multi_attr
    assert doc.multi_attr[0].value == 'First element'
    assert len(doc.multi_attr) == 1
    doc.multi_attr.insert(1, TextElement(value='Second element'))
    assert doc.multi_attr[1].value == 'Second element'
    assert len(doc.multi_attr) == 2
    # validate=False: the partially populated TestDoc need not be valid yet.
    tree = fromstringlist(write(doc, validate=False))
    elements = tree.findall('multi')
    assert len(elements) == 2
    assert elements[0].text == 'First element'
    assert elements[1].text == 'Second element'
    # __setitem__ replaces an element in place without changing the length.
    doc.multi_attr[0] = TextElement(value='Replacing element')
    assert doc.multi_attr[0].value == 'Replacing element'
    assert len(doc.multi_attr) == 2
    tree = fromstringlist(write(doc, validate=False))
    elements = tree.findall('multi')
    assert len(elements) == 2
    assert elements[0].text == 'Replacing element'
    assert elements[1].text == 'Second element'
    # __delitem__ removes the element and shifts the remainder down.
    del doc.multi_attr[0]
    assert doc.multi_attr[0].value == 'Second element'
    assert len(doc.multi_attr) == 1
    tree = fromstringlist(write(doc, validate=False))
    elements = tree.findall('multi')
    assert len(elements) == 1
    assert elements[0].text == 'Second element'
def test_parse(input_, expected):
    """Parse a crawler test-suite fixture (*input_*) and compare its
    canonical, hint-free serialization against the *expected* output
    document, element by element.
    """
    with open(os.path.join(test_suite_dir, input_), 'rb') as f:
        xml = f.read()
    if IRON_PYTHON:
        xml = bytes(xml)
    parse = get_format(xml)
    assert callable(parse)
    # FIX: str.rstrip('.xml') strips a *character set* ('.', 'x', 'm', 'l'),
    # not the suffix, so stems ending in any of those letters (e.g.
    # 'html.xml') were silently mangled.  Slice off the exact suffix instead.
    stem = input_[:-4] if input_.endswith('.xml') else input_
    uri_filename = stem + '.uri.txt'
    try:
        with open(os.path.join(test_suite_dir, uri_filename)) as f:
            base_uri = f.read().strip()
    except (IOError, OSError):
        # No companion .uri.txt fixture: fall back to a generic base URI.
        base_uri = 'http://example.com/'
    parsed_feed, _ = parse(xml, feed_url=base_uri)
    parsed_tree = fromstringlist(
        write(parsed_feed, canonical_order=True, hints=False))
    if IRON_PYTHON:
        open_ = functools.partial(io.open, encoding='utf-8')
    elif PY3 and sys.platform == 'win32':
        # On Windows the locale's default encoding may not be UTF-8.
        open_ = functools.partial(open, encoding='utf-8')
    else:
        open_ = open
    with open_(os.path.join(test_suite_dir, expected)) as f:
        expected_tree = fromstringlist(f.read() if IRON_PYTHON else f)
    compare_tree(expected_tree, parsed_tree)
def test_parse(input_, expected):
    """Parse a crawler test-suite fixture (*input_*) and compare its
    canonical, hint-free serialization against the *expected* output
    document, element by element.
    """
    with open(os.path.join(test_suite_dir, input_), 'rb') as f:
        xml = f.read()
    if IRON_PYTHON:
        xml = bytes(xml)
    parse = get_format(xml)
    assert callable(parse)
    # FIX: str.rstrip('.xml') strips a *character set* ('.', 'x', 'm', 'l'),
    # not the suffix, so stems ending in any of those letters (e.g.
    # 'html.xml') were silently mangled.  Slice off the exact suffix instead.
    stem = input_[:-4] if input_.endswith('.xml') else input_
    uri_filename = stem + '.uri.txt'
    try:
        with open(os.path.join(test_suite_dir, uri_filename)) as f:
            base_uri = f.read().strip()
    except (IOError, OSError):
        # No companion .uri.txt fixture: fall back to a generic base URI.
        base_uri = 'http://example.com/'
    parsed_feed, _ = parse(xml, feed_url=base_uri)
    parsed_tree = fromstringlist(
        write(parsed_feed, canonical_order=True, hints=False)
    )
    if IRON_PYTHON:
        open_ = functools.partial(io.open, encoding='utf-8')
    elif PY3:
        open_ = functools.partial(open, encoding='utf-8')
    else:
        open_ = open
    with open_(os.path.join(test_suite_dir, expected)) as f:
        expected_tree = fromstringlist(f.read() if IRON_PYTHON else f)
    compare_tree(expected_tree, parsed_tree)
def test_write_hints(fx_test_doc):
    """Hints stored on the document are serialized as leading
    <libearth:hint> elements carrying tag/id/value attributes, plus
    tag-xmlns for descriptors that live in a foreign namespace.
    """
    doc, _ = fx_test_doc
    doc._hints.update({
        TestDoc.ns_element_attr: {
            'abc': '123',
            'def': '456'
        },
        TestDoc.title_attr: {
            'ghi': '789',
            'jkl': '012'
        }
    })
    g = write(doc, canonical_order=True)
    tree = fromstringlist(g)
    hint_tag = '{' + SCHEMA_XMLNS + '}hint'
    # Hints for the namespaced ns-element descriptor carry tag-xmlns.
    assert tree[0].tag == hint_tag
    assert tree[0].attrib['tag'] == 'ns-element'
    assert tree[0].attrib['tag-xmlns'] == 'http://earthreader.github.io/'
    assert tree[0].attrib['id'] == 'abc'
    assert tree[0].attrib['value'] == '123'
    assert tree[1].tag == hint_tag
    assert tree[1].attrib['tag'] == 'ns-element'
    assert tree[1].attrib['tag-xmlns'] == 'http://earthreader.github.io/'
    assert tree[1].attrib['id'] == 'def'
    assert tree[1].attrib['value'] == '456'
    # Hints for the default-namespace title descriptor omit tag-xmlns.
    assert tree[2].tag == hint_tag
    assert tree[2].attrib['tag'] == 'title'
    assert 'tag-xmlns' not in tree[2].attrib
    assert tree[2].attrib['id'] == 'ghi'
    assert tree[2].attrib['value'] == '789'
    assert tree[3].tag == hint_tag
    assert tree[3].attrib['tag'] == 'title'
    assert 'tag-xmlns' not in tree[3].attrib
    assert tree[3].attrib['id'] == 'jkl'
    assert tree[3].attrib['value'] == '012'
def test_write_hints(fx_test_doc):
    """Hints stored on the document are serialized as leading
    <libearth:hint> elements carrying tag/id/value attributes, plus
    tag-xmlns for descriptors that live in a foreign namespace.
    """
    doc, _ = fx_test_doc
    doc._hints.update({
        TestDoc.ns_element_attr: {'abc': '123', 'def': '456'},
        TestDoc.title_attr: {'ghi': '789', 'jkl': '012'}
    })
    g = write(doc, canonical_order=True)
    tree = fromstringlist(g)
    hint_tag = '{' + SCHEMA_XMLNS + '}hint'
    # Hints for the namespaced ns-element descriptor carry tag-xmlns.
    assert tree[0].tag == hint_tag
    assert tree[0].attrib['tag'] == 'ns-element'
    assert tree[0].attrib['tag-xmlns'] == 'http://earthreader.github.io/'
    assert tree[0].attrib['id'] == 'abc'
    assert tree[0].attrib['value'] == '123'
    assert tree[1].tag == hint_tag
    assert tree[1].attrib['tag'] == 'ns-element'
    assert tree[1].attrib['tag-xmlns'] == 'http://earthreader.github.io/'
    assert tree[1].attrib['id'] == 'def'
    assert tree[1].attrib['value'] == '456'
    # Hints for the default-namespace title descriptor omit tag-xmlns.
    assert tree[2].tag == hint_tag
    assert tree[2].attrib['tag'] == 'title'
    assert 'tag-xmlns' not in tree[2].attrib
    assert tree[2].attrib['id'] == 'ghi'
    assert tree[2].attrib['value'] == '789'
    assert tree[3].tag == hint_tag
    assert tree[3].attrib['tag'] == 'title'
    assert 'tag-xmlns' not in tree[3].attrib
    assert tree[3].attrib['id'] == 'jkl'
    assert tree[3].attrib['value'] == '012'
def test_element_initialize():
    """Element fields are settable through constructor keywords, and a
    later list mutation shows up when the document is written to XML.
    """
    document = TestDoc(
        title_attr=TextElement(value='Title test'),
        content_attr=TextElement(value=u('내용 테스트')),
        attr_attr='Attribute value',
        text_content_attr='Text content',
        multi_attr=(TextElement(value='a'), TextElement(value='b'))
    )
    assert document.title_attr.value == 'Title test'
    assert document.content_attr.value == u('내용 테스트')
    assert document.attr_attr == 'Attribute value'
    assert document.text_content_attr == 'Text content'
    assert len(document.multi_attr) == 2
    assert document.multi_attr[0].value == 'a'
    assert document.multi_attr[1].value == 'b'
    document.multi_attr.append(TextElement(value='c'))
    assert document.multi_attr[2].value == 'c'
    assert len(document.multi_attr) == 3
    tree = fromstringlist(write(document))
    assert tree.find('title').text == 'Title test'
    assert tree.find('content').text == u('내용 테스트')
    assert tree.attrib['attr'] == 'Attribute value'
    elements = tree.findall('multi')
    assert len(elements) == 3
    for element, letter in zip(elements, 'abc'):
        assert element.text == letter
def apply_timestamp(stage, feed_id, timestamp):
    """Mark the first entry of *feed_id* as read at *timestamp*, check the
    mark survives a write()/read() round-trip, then persist the feed back
    into *stage*.
    """
    with stage:
        feed = stage.feeds[feed_id]
        mark = Mark(marked=True, updated_at=timestamp)
        feed.entries[0].read = mark
        assert feed.entries[0].read.updated_at == timestamp
        roundtripped = read(Feed, write(feed, as_bytes=True))
        actual = roundtripped.entries[0].read.updated_at
        assert actual == timestamp, repr((actual, timestamp))
        stage.feeds[feed_id] = feed
def test_write_test_doc_tree(fx_test_doc):
    """write(canonical_order=True) must emit every TestDoc field as exactly
    20 children in the canonical element order checked below.
    """
    doc, _ = fx_test_doc
    g = write(doc, canonical_order=True)
    tree = fromstringlist(g)
    assert tree.tag == 'test'
    assert tree.attrib == {
        'attr': u'속성 값',
        'attr-decoder': 'decoder test'
    }
    assert tree[0].tag == 'title'
    assert not tree[0].attrib
    assert tree[0].text == u'제목 test'
    assert tree[1].tag == 'content'
    assert tree[1].text == 'Content test'
    assert not tree[1].attrib
    assert tree[2].tag == tree[3].tag == tree[4].tag == 'multi'
    assert tree[2].attrib == tree[3].attrib == tree[4].attrib == {}
    assert tree[2].text == 'a'
    assert tree[3].text == 'b'
    assert tree[4].text == 'c'
    assert tree[5].tag == tree[6].tag == tree[7].tag == 's-multi'
    assert tree[5].attrib == tree[6].attrib == tree[7].attrib == {}
    assert tree[5].text == 'a'
    assert tree[6].text == 'b'
    assert tree[7].text == 'c'
    assert tree[8].tag == 'text-content'
    assert not tree[8].attrib
    assert tree[8].text == u'텍스트 내용'
    assert tree[9].tag == tree[10].tag == 'text-multi'
    assert tree[9].attrib == tree[10].attrib == {}
    assert tree[9].text == 'a'
    assert tree[10].text == 'b'
    # s-text-multi is emitted in reversed order (c, b, a).
    assert tree[11].tag == tree[12].tag == tree[13].tag == 's-text-multi'
    assert tree[11].attrib == tree[12].attrib == tree[13].attrib == {}
    assert tree[11].text == 'c'
    assert tree[12].text == 'b'
    assert tree[13].text == 'a'
    assert tree[14].tag == 'text-decoder'
    assert not tree[14].attrib
    assert tree[14].text == '123.456'
    assert tree[15].tag == 'text-decoder-decorator'
    assert not tree[15].attrib
    assert tree[15].text == '123'
    assert tree[16].tag == 'text-combined-decoder'
    assert not tree[16].attrib
    assert tree[16].text == '1234'
    # Namespaced elements are written with Clark-notation tags.
    assert tree[17].tag == '{http://earthreader.github.io/}ns-element'
    assert tree[17].attrib == {
        '{http://earthreader.github.io/}ns-attr': 'namespace attribute value'
    }
    assert tree[17].text == 'Namespace test'
    assert tree[18].tag == '{http://earthreader.github.io/}ns-text'
    assert not tree[18].attrib
    assert tree[18].text == 'Namespace test'
    assert tree[19].tag == 'content-decoder'
    assert tree[19].text == 'CONTENT DECODER'
    assert not tree[19].attrib
    assert len(tree) == 20
def test_write_test_doc_tree(fx_test_doc):
    """write(canonical_order=True) must emit every TestDoc field as exactly
    20 children in the canonical element order checked below.
    """
    doc, _ = fx_test_doc
    g = write(doc, canonical_order=True)
    tree = fromstringlist(g)
    assert tree.tag == 'test'
    assert tree.attrib == {
        'attr': u('속성 값'),
        'attr-decoder': 'decoder test'
    }
    assert tree[0].tag == 'title'
    assert not tree[0].attrib
    assert tree[0].text == u('제목 test')
    assert tree[1].tag == 'content'
    assert tree[1].text == 'Content test'
    assert not tree[1].attrib
    assert tree[2].tag == tree[3].tag == tree[4].tag == 'multi'
    assert tree[2].attrib == tree[3].attrib == tree[4].attrib == {}
    assert tree[2].text == 'a'
    assert tree[3].text == 'b'
    assert tree[4].text == 'c'
    assert tree[5].tag == tree[6].tag == tree[7].tag == 's-multi'
    assert tree[5].attrib == tree[6].attrib == tree[7].attrib == {}
    assert tree[5].text == 'a'
    assert tree[6].text == 'b'
    assert tree[7].text == 'c'
    assert tree[8].tag == 'text-content'
    assert not tree[8].attrib
    assert tree[8].text == u('텍스트 내용')
    assert tree[9].tag == tree[10].tag == 'text-multi'
    assert tree[9].attrib == tree[10].attrib == {}
    assert tree[9].text == 'a'
    assert tree[10].text == 'b'
    # s-text-multi is emitted in reversed order (c, b, a).
    assert tree[11].tag == tree[12].tag == tree[13].tag == 's-text-multi'
    assert tree[11].attrib == tree[12].attrib == tree[13].attrib == {}
    assert tree[11].text == 'c'
    assert tree[12].text == 'b'
    assert tree[13].text == 'a'
    assert tree[14].tag == 'text-decoder'
    assert not tree[14].attrib
    assert tree[14].text == '123.456'
    assert tree[15].tag == 'text-decoder-decorator'
    assert not tree[15].attrib
    assert tree[15].text == '123'
    assert tree[16].tag == 'text-combined-decoder'
    assert not tree[16].attrib
    assert tree[16].text == '1234'
    # Namespaced elements are written with Clark-notation tags.
    assert tree[17].tag == '{http://earthreader.github.io/}ns-element'
    assert tree[17].attrib == {
        '{http://earthreader.github.io/}ns-attr': 'namespace attribute value'
    }
    assert tree[17].text == 'Namespace test'
    assert tree[18].tag == '{http://earthreader.github.io/}ns-text'
    assert not tree[18].attrib
    assert tree[18].text == 'Namespace test'
    assert tree[19].tag == 'content-decoder'
    assert tree[19].text == 'CONTENT DECODER'
    assert not tree[19].attrib
    assert len(tree) == 20
def test_rss_parser():
    """The feed produced by rss2.parse_rss survives a write()/read()
    round-trip field-for-field, and the crawler-only metadata
    (lastBuildDate, ttl) is returned separately.
    """
    # Install the stub HTTP handler so no real network access happens.
    my_opener = urllib2.build_opener(TestHTTPHandler)
    urllib2.install_opener(my_opener)
    crawled_feed, data_for_crawl = rss2.parse_rss(
        rss_xml, 'http://sourcetest.com/rss.xml'
    )
    # Serialize and re-read the feed, then compare against the original.
    feed = read(Feed, write(crawled_feed, as_bytes=True))
    assert crawled_feed.id == feed.id
    title = crawled_feed.title
    assert title.type == feed.title.type
    assert title.value == feed.title.value
    links = crawled_feed.links
    assert links[1].mimetype == feed.links[1].mimetype
    assert links[1].relation == feed.links[1].relation
    assert links[1].uri == feed.links[1].uri
    rights = crawled_feed.rights
    assert rights.type == feed.rights.type
    assert rights.value == feed.rights.value
    contributors = crawled_feed.contributors
    assert contributors[0].name == feed.contributors[0].name
    assert contributors[0].email == feed.contributors[0].email
    assert contributors[1].name == feed.contributors[1].name
    assert contributors[1].email == feed.contributors[1].email
    updated_at = crawled_feed.updated_at
    assert updated_at == feed.updated_at
    categories = crawled_feed.categories
    assert categories[0].term == feed.categories[0].term
    entries = crawled_feed.entries
    assert entries[0].title.type == feed.entries[0].title.type
    assert entries[0].title.value == feed.entries[0].title.value
    assert entries[0].links[0].mimetype == feed.entries[0].links[0].mimetype
    assert entries[0].links[0].relation == feed.entries[0].links[0].relation
    assert entries[0].links[0].uri == feed.entries[0].links[0].uri
    assert entries[0].content.value == feed.entries[0].content.value
    assert entries[0].authors[0].name == feed.entries[0].authors[0].name
    assert entries[0].authors[0].email == feed.entries[0].authors[0].email
    assert entries[0].links[1].mimetype == feed.entries[0].links[1].mimetype
    assert entries[0].links[1].uri == feed.entries[0].links[1].uri
    assert entries[0].id == feed.entries[0].id
    assert (entries[0].published_at == entries[0].updated_at ==
            feed.entries[0].published_at == feed.entries[0].updated_at)
    assert data_for_crawl == {
        'lastBuildDate': datetime.datetime(2002, 9, 7, 0, 0, 1, tzinfo=utc),
        'ttl': '10',
    }
    # The first entry carries a <source>; it round-trips too, but its own
    # entry list is not preserved.
    source = entries[0].source
    assert source.title.type == feed.entries[0].source.title.type
    assert source.title.value == feed.entries[0].source.title.value
    assert source.links[1].mimetype == feed.entries[0].source.links[1].mimetype
    assert source.links[1].uri == feed.entries[0].source.links[1].uri
    assert source.links[1].relation == feed.entries[0].source.links[1].relation
    assert source.subtitle.type == feed.entries[0].source.subtitle.type
    assert source.subtitle.value == feed.entries[0].source.subtitle.value
    assert not source.entries
def apply_timestamp(stage, feed_id, timestamp):
    """Mark the first entry of *feed_id* as read at *timestamp*, check the
    mark survives a write()/read() round-trip, then persist the feed back
    into *stage*.
    """
    with stage:
        feed = stage.feeds[feed_id]
        mark = Mark(marked=True, updated_at=timestamp)
        feed.entries[0].read = mark
        assert feed.entries[0].read.updated_at == timestamp
        roundtripped = read(Feed, write(feed, as_bytes=True))
        actual = roundtripped.entries[0].read.updated_at
        assert actual == timestamp, repr((actual, timestamp))
        stage.feeds[feed_id] = feed
def test_write_xmlns_doc(fx_xmlns_doc):
    """Namespaced documents serialize with ns0/ns1 prefix declarations on
    the root element.

    NOTE(review): the exact whitespace of the expected literal (child
    indentation, the indent= argument) was lost when this file's formatting
    was collapsed — verify it against the writer's actual output.
    """
    doc = fx_xmlns_doc
    g = write(doc, indent=' ', canonical_order=True)
    assert ''.join(g) == text_type('''\
<?xml version="1.0" encoding="utf-8"?>
<ns0:nstest xmlns:ns0="http://earthreader.github.io/"\
 xmlns:ns1="https://github.com/earthreader/libearth">
<ns0:samens>Same namespace</ns0:samens>
<ns1:otherns>Other namespace</ns1:otherns>
</ns0:nstest>''')
def test_write_subscription_with_ascii_title():
    """A subscription list built from a feed with a plain-ASCII title
    serializes to a non-empty document."""
    feed, _ = parse_rss2(rss_template_with_title.format('english'))
    feed.id = 'id'
    subscriptions = SubscriptionList()
    subscriptions.subscribe(feed)
    output = ''.join(write(subscriptions))
    assert output
def test_write_subscription_with_ascii_title():
    """A subscription list built from a feed with a plain-ASCII title
    serializes to a non-empty document."""
    feed, _ = parse_rss(rss_template_with_title.format('english'))
    feed.id = 'id'
    subscriptions = SubscriptionList()
    subscriptions.subscribe(feed)
    output = ''.join(write(subscriptions))
    assert output
def test_write_xmlns_doc(fx_xmlns_doc):
    """Namespaced documents serialize with ns0/ns1 prefix declarations plus
    the libearth schema namespace on the root element.

    NOTE(review): the exact whitespace of the expected literal (child
    indentation, the indent= argument) was lost when this file's formatting
    was collapsed — verify it against the writer's actual output.
    """
    doc = fx_xmlns_doc
    g = write(doc, indent=' ', canonical_order=True)
    assert ''.join(g) == text_type('''\
<?xml version="1.0" encoding="utf-8"?>
<ns0:nstest xmlns:ns0="http://earthreader.github.io/"\
 xmlns:libearth="http://earthreader.org/schema/"\
 xmlns:ns1="https://github.com/earthreader/libearth">
<ns0:samens>Same namespace</ns0:samens>
<ns1:otherns>Other namespace</ns1:otherns>
</ns0:nstest>''')
def test_rss_parser():
    """The feed produced by parse_rss survives a write()/read() round-trip
    field-for-field, and the crawler-only metadata (lastBuildDate, ttl) is
    returned separately.
    """
    # Install the stub HTTP handler so no real network access happens.
    my_opener = urllib2.build_opener(TestHTTPHandler)
    urllib2.install_opener(my_opener)
    crawled_feed, data_for_crawl = parse_rss(rss_xml,
                                             'http://sourcetest.com/rss.xml')
    # Serialize and re-read the feed, then compare against the original.
    feed = read(Feed, write(crawled_feed, as_bytes=True))
    assert crawled_feed.id == feed.id
    title = crawled_feed.title
    assert title.type == feed.title.type
    assert title.value == feed.title.value
    links = crawled_feed.links
    assert links[1].mimetype == feed.links[1].mimetype
    assert links[1].relation == feed.links[1].relation
    assert links[1].uri == feed.links[1].uri
    rights = crawled_feed.rights
    assert rights.type == feed.rights.type
    assert rights.value == feed.rights.value
    contributors = crawled_feed.contributors
    assert contributors[0].name == feed.contributors[0].name
    assert contributors[0].email == feed.contributors[0].email
    assert contributors[1].name == feed.contributors[1].name
    assert contributors[1].email == feed.contributors[1].email
    updated_at = crawled_feed.updated_at
    assert updated_at == feed.updated_at
    categories = crawled_feed.categories
    assert categories[0].term == feed.categories[0].term
    entries = crawled_feed.entries
    assert entries[0].title.type == feed.entries[0].title.type
    assert entries[0].title.value == feed.entries[0].title.value
    assert entries[0].links[0].mimetype == feed.entries[0].links[0].mimetype
    assert entries[0].links[0].relation == feed.entries[0].links[0].relation
    assert entries[0].links[0].uri == feed.entries[0].links[0].uri
    assert entries[0].content.value == feed.entries[0].content.value
    assert entries[0].authors[0].name == feed.entries[0].authors[0].name
    assert entries[0].authors[0].email == feed.entries[0].authors[0].email
    assert entries[0].links[1].mimetype == feed.entries[0].links[1].mimetype
    assert entries[0].links[1].uri == feed.entries[0].links[1].uri
    assert entries[0].id == feed.entries[0].id
    assert (entries[0].published_at == entries[0].updated_at ==
            feed.entries[0].published_at == feed.entries[0].updated_at)
    assert data_for_crawl == {
        'lastBuildDate': datetime.datetime(2002, 9, 7, 0, 0, 1, tzinfo=utc),
        'ttl': '10',
    }
    # The first entry carries a <source>; it round-trips too, but its own
    # entry list is not preserved.
    source = entries[0].source
    assert source.title.type == feed.entries[0].source.title.type
    assert source.title.value == feed.entries[0].source.title.value
    assert source.links[1].mimetype == feed.entries[0].source.links[1].mimetype
    assert source.links[1].uri == feed.entries[0].source.links[1].uri
    assert source.links[1].relation == feed.entries[0].source.links[1].relation
    assert source.subtitle.type == feed.entries[0].source.subtitle.type
    assert source.subtitle.value == feed.entries[0].source.subtitle.value
    assert not source.entries
def test_write_subscription_with_nonascii_title():
    """Regression test: :class:`SubscriptionList` converts the feed title
    to :class:`str` and :func:`write` encodes it as UTF-8, which used to
    raise :exc:`UnicodeDecodeError` for non-ASCII titles.  Writing must
    succeed and produce non-empty output.
    """
    feed, _ = parse_rss2(rss_template_with_title.format('한글'))
    feed.id = 'id'
    subscriptions = SubscriptionList()
    subscriptions.subscribe(feed)
    output = ''.join(write(subscriptions))
    assert output
def test_write_subscription_with_nonascii_title():
    """Regression test: :class:`SubscriptionList` converts the feed title
    to :class:`str` and :func:`write` encodes it as UTF-8, which used to
    raise :exc:`UnicodeDecodeError` for non-ASCII titles.  Writing must
    succeed and produce non-empty output.
    """
    feed, _ = parse_rss(rss_template_with_title.format('한글'))
    feed.id = 'id'
    subscriptions = SubscriptionList()
    subscriptions.subscribe(feed)
    output = ''.join(write(subscriptions))
    assert output
def test_mutate_read_element_list(fx_test_doc):
    """Inserting into a list that was read from an existing document keeps
    both the Python view and the serialized XML in the new order."""
    doc, _ = fx_test_doc
    doc.multi_attr.insert(2, TextElement(value='inserted'))
    expected_values = ['a', 'b', 'inserted', 'c']
    for index, expected in enumerate(expected_values):
        assert doc.multi_attr[index].value == expected
    assert len(doc.multi_attr) == 4
    tree = fromstringlist(write(doc))
    elements = tree.findall('multi')
    assert len(elements) == 4
    for element, expected in zip(elements, expected_values):
        assert element.text == expected
def test_validate_recurse(element, recur_valid, valid):
    """validate() honours the recurse flag in both reporting modes, and
    write() validates recursively (raising IntegrityError when the element
    is recursively invalid)."""
    def check_validate(expected, recurse):
        # Non-raising mode reports the verdict as a boolean...
        assert validate(element, recurse=recurse,
                        raise_error=False) is expected
        # ...and raising mode raises IntegrityError exactly when invalid.
        raised = False
        try:
            validate(element, recurse=recurse, raise_error=True)
        except IntegrityError:
            raised = True
        assert raised is (not expected)

    check_validate(recur_valid, True)
    check_validate(valid, False)
    raised = False
    try:
        for _ in write(element):
            pass
    except IntegrityError:
        raised = True
    assert raised is (not recur_valid)
def test_attribute_codec():
    """The attribute codec encodes a tuple as '1,2' and decodes it back."""
    original = CodecTestDoc(attr=(1, 2))
    tree = fromstringlist(write(original))
    assert tree.attrib['attr'] == '1,2'
    restored = read(CodecTestDoc, etree_tobyteslist(tree))
    assert restored.attr == (1, 2)
def test_write_test_doc(fx_test_doc): doc, _ = fx_test_doc gf = lambda: write(doc, indent=' ', canonical_order=True, hints=False) print(''.join(gf())) assert ''.join(gf()) == '''\
def test_text_codec():
    """The text codec encodes a tuple as '3,4' and decodes it back."""
    original = CodecTestDoc(text=(3, 4))
    tree = fromstringlist(write(original))
    assert tree.find('text').text == '3,4'
    restored = read(CodecTestDoc, etree_tobyteslist(tree))
    assert restored.text == (3, 4)
def test_write_none_text():
    """A None text field is simply omitted from the serialized document,
    while the attribute is still written."""
    document = CodecTestDoc(attr=(1, 2), text=None)
    tree = fromstringlist(write(document))
    assert tree.attrib['attr'] == '1,2'
    assert tree.find('text') is None
def test_attribute_encode_error():
    """An unencodable attribute value surfaces EncodeError while the write
    generator is being consumed."""
    document = EncodeErrorDoc(attr=True)
    with raises(EncodeError):
        list(write(document))
# Regenerate the expected-output fixtures (*.out.xml) for every test-suite
# input that is missing one, logging verbosely and reporting which step
# (format detection, parsing, writing) failed.
logging.basicConfig(level=logging.DEBUG)
formats = {}
for filename in missing_inputs:
    print(filename)
    with open(os.path.join(test_suite_dir, filename)) as f:
        xml = f.read()
    try:
        parse = get_format(xml)
    except Exception:
        print('Failed to detect the format of', filename, file=sys.stderr)
        raise
    # FIX: filename.rstrip('.xml') strips a *character set* ('.', 'x', 'm',
    # 'l'), not the suffix, mangling stems that end in those letters.
    # Slice off the exact '.xml' suffix instead.
    stem = filename[:-4] if filename.endswith('.xml') else filename
    uri_filename = stem + '.uri.txt'
    try:
        with open(os.path.join(test_suite_dir, uri_filename)) as f:
            base_uri = f.read().strip()
    except (IOError, OSError):
        # No companion .uri.txt fixture: fall back to a generic base URI.
        base_uri = 'http://example.com/'
    try:
        feed, _ = parse(xml, feed_url=base_uri)
    except Exception:
        print('Failed to parse', filename, file=sys.stderr)
        raise
    out_filename = stem + '.out.xml'
    try:
        expected = ''.join(write(feed, canonical_order=True, hints=False))
        with open(os.path.join(test_suite_dir, out_filename), 'w') as f:
            f.write(expected)
    except Exception:
        print('Failed to write', out_filename, file=sys.stderr)
        raise
def test_atom_parser():
    """The feed produced by parse_atom survives a write()/read()
    round-trip field-for-field, including per-entry categories, content,
    summary, and the third entry's atom:source element.
    """
    url = 'http://vio.atomtest.com/feed/atom'
    crawled_feed, _ = parse_atom(atom_xml, url)
    # Serialize and re-read the feed, then compare against the original.
    feed = read(Feed, write(crawled_feed, as_bytes=True))
    title = crawled_feed.title
    assert title.type == feed.title.type
    assert title.value == feed.title.value
    subtitle = crawled_feed.subtitle
    assert subtitle.type == feed.subtitle.type
    assert subtitle.value == feed.subtitle.value
    links = crawled_feed.links
    assert links[0].relation == feed.links[0].relation
    assert links[0].mimetype == feed.links[0].mimetype
    assert links[0].uri == feed.links[0].uri
    assert links[1].relation == feed.links[1].relation
    assert links[1].mimetype == feed.links[1].mimetype
    assert links[1].uri == feed.links[1].uri
    authors = crawled_feed.authors
    assert authors[0].name == feed.authors[0].name
    assert authors[0].email == feed.authors[0].email
    categories = crawled_feed.categories
    assert categories[0].term == feed.categories[0].term
    contributors = crawled_feed.contributors
    assert contributors[0].name == feed.contributors[0].name
    generator = crawled_feed.generator
    assert generator.uri == feed.generator.uri
    assert generator.value == feed.generator.value
    icon = crawled_feed.icon
    assert icon == feed.icon
    logo = crawled_feed.logo
    assert logo == feed.logo
    rights = crawled_feed.rights
    assert rights.type == feed.rights.type
    assert rights.value == feed.rights.value
    updated_at = crawled_feed.updated_at
    assert updated_at == feed.updated_at
    entries = crawled_feed.entries
    assert entries[0].id == feed.entries[0].id
    assert entries[0].authors[0].name == feed.entries[0].authors[0].name
    assert entries[0].title.type == feed.entries[0].title.type
    assert entries[0].title.value == feed.entries[0].title.value
    assert entries[0].links[0].relation == feed.entries[0].links[0].relation
    assert entries[0].links[0].uri == feed.entries[0].links[0].uri
    assert entries[0].updated_at == feed.entries[0].updated_at
    assert entries[0].published_at == feed.entries[0].published_at
    assert entries[0].categories[0].scheme_uri == \
        feed.entries[0].categories[0].scheme_uri
    assert entries[0].categories[0].term == feed.entries[0].categories[0].term
    assert entries[0].categories[1].scheme_uri == \
        feed.entries[0].categories[1].scheme_uri
    assert entries[0].categories[1].term == feed.entries[0].categories[1].term
    assert entries[0].content.type == feed.entries[0].content.type
    assert entries[0].content.value == feed.entries[0].content.value
    assert entries[0].content.source_uri == feed.entries[0].content.source_uri
    assert entries[0].summary.type == feed.entries[0].summary.type
    assert entries[0].summary.value == feed.entries[0].summary.value
    assert entries[1].id == feed.entries[1].id
    assert entries[1].authors[0].name == feed.entries[1].authors[0].name
    assert entries[1].title.type == feed.entries[1].title.type
    assert entries[1].title.value == feed.entries[1].title.value
    assert entries[1].updated_at == feed.entries[1].updated_at
    assert entries[2].id == feed.entries[2].id
    assert entries[2].authors[0].name == feed.entries[2].authors[0].name
    assert entries[2].title.type == feed.entries[2].title.type
    assert entries[2].title.value == feed.entries[2].title.value
    assert entries[2].updated_at == feed.entries[2].updated_at
    # The third entry carries an atom:source; compare it field by field.
    source = entries[2].source
    feed_source = feed.entries[2].source
    assert source.authors[0].name == feed_source.authors[0].name
    assert source.categories[0].term == feed_source.categories[0].term
    assert source.contributors[0].name == feed_source.contributors[0].name
    assert source.links[0] == feed_source.links[0]
    assert source.id == feed_source.id
    assert source.generator == feed_source.generator
    assert source.icon == feed_source.icon
    assert source.logo == feed_source.logo
    assert source.rights == feed_source.rights
    assert source.subtitle == feed_source.subtitle
def test_content_encode_error():
    """An unencodable content value surfaces EncodeError while the write
    generator is being consumed."""
    document = ContentEncodeErrorDoc()
    with raises(EncodeError):
        list(write(document))
def test_write_none_attribute():
    """A None attribute is simply omitted from the serialized document,
    while the text element is still written."""
    document = CodecTestDoc(attr=None, text=(1, 2))
    tree = fromstringlist(write(document))
    assert 'attr' not in tree.attrib
    assert tree.find('text').text == '1,2'
def test_write_test_doc(fx_test_doc): doc, _ = fx_test_doc g = write(doc, indent=' ', canonical_order=True) assert ''.join(g) == '''\
def test_write_test_doc(fx_test_doc): doc, _ = fx_test_doc result = write(doc, indent=' ', canonical_order=True, hints=False) print(''.join(result)) assert ''.join(result) == '''\
def test_content_codec():
    """The content codec encodes a tuple as '5,6' and decodes it back."""
    original = ContentCodecTestDoc(c=(5, 6))
    tree = fromstringlist(write(original, as_bytes=True))
    assert tree.text == '5,6'
    restored = read(ContentCodecTestDoc, etree_tobyteslist(tree))
    assert restored.c == (5, 6)
def test_text_encode_error():
    """An unencodable text value surfaces EncodeError while the write
    generator is being consumed."""
    document = EncodeErrorDoc(text=True)
    with raises(EncodeError):
        list(write(document))