def get_feed(feed_id):
    """Serve the fused feed *feed_id* as an Atom document.

    The id is sanitized with ``secure_filename`` (prevents path traversal)
    and mapped to ``<APP_CONFIG_FEEDS>/<feed_id>.json``; a missing spec
    file aborts the request with HTTP 404.

    :param feed_id: identifier of the fused-feed spec file (without ``.json``).
    :returns: a Flask response carrying the rendered Atom feed.
    :raises: aborts with 404 when no spec file exists for ``feed_id``.
    """
    # Sanitize before building a filesystem path from request data.
    feed_id = secure_filename(feed_id)
    feed_config_filepath = os.path.join(APP_CONFIG_FEEDS, feed_id + ".json")
    if not os.path.isfile(feed_config_filepath):
        # Fixed: removed leftover debug `print feed_config_filepath`, which
        # leaked a server-side path to stdout on every 404.
        abort(404)

    feed = feedops.FusedFeed.load_from_spec_file(feed_config_filepath)
    feed.fetch()

    feed_uri = request.url_root
    if len(feed.sources) == 1:
        # If there is only 1 source in a fusedfeed, just give the feed's
        # HTML alternate.
        # TODO: instead, we should generate our own HTML representation.
        feed_uri = feed.sources[0].html_uri

    output = AtomFeed(feed.name,
                      feed_url=request.url,
                      author="FeedFuser",
                      links=[{"href": feed_uri,
                              "rel": "alternate",
                              "type": "text/html"}])

    for entry in feed.entries:
        # Entries with no title fall back to their link so the Atom
        # <title> element is never empty.
        title = entry.title or entry.link
        feed_item = FeedEntry(id=entry.guid,
                              title=title,
                              updated=entry.update_date,
                              author=entry.author,
                              published=entry.pub_date,
                              links=[{"href": entry.link,
                                      "rel": "alternate",
                                      "type": "text/html"}])
        if entry.summary:
            # NOTE(review): `unicode` is Python 2 — kept for compatibility
            # with the rest of this file; under Python 3 this is a NameError.
            feed_item.summary = unicode(entry.summary)
            feed_item.summary_type = "text" if entry.summary_type == "text/plain" else "html"
        if entry.content:
            feed_item.content = unicode(entry.content)
            feed_item.content_type = "text" if entry.content_type == "text/plain" else "html"
        output.add(feed_item)

    return output.get_response()
def test_parse_atom(session, feed):
    """RssParser over three successive parses of one Atom feed.

    Round 1: a single entry -> 1 new entry, 2 authors parsed.
    Round 2: a second entry appended -> 1 new entry, 0 updated.
    Round 3: first entry's content changed -> 0 new, 1 updated.
    """
    authors = [
        {"name": "John Doe", "email": "*****@*****.**",
         "uri": "http://test.com/testauthor"},
        {"name": "Jane Smith", "email": "*****@*****.**",
         "uri": "http://test.com/testauthor2"},
    ]

    def fresh_parser():
        # Each round uses a brand-new parser bound to the fixture feed.
        p = RssParser()
        p.feed = feed
        return p

    atomfeed = AtomFeed(title="Test Feed", feed_url="http://testfeed.com")
    entry = FeedEntry(
        title="Jane Smith",
        url="http://testfeed.com/testentry",
        id="1234",
        updated=datetime.utcnow(),
        content="Test Entry",
        author=authors,
    )
    atomfeed.entries.append(entry)

    parser = fresh_parser()
    parser.data = atomfeed.to_string()
    parser.parse()
    session.commit()

    parsed_authors = parser.authors
    assert len(parser.entries) == 1
    assert len(parsed_authors) == 2
    assert parser.len_new_entries == 1
    assert parser.len_updated_entries == 0
    assert next((e for e in parser.entries if e.guid == "1234"), None)
    for expected in authors:
        found = next(
            (r for r in parsed_authors if r.name == expected["name"]), None
        )
        assert found is not None
        assert found.name == expected["name"]
        assert found.email == expected["email"]
        assert found.url == expected["uri"]

    entry2 = FeedEntry(
        title="New Entry",
        url="http://testfeed.com/newentry",
        id="98765",
        updated=datetime.utcnow(),
        content="New Entry",
        author=authors,
    )
    atomfeed.entries.append(entry2)

    # Parse feed again, should have 1 new entry and no updated entries
    parser = fresh_parser()
    parser.data = atomfeed.to_string()
    parser.parse()
    parsed_authors = parser.authors
    assert len(parser.entries) == 1
    assert len(parsed_authors) == 2
    assert parser.len_new_entries == 1
    assert parser.len_updated_entries == 0
    assert next((e for e in parser.entries if e.guid == "98765"), None)

    # Update entry content and parse feed again, should have 1 updated entry and no new entries
    entry.content = "Updated Test Entry"
    parser = fresh_parser()
    parser.data = atomfeed.to_string()
    parser.parse()
    parsed_authors = parser.authors
    assert len(parser.entries) == 1
    assert len(parsed_authors) == 0
    assert parser.len_new_entries == 0
    assert parser.len_updated_entries == 1
    assert next((e for e in parser.entries if e.guid == "1234"), None)