def render_atom(self):
    """Render the whole site as an Atom feed and return it as bytes.

    Iterates every article known to ArticleManager, skipping links with no
    article, and emits one feed entry per article.
    """
    fg = FeedGenerator()
    # Channel-level metadata: site URL doubles as the feed id.
    fg.id(self.site_url)
    fg.title(self.site_title)
    fg.link(href = self.site_url,rel = 'alternate')
    fg.link(href = self.site_url + 'atom.xml',rel = 'self')
    fg.language('zh-cn')
    link_list = ArticleManager.sharedManager().link_list()
    for link in link_list:
        article = ArticleManager.sharedManager().article_for_link(link)
        if not article:
            # Link without a backing article — skip silently.
            continue
        fe = fg.add_entry()
        fe.id(article.article_link)
        fe.link(link = {'href':self.site_url + article.article_link})
        fe.title(article.article_title)
        fe.description(article.article_subtitle or '')
        fe.author(name = article.author or '', email = article.author_email or '')
        # Publish date is stored as 'YYYY-MM-DD'; rebuilt with a UTC+8
        # timezone (UTC(8) is a project-local tzinfo — TODO confirm offset).
        d = datetime.strptime(article.article_publish_date,'%Y-%m-%d')
        pubdate = datetime(year = d.year, month = d.month, day = d.day,tzinfo = UTC(8))
        fe.pubdate(pubdate)
        # Side effect: populates article._content_html before we read it.
        article.render_content_html()
        fe.content(content = article._content_html, type = 'html')
    atom_feed = fg.atom_str(pretty = True)
    return atom_feed
def main():
    """Build an RSS file ('rss.xml') from cached VK group wall posts.

    NOTE(review): the live API wall fetch is commented out; the function
    currently reads posts from a locally pickled file named 'out'.
    `_` appears to be a text-conversion helper defined elsewhere — confirm.
    """
    session = vk.Session()
    api = vk.API(session)
    group_id = '96469126'
    group_info = api.groups.getById(group_ids=group_id, fields=['description', 'site', 'name', 'photo', 'gid'])
    assert len(group_info) == 1
    group_info = group_info[0]
    url = 'http://vk.com/club{}'.format(group_info['gid'])
    # a = api.wall.get(owner_id=-1 * group_info['gid'])
    #
    # with open('out', 'wb') as fio:
    #     pickle.dump(a, fio)
    # Security note: pickle.loads on this file is only safe because the file
    # is produced locally by the commented-out dump above.
    with open('out', 'rb') as fio:
        data = pickle.loads(fio.read())
    assert len(data) > 1
    fg = FeedGenerator()
    fg.id(url)
    fg.title(_(group_info['name']))
    fg.description(_(group_info['description']))
    fg.logo(group_info['photo'])
    # Prefer the group's own site URL when present, else the club page.
    site_url = group_info.get('site', url) if group_info.get('site', url) else url
    fg.link(href=_(site_url))
    fg.link(href=_(site_url), rel='self')
    fg.link(href=_(site_url), rel='alternate')
    fg.author({'name': 'Alexander Sapronov', 'email': '*****@*****.**'})
    fg.webMaster('[email protected] (Alexander Sapronov)')
    # Hashtags in the post text become feed categories.
    pat = re.compile(r"#(\w+)")
    # data[0] presumably holds the total count, not a post — TODO confirm.
    for x in data[1:]:
        post_link = "{}?w=wall-{}_{}".format(url, group_info['gid'], x['id'])
        e = fg.add_entry()
        # text = x.get('text', '').replace('<br>', '\n')
        text = x.get('text', '')
        e.description(_(text))
        e.author({'name': _(get_author_name(api, x.get('from_id')))})
        e.id(post_link)
        e.link(href=_(post_link))
        e.link(href=_(post_link), rel='alternate')
        tags = pat.findall(text)
        title = x.get('text', '')
        for tag in tags:
            e.category(term=_(tag))
            # Strip each hashtag from the title text.
            title = title.replace('#{}'.format(tag), '')
        # Drop HTML tags, then take the first 80-char wrapped line as title.
        title = re.sub('<[^<]+?>', ' ', title)
        title = textwrap.wrap(title, width=80)[0]
        e.title(_(title.strip()))
    fg.rss_file('rss.xml')
def latestRss(userID):
    """Return an RSS document (bytes) of the user's unseen, already-aired episodes."""
    userID = userID.lower()
    today = date.today().strftime('%Y-%m-%d')

    # Gather (showID, episode) pairs for every unseen episode that has aired.
    shows = {}
    episodes = []
    for showID in series.getUserShowList(userID):
        info = series.getShowInfo(userID, showID, withEpisodes=True, onlyUnseen=True)
        shows[showID] = info
        for ep in info['episodes']:
            if ep['airdate'] and airdateKey(ep['airdate']) < today:
                episodes.append((showID, ep))

    # Most recently aired first.
    episodes.sort(key=episodeAirdateKey, reverse=True)

    feed = FeedGenerator()
    feed.id(userID)
    feed.title('%s\'s shows' % userID)
    feed.description('Unseen episodes')
    feed.link(href=request.url_root)
    feed.language('en')

    for showID, ep in episodes:
        item = feed.add_entry()
        item.id('%s/%s' % (showID, ep['episode_id']))
        item.title('%s S%02dE%02d: %s' % (shows[showID]['name'], ep['season'],
                                          ep['episode'], ep['title']))

    return feed.rss_str(pretty=True)
def generate_feed(page=1):
    """Build the "Pub Packages for Dart" Atom feed.

    Args:
        page: 1-based page number of the recently-updated package listing.

    Returns:
        A FeedGenerator populated with one entry per package on the page
        (10 packages per page, newest update first).
    """
    feed = FeedGenerator()
    feed.id("https://pub.dartlang.org/feed.atom")
    feed.title("Pub Packages for Dart")
    feed.link(href="https://pub.dartlang.org/", rel="alternate")
    feed.link(href="https://pub.dartlang.org/feed.atom", rel="self")
    feed.description("Last Updated Packages")
    feed.author({"name": "Dart Team"})

    pager = QueryPager(int(page), "/feed.atom?page=%d",
                       Package.all().order('-updated'),
                       per_page=10)
    # Note: the original kept a loop counter `i` that was never used;
    # it has been removed.
    for item in pager.get_items():
        version = item.latest_version
        entry = feed.add_entry()
        # pubspec authors are (name, email)-like tuples; only the name is used.
        for author in version.pubspec.authors:
            entry.author({"name": author[0]})
        entry.title("v" + version.pubspec.get("version") + " of " + item.name)
        entry.link(link={"href": item.url, "rel": "alternate",
                         "title": item.name})
        # Anchor the entry id to the exact version so updates produce new ids.
        entry.id("https://pub.dartlang.org/packages/" + item.name + "#" +
                 version.pubspec.get("version"))
        entry.description(version.pubspec.get("description", "Not Available"))
        readme = version.readme
        if readme is not None:
            entry.content(readme.render(), type='html')
        else:
            entry.content("<p>No README Found</p>", type='html')
    return feed
def gen_feed(user, base_url, path, debug=False):
    """Write an RSS feed of a user's Snapchat story media to <path>/<user>.xml.

    Files in *path* named "<user>~<timestamp>..." with .mp4/.jpg extensions
    become feed entries, newest first.
    """
    feed_url = urlparse.urljoin(base_url, user + '.xml')

    feed = FeedGenerator()
    feed.id(feed_url)
    feed.title('Snapchat story for ' + user)
    feed.link(href=feed_url, rel='self')
    feed.language('en')
    feed.description('Snapchat media')

    # Reverse lexical sort puts the newest unix-timestamped files first.
    for name in sorted(os.listdir(path), reverse=True):
        if name.split('~')[0] != user:
            continue
        if os.path.splitext(name)[1] in ['.mp4', '.jpg']:
            item = feed.add_entry()
            item.id(urlparse.urljoin(base_url, name))
            item.link(href=urlparse.urljoin(base_url, name))
            item.title(name)

    # Persist the feed next to the media files.
    feed.rss_file(os.path.join(path, user + '.xml'))

    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    if debug:
        print('{0} Regenerated {1}'.format(stamp, feed_url))
def write_podcast(show, podcast_dir, base_public_url, showlocal_tz):
    """Create the podcast file for *show* as <podcast_dir>/<show.id>.xml.

    Args:
        show: object with id, name, image_url and description attributes.
        podcast_dir: directory holding the show's "<id>_*.mp3" files.
        base_public_url: public URL prefix the mp3s/feed are served from.
        showlocal_tz: timezone used to derive each episode's pubdate.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')

    url = "{}{}.xml".format(base_public_url, show.id)
    fg.id(url.split('.')[0])
    fg.title(show.name)
    fg.image(show.image_url)
    fg.description(show.description)
    fg.link(href=url, rel='self')

    # collect all mp3s for the given show
    all_mp3s = glob.glob(os.path.join(podcast_dir, "{}_*.mp3".format(show.id)))

    for filepath in all_mp3s:
        filename = os.path.basename(filepath)
        mp3_date = _get_date_from_mp3_path(filepath, showlocal_tz)
        mp3_size = os.stat(filepath).st_size
        mp3_url = base_public_url + filename
        mp3_id = filename.split('.')[0]
        title = "Programa del {0:%d}/{0:%m}/{0:%Y}".format(mp3_date)

        # build the rss entry
        fe = fg.add_entry()
        fe.id(mp3_id)
        fe.pubdate(mp3_date)
        fe.title(title)
        fe.enclosure(mp3_url, str(mp3_size), 'audio/mpeg')

    # Fix: the original also called fg.rss_str(pretty=True) and discarded the
    # result — rss_file() performs the serialization itself, so the extra
    # call was pure wasted work.
    fg.rss_file(os.path.join(podcast_dir, '{}.xml'.format(show.id)))
def _filter_fb_rss_feeed(url):
    """Filter a Facebook notifications RSS feed down to 'shared a link' items.

    Returns a new Atom feed (bytes) containing one entry per matching
    notification, with the shared URL extracted from Facebook's redirect link.
    NOTE(review): urlparse is used module-style (Python 2); under Python 3
    this would be urllib.parse — confirm the runtime.
    """
    parsed_feed = feedparser.parse(url)
    # Keep only notifications of the form '<name> shared a link: "<title>"'.
    filtered_entries = filter(
        lambda x: ' shared a link: "' in x.title, parsed_feed.entries)
    fg = FeedGenerator()
    fg.id('https://fb-notifications-to-pocket.herokuapp.com/')
    fg.title('Facebook Notifications to Pocket')
    fg.author({'name': 'Pankaj Singh', 'email': '*****@*****.**'})
    fg.description(
        '''Filter FB notifications which contain a link and generate a new rss feed which will be used by IFTTT''')
    fg.link(href='https://fb-notifications-to-pocket.herokuapp.com/')
    for entry in filtered_entries:
        root = etree.HTML(entry.summary_detail.value)
        # Title sits between quotes after 'shared a link: '; strip the quotes.
        title = entry.title.split(" shared a link: ")[1].strip()[1:-2]
        author_name = entry.title.split(" shared a link: ")[0].strip()
        # The last <a> in the summary is Facebook's l.php redirect; the real
        # target URL is carried in its "u" query parameter.
        url = urlparse.parse_qs(
            urlparse.urlparse(root.findall(".//a")[-1].attrib["href"]).query)["u"][0]
        # Prefer the page's own title when it can be fetched.
        title = get_title_for_url(url) or title
        fe = fg.add_entry()
        fe.id(entry.id)
        fe.link(href=url)
        fe.published(entry.published)
        fe.author({'name': author_name})
        fe.title(title)
    return fg.atom_str(pretty=True)
class Feeder():
    """Scrapes a supported site and exposes its entries as an Atom feed."""

    def __init__(self, url, title='', feedURL=''):
        """Pick a scraper for *url*, run it, and build the feed.

        Raises UnsupportedService when no scraper matches the URL.
        """
        if url.startswith("https://twitter.com/"):
            scraper = TwitterScraper(url)
            if title == '':
                title = "Twitter: @" + url.split('/')[3]
        elif url.startswith("http://www.lindwurm-linden.de/termine"):
            scraper = LindwurmScraper(url)
            if title == '':
                title = "Lindwurm: Termine"
        else:
            raise UnsupportedService("No scraper found for this URL.")

        feed = FeedGenerator()
        feed.id(url)
        feed.title(title)
        feed.author({"name": url})
        if feedURL != '':
            feed.link(href=feedURL, rel='self')

        for item in scraper.entries:
            entry = feed.add_entry()
            entry.id(item['url'])
            entry.title(item['title'])
            entry.link(href=item['url'], rel='alternate')
            entry.content(item['text'])

        self.feed = feed

    def GetAtom(self):
        """Return the feed serialized as a pretty-printed Atom string."""
        return self.feed.atom_str(pretty=True).decode()
class TestExtensionDc(unittest.TestCase):
    """Tests for the Dublin Core ('dc') feedgen extension."""

    def setUp(self):
        # Minimal valid feed with the dc extension loaded.
        self.fg = FeedGenerator()
        self.fg.load_extension('dc')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_entryLoadExtension(self):
        # Loading the extension on an entry after the feed already has it
        # may raise ImportError; that is acceptable here.
        fe = self.fg.add_item()
        try:
            fe.load_extension('dc')
        except ImportError:
            pass  # Extension already loaded

    def test_elements(self):
        # Every dc_* setter should round-trip the value it was given,
        # and the feed should still serialize to both Atom and RSS.
        for method in dir(self.fg.dc):
            if method.startswith('dc_'):
                m = getattr(self.fg.dc, method)
                m(method)
                assert m() == [method]
        self.fg.id('123')
        assert self.fg.atom_str()
        assert self.fg.rss_str()
def main():
    """Mirror the 'fernand0' Tumblr blog into a local Atom feed file.

    Fetches posts via the Tumblr client, writes the feed to the web root,
    then exits the process. The print() calls are progress/debug output.
    """
    client = moduleSocial.connectTumblr()
    posts = client.posts('fernand0')
    fg = FeedGenerator()
    fg.id(posts['blog']['url'])
    fg.title(posts['blog']['title'])
    fg.author( {'name':posts['blog']['name'],'email':'*****@*****.**'} )
    fg.link( href=posts['blog']['url'], rel='alternate' )
    fg.subtitle('Alternate feed due to Tumblr GDPR restrictions')
    fg.language('en')
    print(len(posts['posts']))
    for i in range(len(posts['posts'])):
        fe = fg.add_entry()
        print(posts['posts'][i]['post_url'])
        # Prefer the explicit title; fall back to the summary's first line.
        if 'title' in posts['posts'][i]:
            title = posts['posts'][i]['title']
            print('T', posts['posts'][i]['title'])
        else:
            title = posts['posts'][i]['summary'].split('\n')[0]
            print('S', posts['posts'][i]['summary'].split('\n')[0])
        fe.title(title)
        fe.link(href=posts['posts'][i]['post_url'])
        fe.id(posts['posts'][i]['post_url'])
    print(fg.atom_file('/var/www/html/elmundoesimperfecto/tumblr.xml'))
    sys.exit()
def get_feed(atom=False):
    """Return the blog feed as Atom (atom=True) or RSS (default) bytes.

    Pulls the 10 newest posts; assumes the result is non-empty (items[0]
    is read unconditionally) — TODO confirm callers guarantee that.
    """
    fg = FeedGenerator()
    domain = get_domain()
    items = get_posts({"limit": "10"}, full=True)["results"]
    fg.id("http://%s/"%domain)
    fg.title("Blog do MatrUFSC2")
    fg.description("Feed do blog do MatrUFSC2, onde noticias e novos recursos sao anunciados primeiro!")
    fg.language('pt-BR')
    fg.link({"href":"/blog/feed","rel":"self"})
    # posted_at appears to be a naive datetime; force UTC for the feed.
    fg.updated(items[0]["posted_at"].replace(tzinfo=pytz.UTC))
    for item in items:
        entry = fg.add_entry()
        entry.title(item["title"])
        # Strip all HTML from the summary to get a plain-text description.
        tree = html.fromstring(item["summary"])
        cleaner = Cleaner(allow_tags=[])
        tree = cleaner.clean_html(tree)
        text = tree.text_content()
        entry.description(text, True)
        entry.link({"href":item["link"],"rel":"self"})
        entry.content(item["body"])
        entry.published(item["posted_at"].replace(tzinfo=pytz.UTC))
        entry.updated(item["posted_at"].replace(tzinfo=pytz.UTC))
        entry.category({"label": item["category"]["title"], "term": item["category"]["slug"]})
        entry.id(item["id"])
    if atom:
        return fg.atom_str(pretty=True)
    else:
        return fg.rss_str(pretty=True)
def feed(column_id):
    """Render a Zhihu column as an Atom feed; ('', 404) if the column is empty."""
    api = Api(column_id)

    def fetch(url):
        # Download a URL and decode it as UTF-8 text.
        with request.urlopen(url) as stream:
            return stream.read().decode('utf-8')

    result = fetch(api.info)
    if not result:
        return '', 404
    info = json.loads(result)
    entries = json.loads(fetch(api.posts))

    avatar = info['avatar']

    fg = FeedGenerator()
    fg.id(str(entries[0]['slug']))
    fg.title(info['name'])
    fg.language('zh_CN')
    # Avatar URL template carries {id}/{size} placeholders; 's' and 'l'
    # select the small (icon) and large (logo) renditions.
    fg.icon(avatar['template'].replace('{id}', avatar['id']).replace('{size}', 's'))
    fg.logo(avatar['template'].replace('{id}', avatar['id']).replace('{size}', 'l'))
    fg.description(info['intro'])
    fg.author(dict(name=info['creator']['name']))
    fg.link(href=api.base_url + info['url'], rel='alternate')

    for post in entries:
        item = fg.add_entry()
        item.id(post['url'])
        item.title(post['title'])
        item.published(post['publishedTime'])
        item.updated(post['publishedTime'])
        item.author(dict(name=post['author']['name']))
        item.link(href=api.base_url + post['url'], rel='alternate')
        item.content(post['content'])

    return fg.atom_str(pretty=True)
def build_feed(self): "Build the feed given our existing URL" # Get all the episodes page_content = str(requests.get(self.url).content) parser = BassdriveParser() parser.feed(page_content) links = parser.get_links() # And turn them into something usable fg = FeedGenerator() fg.id(self.url) fg.title(self.title) fg.description(self.title) fg.author({'name': self.dj}) fg.language('en') fg.link({'href': self.url, 'rel': 'alternate'}) fg.logo(self.logo) for link in links: fe = fg.add_entry() fe.author({'name': self.dj}) fe.title(link[0]) fe.description(link[0]) fe.enclosure(self.url + link[1], 0, 'audio/mpeg') # Bassdrive always uses date strings of # [yyyy.mm.dd] with 0 padding on days and months, # so that makes our lives easy date_start = link[0].find('[') date_str = link[0][date_start:date_start+12] published = datetime.strptime(date_str, '[%Y.%m.%d]') fe.pubdate(UTC.localize(published)) fe.guid((link[0])) return fg
def run(folder, url):
    """Generate a podcast RSS file for an audiobook folder of mp3s.

    The folder name becomes the feed title; the feed is written to
    <folder>/rss.xml. NOTE(review): tail.decode() implies Python 2
    byte-string paths — confirm the target runtime.
    """
    from feedgen.feed import FeedGenerator
    fg = FeedGenerator()
    head, tail = os.path.split(folder)
    title = tail.decode("utf-8")
    # Feed and entry ids are random UUIDs, regenerated on every run.
    fg.id(str(uuid.uuid4()))
    fg.title(title)
    fg.link(href="{0}/rss.xml".format(url), rel="self")
    fg.description(u"Audiobook `{0}` generated with rssbook".format(title))
    fg.load_extension("podcast")
    for item in sorted(os.listdir(folder)):
        if os.path.splitext(item)[1] == ".mp3":
            # get_node's return value is unused; presumably called for a
            # side effect — TODO confirm what it does.
            get_node(os.path.join(folder, item))
            fullpath = os.path.join(folder, item)
            fe = fg.add_entry()
            fe.id(str(uuid.uuid4()))
            fe.title(title)
            fe.description(item)
            fe.link(
                href="{0}/{1}".format(url, item), rel="enclosure",
                type="audio/mpeg", length=str(os.stat(fullpath).st_size))
    fg.rss_file(os.path.join(folder, "rss.xml"))
def rss():
    """Serve the 20 newest blog pages as an RSS feed."""
    config = public_app.config['feed']

    fg = FeedGenerator()
    fg.id('%s/blog' % Config.BASE_URL)
    fg.title(config['title'])
    fg.author({'name': config['author'], 'email': config['email']})
    fg.description(config['desc'])
    fg.link(href=Config.BASE_URL, rel='alternate')

    # Current, non-hidden pages whose id matches 'blog', newest first.
    query = {
        'id': {'$regex': 'blog'},
        'current': True,
        'meta.hide': {'$ne': True},
    }
    for post in db.pages.find(query).sort('meta.created', -1)[:20]:
        meta = post['meta']
        fe = fg.add_entry()
        fe.title(meta['title'])
        # Fall back to the site-wide author when the post has none.
        name = meta['author'] if 'author' in meta else config['author']
        fe.author({'name': name, 'email': config['email']})
        fe.description(do_truncate(post['content'], 300))
        fe.link(href="%s/%s" % (Config.BASE_URL, post['id']), rel='alternate')
        fe.pubdate(utc.localize(meta['created']))
        fe.content(post['content'])

    response.headers['Content-Type'] = 'application/rss+xml'
    return fg.rss_str(pretty=True)
def GET(self):
    """CherryPy handler: expose an IMAP mailbox as an RSS feed.

    Reads the newest messages (bounded by rss.maxitems) from the configured
    mailbox and returns them as a pretty-printed RSS string.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    fg = FeedGenerator()
    #TODO create icon
    # fg.icon('http://www.det.ua.pt')
    fg.id(config.get('rss','id'))
    fg.title(config.get('rss','title'))
    fg.subtitle(config.get('rss','subtitle'))
    fg.description(config.get('rss','description'))
    fg.author({'name': config.get('rss','author_name'), 'email':config.get('rss','author_email')})
    fg.language(config.get('rss','language'))
    fg.link(href=config.get('rss','href'), rel='related')
    client = EmailClient()
    # listBox returns newest-first (sliced to maxitems); reversed() emits the
    # entries oldest-first — TODO confirm ordering expectation.
    for msgn in reversed(client.listBox(config.get('imap','mailbox'))[:config.getint('rss','maxitems')]):
        cherrypy.log("RSS Entry: "+msgn)
        em = client.getEMail(msgn)
        entry = fg.add_entry()
        entry.title(em['subject'])
        entry.author({'name': em['From']['name'], 'email': em['From']['email']})
        entry.guid(config.get("main","baseurl")+'news/'+msgn)
        entry.link({'href':config.get("main","baseurl")+'news/'+msgn, 'rel':'alternate'})
        entry.pubdate(em['date'])
        entry.content(em['body'])
    return fg.rss_str(pretty=True)
def generateFeeds(buffered, meta):
    """Populate a feed from buffered tweets (Python 2 code).

    *meta* supplies the channel metadata; each tweet dict supplies one entry.
    NOTE(review): the FeedGenerator is built but never serialized or returned
    here — presumably the caller does that, or this is incomplete; confirm.
    """
    utc = pytz.utc
    fg = FeedGenerator()
    fg.id(meta['id'])
    fg.title(meta['title'])
    fg.author(meta['author'])
    fg.subtitle(meta['subtitle'])
    fg.link( href=meta['link'], rel='self' )
    fg.language(meta['language'])
    for tweet in buffered:
        fe = fg.add_entry()
        fe.id(tweet['url'].decode('utf-8'))
        # created_at is naive; localize to UTC then convert to the
        # module-level `locale` timezone.
        fe.published(utc.localize(tweet['created_at']).astimezone(pytz.timezone(locale)))
        #fe.guid(tweet['url'].decode('utf-8'))
        fe.link(href=tweet['url'].decode('utf-8'), rel='alternate')
        fe.title(tweet['readable_title'])
        fe.description(tweet['readable_article'])
        try:
            # Abuses the email field to carry "user: tweet text" — decoding
            # can fail on bad bytes, hence the fallback below.
            fe.author({'name': '', 'email':tweet['user_name'].decode('utf-8') + ": " + tweet['text'].decode('utf-8')})
        except Exception, e:
            logger.error(e)
            fe.author({'name': 'a', 'email':'*****@*****.**'})
def feed():
    """ Generate atom feed """
    entries = parse_posts(0, C.feed_count)

    fg = FeedGenerator()
    # NOTE: the entry count doubles as the feed id, mirroring prior behavior.
    fg.id(str(len(entries)))
    fg.title(C.title)
    fg.subtitle(C.subtitle)
    fg.language(C.language)
    fg.author({'name': C.author, 'email': C.email})
    fg.link(href=C.root_url, rel='alternate')
    fg.link(href=make_abs_url(C.root_url, 'feed'), rel='self')

    for post in entries:
        item = fg.add_entry()
        item.id(post.get('url'))
        item.title(post.get('title'))
        item.published(post.get('date'))
        item.updated(post.get('updated') or post.get('date'))
        item.link(href=make_abs_url(C.root_url, post.get('url')), rel='alternate')
        item.author({'name': post.get('author'), 'email': post.get('email')})
        item.content(post.get('body'))

    return fg.atom_str(pretty=True)
def makeRss(self):
    """Build a single-episode podcast item and splice it into the feed file.

    Renders a one-entry RSS document, extracts just its <item> element, writes
    it to self.get_filename('xml'), then lets updateRss() merge it into the
    published feed.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://hypecast.blackmad.com/' + self.mode)
    fg.title('Hype Machine Robot Radio: ' + self.mode)
    fg.author( {'name':'David Blackmad','email':'*****@*****.**'} )
    fg.logo('http://dump.blackmad.com/the-hype-machine.jpg')
    fg.language('en')
    fg.link(href='http://hypecast.blackmad.com/' + self.mode)
    fg.description('Hype Machine Robot Radio: ' + self.mode)
    # Tracklist: "1. <song id>" lines joined with <br/>.
    description = ' <br/>'.join(['%s. %s' % (index + 1, self.mk_song_id(s)) for index, s in enumerate(self.songs)])
    fe = fg.add_entry()
    fe.title(self.track_name)
    fe.description(description)
    fe.id(self.filename)
    # add length
    print(self.relative_dir)
    print(self.filename)
    fe.enclosure(url = 'http://hypecast.blackmad.com/%s' % (self.filename), type="audio/mpeg")
    # Serialize the whole feed, then pull out only the <item> element
    # ([0] is the <channel>) for the standalone per-episode file.
    rss_str = fg.rss_str()
    newItem = ET.fromstring(rss_str)[0].find('item')
    out = open(self.get_filename('xml'), 'w')
    out.write(ET.tostring(newItem))
    out.close()
    self.updateRss()
def setUp(self):
    """Create a feed with three episodes (sharing one id) for the tests."""
    self.feedId = 'http://example.com'
    self.title = 'Some Testfeed'

    fg = FeedGenerator()
    fg.id(self.feedId)
    fg.title(self.title)
    fg.link(href='http://lkiesow.de', rel='alternate')
    fg.description('...')

    # The middle episode deliberately uses the add_item alias to cover
    # both spellings of the entry-creation API.
    episodes = (
        (fg.add_entry, 'The First Episode'),
        (fg.add_item, 'The Second Episode'),
        (fg.add_entry, 'The Third Episode'),
    )
    for make_entry, name in episodes:
        episode = make_entry()
        episode.id('http://lernfunk.de/media/654321/1')
        episode.title(name)
        episode.content(u'…')

    self.fg = fg
def generate(app, category, torrents):
    """
    generate an rss feed from category with torrents as results
    if category is None this feed is for all categories
    """
    # Channel URL and title both depend on whether a category was given.
    if category:
        url = util.fullSiteURL(app, 'feed', '{}.rss'.format(category))
        title = "new {} torrents on index ex invisibilis".format(category)
    else:
        url = util.fullSiteURL(app, 'feed', 'all.rss')
        title = "new torrents on index ex invisibilis"

    feed = FeedGenerator()
    feed.link(href=url, rel="self")
    feed.id(url)
    feed.title(title)
    feed.description(title)
    feed.author({"name": "anonymous"})
    feed.language("en")

    for torrent in torrents:
        entry = feed.add_entry()
        entry.id(torrent.infohash)
        entry.link(href=util.fullSiteURL(app, torrent.downloadURL()))
        entry.title(torrent.title)
        entry.description(torrent.summary(100))

    return feed
def emit_group_rss(self, group=None, groupname=None):
    """Write an RSS file ('<group>.xml') of recent Hypothesis annotations
    for a group.

    Annotation ids come from self.data(); deleted annotations are skipped.
    Replies are rendered with an "in reply to" preamble and quote the
    referenced text when available.
    """
    md = markdown.Markdown()
    from feedgen.feed import FeedGenerator
    fg = FeedGenerator()
    fg.id('https://h.jonudell.info')
    fg.title('Hypothesis group %s' % groupname)
    fg.author({'name': 'Jon Udell', 'email': '*****@*****.**'})
    fg.description("Hypothesis notifications for group %s" % groupname)
    fg.link(href='https://h.jonudell.info/group_rss')
    fg.language('en')
    h = Hypothesis(token=self.token, limit=20)
    ids = self.data()
    annos = []
    for id in ids:
        # Fetch each annotation; ones that 404 (deleted) are skipped.
        try:
            anno = h.get_annotation(id)
            assert ('id' in anno.keys())
            annos.append(anno)
        except:
            print('cannot get %s, deleted?' % id)
    annos.sort(key=itemgetter('updated'), reverse=True)
    annos = [HypothesisAnnotation(a) for a in annos]
    for anno in annos:
        ref_user = None
        in_reply_to = None
        root_id = anno.id
        if len(anno.references) > 0:
            # This is a reply: references[-1] is the direct parent,
            # references[0] the thread root.
            try:
                ref_id = anno.references[-1:][0]
                root_id = anno.references[0]
                ref = h.get_annotation(ref_id)
                ref_user = HypothesisAnnotation(ref).user
                in_reply_to = '<p>in reply to %s </p>' % ref_user
            except:
                print("cannot get user for ref_id %s, deleted?" % ref_id)
        fe = fg.add_entry()
        fe.id(anno.id)
        fe.title('%s annotated %s in the group %s at %s ' % (anno.user, anno.doc_title, groupname, anno.updated))
        fe.author({"email": None, "name": anno.user, "uri": None})
        dl = "https://hyp.is/%s" % anno.id
        fe.link({"href": "%s" % dl})
        # Assemble the HTML body: reply preamble, quoted target text,
        # attribution link, markdown-rendered note, then tags.
        content = ''
        if ref_user is not None:
            content += in_reply_to
        if anno.exact is not None:
            content += '<p>in reference to: </p> <p> <blockquote><em>%s</em></blockquote></p>' % anno.exact
        content += '<p> %s <a href="https://hyp.is/%s">said</a>: </p> ' % (anno.user, root_id)
        content += '%s ' % md.convert(anno.text)
        if len(anno.tags):
            content += '<p>tags: %s' % ', '.join(anno.tags)
        fe.content(content, type='CDATA')
        dt = dateutil.parser.parse(anno.updated)
        dt_tz = dt.replace(tzinfo=pytz.UTC)
        fe.pubdate(dt_tz)
    rssfeed = fg.rss_str(pretty=True)  # Get the RSS feed as string
    fg.rss_file('%s.xml' % group)  # Write the RSS feed to a file
def feed(request):
    """Django view: Bear Blog discover feed, RSS or Atom.

    ?newest=True selects the most-recent ordering; otherwise posts are ranked
    by an HN-style gravity score. ?type=rss selects RSS output, else Atom.
    """
    fg = FeedGenerator()
    fg.id("bearblog")
    fg.author({"name": "Bear Blog", "email": "*****@*****.**"})
    newest = request.GET.get("newest")
    if newest:
        fg.title("Bear Blog Most Recent Posts")
        fg.subtitle("Most recent posts on Bear Blog")
        fg.link(href="https://bearblog.dev/discover/?newest=True", rel="alternate")
        all_posts = (Post.objects.annotate(
            upvote_count=Count("upvote"),
        ).filter(
            publish=True,
            blog__reviewed=True,
            blog__blocked=False,
            show_in_feed=True,
            published_date__lte=timezone.now(),
        ).order_by("-published_date").select_related("blog")
        [0:posts_per_page])
    else:
        fg.title("Bear Blog Trending Posts")
        fg.subtitle("Trending posts on Bear Blog")
        fg.link(href="https://bearblog.dev/discover/", rel="alternate")
        # Score: (upvotes - 1) / (age_seconds + 4)^gravity * 100000 —
        # recency-decayed popularity.
        all_posts = (Post.objects.annotate(
            upvote_count=Count("upvote"),
            score=ExpressionWrapper(
                ((Count("upvote") - 1) /
                 ((Seconds(Now() - F("published_date"))) + 4)**gravity) * 100000,
                output_field=FloatField(),
            ),
        ).filter(
            publish=True,
            blog__reviewed=True,
            blog__blocked=False,
            show_in_feed=True,
            published_date__lte=timezone.now(),
        ).order_by("-score", "-published_date").select_related(
            "blog").prefetch_related("upvote_set")[0:posts_per_page])
    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"{post.blog.useful_domain()}/{post.slug}/")
        fe.title(post.title)
        fe.author({"name": post.blog.subdomain, "email": "hidden"})
        fe.link(href=f"{post.blog.useful_domain()}/{post.slug}/")
        fe.content(clean_text(mistune.html(post.content)), type="html")
        fe.published(post.published_date)
        fe.updated(post.published_date)
    # NOTE(review): `post` below is the loop variable from the last iteration;
    # this raises NameError when all_posts is empty, and the self link points
    # at an arbitrary blog's /feed/ — looks suspicious, confirm intent.
    if request.GET.get("type") == "rss":
        fg.link(href=f"{post.blog.useful_domain()}/feed/?type=rss", rel="self")
        rssfeed = fg.rss_str(pretty=True)
        return HttpResponse(rssfeed, content_type="application/rss+xml")
    else:
        fg.link(href=f"{post.blog.useful_domain()}/feed/", rel="self")
        atomfeed = fg.atom_str(pretty=True)
        return HttpResponse(atomfeed, content_type="application/atom+xml")
def rss_feed_builder(feed):
    """Return a FeedGenerator seeded with *feed*'s channel metadata.

    Args:
        feed: object exposing route, name, description and lang attributes.

    Bug fix: the original called fg.link(feed.route, rel="self"), passing the
    URL string as feedgen's positional `link` parameter, which expects a dict
    (or list of dicts) of link attributes. The URL must be passed as href=.
    """
    fg = FeedGenerator()
    fg.id(feed.route)
    fg.title(feed.name)
    fg.link(href=feed.route, rel="self")
    fg.description(feed.description or "")
    fg.language(feed.lang)
    return fg
def build_xml_feed(allchapters, verbose=True):
    """Write RSS and Atom feeds of the 100 newest Crunchyroll Manga chapters
    (Python 2 code).

    Raises CRMangaFeedException when *allchapters* is empty. Each entry embeds
    a JSON copy of the chapter metadata in an HTML comment for downstream
    consumers.
    """
    if verbose:
        print
        print "Generating feeds..."
    if len(allchapters) == 0:
        raise CRMangaFeedException("Empty chapter list")
    crtz = pytz.timezone('America/New_York')
    fg = FeedGenerator()
    fg.id('http://utils.senpai.moe/')
    fg.title('Crunchyroll Manga - Latest Chapters (Unofficial)')
    fg.author({'name': 'Nosgoroth', 'email': '*****@*****.**'})
    fg.link(href='http://utils.senpai.moe/')
    fg.subtitle(
        'Latest manga chapters, updated daily, using undocumented API.')
    fg.language('en')
    fg.ttl(15)
    # Newest chapter first; its timestamp becomes the channel's updated time.
    allchapters = sorted(allchapters, key=itemgetter('updated_t'), reverse=True)
    first = allchapters[0]["updated_t"].replace(tzinfo=crtz)
    fg.updated(first)
    fg.lastBuildDate(first)
    for chapter in allchapters[0:100]:
        fe = fg.add_entry()
        fe.id(chapter["url"])
        fe.link({
            "href": chapter["url"],
            "rel": "alternate",
            "title": "Read online"
        })
        fe.title("%s - %s" % (chapter["series"], chapter["name"]))
        fe.summary("<p>%s has been added to %s in Crunchyroll Manga.</p>" %
                   (chapter["name"], chapter["series"]))
        fe.published(chapter["updated_t"].replace(tzinfo=crtz))
        # Build a serializable copy: drop non-JSON/irrelevant fields and
        # rename guid -> chapter_id.
        chapter_serial = chapter.copy()
        chapter_serial.pop("updated_t", None)
        chapter_serial.pop("url", None)
        chapter_serial.pop("thumb", None)
        chapter_serial["chapter_id"] = chapter_serial["guid"]
        chapter_serial.pop("guid", None)
        content = "<p>%s has been added to %s in Crunchyroll Manga.</p><p>Updated: %s</p><img src=\"%s\" />" % (
            chapter["name"], chapter["series"], chapter["updated"], chapter["thumb"])
        # Machine-readable metadata hidden in an HTML comment.
        content += "<!--JSON:[[%s]]-->" % json.dumps(chapter_serial)
        fe.content(content)
    fg.rss_file(os.path.join(DESTINATION_FOLDER, 'updates_rss.xml'),
                pretty=DEBUG)  # Write the RSS feed to a file
    fg.atom_file(os.path.join(DESTINATION_FOLDER, 'updates_atom.xml'),
                 pretty=DEBUG)  # Write the ATOM feed to a file
def create_mock_fg():
    """Return a FeedGenerator pre-populated with fixture metadata for tests."""
    feed = FeedGenerator()
    feed.id(FEED_ID)
    feed.link(href=FEED_ID, rel='self')
    feed.title('Some Test Feed')
    feed.subtitle('Test feed subtitle!')
    feed.author({'name': 'Edfward', 'email': '*****@*****.**'})
    feed.language('en')
    return feed
def __init_feed(setup):
    """Build a FeedGenerator from a *setup* mapping.

    Expected keys: id, title, subtitle, logo, name, email, link.
    Missing keys simply leave the corresponding field unset.
    """
    feed = FeedGenerator()
    # Scalar fields map 1:1 from setup keys to feedgen setters.
    for attr in ("id", "title", "subtitle", "logo"):
        getattr(feed, attr)(setup.get(attr))
    feed.author({"name": setup.get("name"), "email": setup.get("email")})
    feed.link(href=setup.get("link"), rel="self")
    return feed
def init_fg(self, repo_info):
    """Create the feed skeleton for a repository's recent commits.

    *repo_info* supplies full_name, html_url, updated_at and author.
    """
    feed = FeedGenerator()
    feed.title('Recent commits to ' + repo_info['full_name'])
    feed.id(repo_info['html_url'])
    feed.link(href=repo_info['html_url'])
    feed.updated(repo_info['updated_at'])
    feed.author(repo_info['author'])
    return feed
def rss(conversation, url, author_name, author_email, title, subtitle, language, output_path):
    """Export all the links of the conversation in a simple RSS feed.

    Scans every message in the Slack conversation history for a
    '<url|title>'-style link and writes one RSS item per match to
    *output_path*.
    """
    from feedgen.feed import FeedGenerator
    fg = FeedGenerator()
    fg.id(url)
    fg.title(title)
    fg.author(
        {
            'name': author_name,
            'email': author_email,
        }
    )
    fg.link(
        href=url,
        rel='alternate'
    )
    if subtitle:
        fg.subtitle(subtitle)
    fg.language(language)
    for message in conversation.history():
        # Slack renders links as <url|title> (title optional).
        match = re.search(
            "^.*<(?P<url>[^>|]+)\|?(?P<title>[^>]+)?>.*$",
            message.data["text"],
            flags=re.MULTILINE
        )
        if match is not None:
            fe = fg.add_entry()
            link = match.group("url")
            title = match.group("title") or link
            # Slack timestamps are epoch-seconds strings; converted to a
            # local-timezone datetime via naive_to_local.
            date = naive_to_local(datetime.datetime.fromtimestamp(float(message.data["ts"])))
            description = message.data["text"]
            if "attachments" in message.data:
                # Enrich title/description with the attachment whose
                # title_link matches the extracted URL. NOTE(review): raises
                # IndexError if no attachment matches — confirm that's OK.
                attachment = [a for a in message.data["attachments"] if a["title_link"] == link][0]
                title += " | " + attachment["title"]
                description += """
""" + attachment["text"]
            fe.id(link)
            fe.title(title)
            fe.link(href=link)
            fe.published(date)
            user = config.slack.get_user(message.data["user"])
            author = {
                "name": message.data["username"],
                "email": user.email or "noemail",
            }
            fe.author(author)
            fe.description(description)
    fg.rss_file(output_path, pretty=True)
def listen_for_urls(self, msg, match):
    """Chat-bot hook (Python 2): when a message contains exactly one URL,
    append it to that user's personal RSS file and yield a status line.

    NOTE(review): if the HTTP fetch fails, the error is yielded but execution
    continues with `soup` unbound — confirm this is the intended behavior.
    """
    url = re.findall(
        'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', str(msg))
    # Sender nick: everything after the '/' in the JID resource.
    p = re.compile('\/(.*)')
    user = re.search(p, str(msg.getFrom())).group()[1:]
    if len(url) == 1:
        url = str(url[0])
        filename = '/mnt/extern1/SYSTEM/www/foorss/' + user + '.xml'
        fg = FeedGenerator()
        # Some pages block urllib2 so we need a fake user agent
        header = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
            'Accept-Encoding': 'none',
            'Accept-Language': 'en-US,en;q=0.8',
            'Connection': 'keep-alive'
        }
        req = urllib2.Request(url, headers=header)
        try:
            soup = BeautifulSoup(urllib2.urlopen(req))
        except urllib2.HTTPError, e:
            print e.fp.read()
            yield "Error while parsing the website..."
        # Resume an existing per-user feed, or start a fresh one.
        if os.path.isfile(filename):
            fg.from_rss(filename)
        else:
            fg.id(user)
            fg.title('Some Testfeed')
            fg.link(href='http://nix.da', rel='alternate')
            fg.description('This is a cool feed!')
        # Use the page <title> when present, else fall back to the URL.
        if soup.title != None:
            title = soup.title.string
        else:
            title = url
        fe = fg.add_entry()
        fe.id(url)
        fe.title(title)
        fe.description('Description')
        fe.link([{'href': url}])
        fg.rss_file(filename)
        yield title + ' from ' + user + ' (rss updated)'
def make_feedgenerator(conf):
    """Create the per-language Lojban twitter feed skeleton.

    *conf* maps 'short' to the language code and 'long' to its display name;
    the short code also names the atom file the feed points at.
    """
    atom_name = '{}.atom.xml'.format(conf['short'])

    feedgen = FeedGenerator()
    feedgen.title('Lojban twitter feed in {lang}'.format(lang=conf['long']))
    feedgen.description('Twitter Atom feed in {lang} about the constructed language Lojban'.format(lang=conf['long']))
    feedgen.language(conf['short'])
    feedgen.link(href=atom_name)
    feedgen.id(atom_name)
    feedgen.generator(generator='bano', version='0.0.0', uri='https://github.com/kyrias/bano')
    return feedgen
def _get_feed(cls, query: Optional[ClassicAPIQuery] = None) -> FeedGenerator:
    """Build the base Atom feed for a classic arXiv API response.

    With a query, the feed id encodes a SHA-1-derived search id and the self
    link reproduces the query parameters; without one, generic defaults are
    used.
    """
    fg = FeedGenerator()
    fg.generator("")
    fg.register_extension("opensearch", OpenSearchExtension)
    fg.register_extension("arxiv", ArXivExtension, ArXivEntryExtension,
                          rss=False)

    if query:
        if query.phrase is not None:
            query_string = phrase_to_query_string(query.phrase)
        else:
            query_string = ""

        if query.id_list:
            id_list = ",".join(query.id_list)
        else:
            id_list = ""

        fg.title(f"arXiv Query: {query.to_query_string()}")

        # From perl documentation of the old site:
        # search_id is calculated by taking SHA-1 digest of the query
        # string. Digest is in bytes form and it's 20 bytes long. Then it's
        # base64 encoded, but perls version returns only 27 characters -
        # it omits the `=` sign at the end.
        search_id = base64.b64encode(
            hashlib.sha1(query.to_query_string().encode(
                "utf-8")).digest()).decode("utf-8")[:-1]
        fg.id(
            cls._fix_url(
                url_for("classic_api.query").replace(
                    "/query", f"/{search_id}")))

        fg.link({
            "href":
            cls._fix_url(
                url_for(
                    "classic_api.query",
                    search_query=query_string,
                    start=query.page_start,
                    max_results=query.size,
                    id_list=id_list,
                )),
            "type": "application/atom+xml",
        })
    else:
        # TODO: Discuss better defaults
        fg.title("arXiv Search Results")
        fg.id("https://arxiv.org/")

    fg.updated(to_utc(datetime.utcnow()))
    return fg
def create_feed_generator():
    """Return the channel-level feed skeleton for Postoj.sk."""
    feed = FeedGenerator()
    feed.id("https://github.com/fadawar/postoj-rss")
    feed.title("Postoj.sk")
    feed.subtitle("Konzervatívny denník")
    feed.language("sk")
    feed.author({"name": "Postoj.sk"})
    feed.link(href="https://www.postoj.sk", rel="alternate")
    feed.logo("https://www.postoj.sk/assets/frontend/build/img/brand-main.png")
    return feed
def create_generator(self):
    """ Setup and return a feedgen generator object """
    current_year = datetime.datetime.now().year

    feed = FeedGenerator()
    feed.title(self.title)
    feed.id(self.root_url)
    feed.link(href=self.root_url, rel='alternate')
    feed.language(u'en')
    feed.description(self.description)
    feed.rights(u"Copyright Contrivers' Review {}".format(current_year))
    return feed
def test_content_cdata_type(self):
    """content(type='CDATA') must serialize the payload in a CDATA section."""
    feed = FeedGenerator()
    feed.title('some title')
    feed.id('http://lernfunk.de/media/654322/1')

    entry = feed.add_entry()
    entry.id('http://lernfunk.de/media/654322/1')
    entry.title('some title')
    entry.content('content', type='CDATA')

    result = feed.atom_str()
    assert b'<content type="CDATA"><![CDATA[content]]></content>' in result
def all_new_rss():
    """ RSS feed for /all/new """
    posts = misc.getPostList(misc.postListQueryBase(), 'new', 1).dicts()

    feed = FeedGenerator()
    feed.id(request.url)
    feed.title('All new posts')
    feed.link(href=request.url_root, rel='alternate')
    feed.link(href=request.url, rel='self')

    body = misc.populate_feed(feed, posts).atom_str(pretty=True)
    return Response(body, mimetype='application/atom+xml')
def rss_feed(feed="page", db="https://notdb.martyni.co.uk"):
    """Flask handler (Python 2): render a podcast RSS feed from a notdb bucket.

    The feed name defaults to "page" but can be overridden via the ?tag=
    query parameter. Episode metadata comes from the db's list endpoint.
    """
    bucket = "authmartynicouk"
    if request.args.get("tag"):
        feed = request.args.get("tag")
    feed_url = "{db}/{bucket}/list/{feed}?reverse=true".format(db=db, bucket=bucket, feed=feed)
    print feed_url
    episodes_links = requests.get(feed_url).json()
    episodes = [requests.get(db + link).json() for link in episodes_links]
    print episodes
    # Channel metadata is taken from the last episode in the list.
    description = str(episodes[-1].get('description'))
    author = str(episodes[-1].get('author'))
    title = feed
    email = "*****@*****.**"
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id(request.url)
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.author({'name': author, 'email': email})
    fg.link(href=request.url, rel='self')
    fg.description(description)
    fg.title(title)
    # feed.title() here is str.title() on the feed name, not a feed method.
    fg.image(url="{db}/{bucket}/file/{feed}_image.png".format(db=db, bucket=bucket, feed=feed),
             title=feed.title(),
             link=request.url,
             width='123',
             height='123',
             description=description)
    counter = 1
    for i in episodes:
        # NOTE(review): the bare except silently skips malformed episodes;
        # consider at least logging the failure.
        try:
            print type(i)
            # Some entries arrive as raw strings; coerce them to JSON after
            # normalizing the quote characters.
            if type(i) is not dict:
                i = json.loads('"' + i.replace('"', "'") + '"')
            author = i.get("author") or "anonymous"
            email = i.get("email") or "*****@*****.**"
            title = str(i.get('title')).title() or "title"
            contents = i.get("contents")[0].replace("`", "'").replace(
                u"¬", "'") or "contents"
            fe = fg.add_entry()
            fe.id(str(counter) + "mp3")
            fe.title(str(i.get('title')).title())
            fe.description(contents)
            if i.get("media"):
                fe.enclosure(i.get("media"), 0, 'audio/mpeg')
            fe.link(href=request.url, rel='alternate')
            fe.author(name=author, email=email)
        except:
            pass
        counter += 1
    return Response(fg.rss_str(), mimetype='text/xml')
def feed_http(request):
    """HTTP Cloud Function.

    Args:
        request (flask.Request): The request object.
        <http://flask.pocoo.org/docs/1.0/api/#flask.Request>
    Returns:
        The response text, or any set of values that can be turned into a
        Response object using `make_response`
        <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>.
    """
    # The Rabota.UA listing page to scrape is passed as ?url=...
    request_args = request.args
    url = request_args['url']
    g = Grab()
    fg = FeedGenerator()
    g.go(url)
    fg.id(url)
    fg.title('Rabota.UA | rss feed')
    # Link the feed back to the scheme+host of the (possibly redirected) page.
    url_parsed = urlparse(g.response.url)
    fg.link(href=url_parsed.scheme + '://' + url_parsed.hostname, rel='alternate')
    fg.description(g.doc('/html/head/title').text())
    # Vacancy count shown on the page; zero means there is nothing to scrape.
    count = int(
        g.doc('//span[@id="ctl00_content_vacancyList_ltCount"]/span').one().
        text())
    if count == 0:
        itm_list = []
    else:
        articles = g.doc.select(
            '//table[contains(@class, "f-vacancylist-tablewrap")]').one()
        itm_list = articles.select(
            'tr[@id]/td/article/div[contains(@class, "card-body")]')
    for item in itm_list:
        vac_title = item.select(
            'div[1]//h2[contains(@class, "card-title")]/a/@title').text(
            ).strip()
        # Listing hrefs are relative; resolve against the fetched page.
        vac_url = g.make_url_absolute(
            item.select(
                'div[1]//h2[contains(@class, "card-title")]/a/@href').text())
        try:
            vac_description = item.select(
                'div[contains(@class, "card-description")]').text().strip()
        except weblib.error.DataNotFound:
            # Some cards have no description block.
            vac_description = 'N/A'
        fe = fg.add_entry()
        print(vac_title)
        fe.id(vac_url)
        fe.link({'href': vac_url})
        fe.source(vac_url)
        fe.title(vac_title)
        fe.description(vac_description)
    # NOTE(review): the body is an Atom document but the Content-Type claims
    # RSS — looks inconsistent; confirm which format consumers expect.
    response = make_response(fg.atom_str(pretty=True, extensions=False))
    response.headers['Content-Type'] = 'application/rss+xml; charset=UTF-8'
    return response
def main(argv):
    """Render RSS/Atom feeds of Dallas food-inspection scores read from stdin.

    BUG FIX: the ``--format`` help string contained a stray ``')`` sequence
    left over from an editing mistake, which appeared verbatim in ``--help``
    output; the typo is removed.
    """
    ap = argparse.ArgumentParser(
        description='''
        Render RSS and Atom feeds from a CSV of food inspection data.
        ''')
    ap.add_argument(
        '-v', '--verbose', action='count', dest='verbosity', default=0,
        help='increase global logging verbosity; can be used multiple times')
    ap.add_argument(
        '-f', '--format', choices=['rss', 'atom'], default='atom',
        help='''
        specify the format to use when rendering the feed
        (default: %(default)s)
        ''')
    ap.add_argument(
        '-n', '--num_incidents', metavar='<num>', type=int, default=10,
        help='render <num> recent incidents in the feed (default: %(default)s)')
    ap.add_argument(
        'flavor', nargs='?', default='all', choices=['all', 'failures'],
        help='select the flavor of feed to render (default: %(default)s)')
    args = ap.parse_args()

    # -v raises verbosity: ERROR -> WARNING -> INFO -> DEBUG.
    logging.basicConfig(
        level=logging.ERROR - args.verbosity * 10,
        style='{',
        format='{}: {{message}}'.format(ap.prog))

    fg = FeedGenerator()
    fg.id('http://pgriess.github.io/dallas-foodscores/')
    fg.link(href=fg.id(), rel='self')
    fg.title('Dallas Food Inspection Scores')
    fg.subtitle('''
    Food inspection scores from the official City of Dallas dataset; updated
    daily
    ''')
    fg.description(fg.subtitle())
    fg.language('en')
    fg.author(
        name='Peter Griess',
        email='*****@*****.**',
        uri='https://twitter.com/pgriess')

    for i in get_inspections_to_feed(sys.stdin, args.num_incidents,
                                     args.flavor):
        fe = fg.add_entry()
        fe.title('{name} at {address} scored {score}'.format(
            name=i.name, address=i.address, score=i.score))
        # Stable-ish fragment id derived from the inspection's hash.
        fe.id(fg.id() + '#!/' + str(abs(hash(i))))
        fe.link(href=fe.id(), rel='alternate')
        fe.content(fe.title())
        fe.published(TZ.localize(i.date))

    if args.format == 'atom':
        print(fg.atom_str(pretty=True))
    else:
        print(fg.rss_str(pretty=True))
def rss(request):
    """Django view: the A&G archive podcast feed, recording one Google
    Analytics pageview per request before rendering the RSS."""
    # GA Measurement Protocol fields:
    #   v=1 version, tid property id, cid anonymous client id,
    #   t=pageview hit type, dh hostname, dp page, dt title.
    angrates_uuid = uuid.UUID('f93c5388-f60b-5159-bbfc-d08d6f7b401f')

    # Derive a stable anonymous client id from the caller's IP
    # (first hop of X-Forwarded-For when behind a proxy).
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        ip = forwarded.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    cid = uuid.uuid5(angrates_uuid, ip)

    requests.post('https://www.google-analytics.com/collect', data={
        'v': 1,
        'tid': 'UA-19269567-1',
        'cid': cid,
        't': 'pageview',
        'dh': 'armstrongandgettybingo.com',
        'dp': '/rss/',
        'dt': 'Podcast',
    })

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://www.armstrongandgettybingo.com/rss')
    fg.podcast.itunes_category('News & Politics', 'Conservative (Right)')
    fg.podcast.itunes_explicit('no')
    fg.title('The Armstrong and Getty Show (Bingo)')
    fg.author({'name': 'Ben Friedland', 'email': '*****@*****.**'})
    fg.link(href='http://www.armstrongandgettybingo.com', rel='alternate')
    fg.logo('https://s3-us-west-1.amazonaws.com/bencast/bingologo.png')
    fg.subtitle('Armstrong and Getty Bingo')
    fg.description('The Armstrong and Getty Show - Unofficial Feed including Archives back to 2001.')
    fg.link(href='http://www.armstrongandgettybingo.com/rss', rel='self')
    fg.language('en')

    pacific = pytz.timezone('America/Los_Angeles')
    for hour in Hour.objects.all().order_by('-pub_date'):
        entry = fg.add_entry()
        entry.id(hour.link)
        entry.title(hour.title)
        entry.description(hour.description)
        entry.enclosure(hour.link, 0, 'audio/mpeg')
        entry.published(pacific.localize(hour.pub_date))

    return HttpResponse(fg.rss_str(pretty=True),
                        content_type='application/rss+xml')
def rssvideoschannel(request, channel_id):
    """Django view: podcast RSS feed for all videos of a channel.

    BUG FIX: the original did ``if not channel: return Http404`` — but
    ``Channel.objects.get`` raises ``DoesNotExist`` instead of returning
    ``None`` (making the check dead code), and ``Http404`` must be *raised*,
    not returned, for Django to produce a 404 response. Both are corrected.
    """
    try:
        channel = Channel.objects.get(channel_id=channel_id)
    except Channel.DoesNotExist:
        raise Http404

    videos = channel.video_set.order_by('-pub_date')
    fg = FeedGenerator()
    fg.load_extension('podcast')
    # Canonical URL of this channel's feed on the current site.
    channelURL = ''.join([
        'http://',
        get_current_site(request).domain,
        reverse('you2rss:videoperchannel', args=(channel_id, ))
    ])
    fg.id(channelURL)
    fg.title(channel.title_text)
    fg.author({'name': 'pon sko', 'email': '*****@*****.**'})
    fg.link(href=channelURL, rel='alternate')
    description = channel.description_text
    if len(description) < 2:
        description = "no desc"  # feed readers need a non-empty description
    fg.subtitle(description)
    fg.description(description)
    fg.language('en')
    fg.logo(logo=channel.thumbnail)
    fg.image(url=channel.thumbnail, title=channel.title_text)
    fg.podcast.itunes_image(channel.thumbnail)

    for video in videos:
        fe = fg.add_entry()
        fe.author(name=channel.title_text)
        videodesc = video.description_text
        if len(videodesc) < 2:
            videodesc = "no desc"
        fe.content(videodesc)
        # The enclosure points at our transcoded-audio proxy URL.
        fileURL = ''.join([
            'http://',
            get_current_site(request).domain,
            reverse('you2rss:rssfile', args=(video.video_id, ))
        ])
        fe.enclosure(fileURL, '1337', 'audio/mpeg')
        fe.id(fileURL)
        fe.link(href=fileURL, rel='alternate')
        fe.podcast.itunes_image(video.thumbnail)
        fe.pubdate(video.pub_date)
        fe.published(video.pub_date)
        fe.title(video.title_text)

    rssdata = fg.rss_str(pretty=True)
    response = HttpResponse(rssdata,
                            content_type='application/rss+xml; charset=UTF-8')
    response['Content-Length'] = len(rssdata)
    return response
def get_top_25_post_any_comm(num_of_posts):
    """Return an RSS response with the top *num_of_posts* posts across all
    communities, sorted by score.

    Talks to three local services: the votes service (top-N and sort
    endpoints) and the posts service (per-post detail).

    BUG FIX: the feed id URL was ``'http:localhost:5200/...'`` — missing the
    ``//`` after the scheme, producing an invalid URI in the feed.
    """
    try:
        # Fetch the top N votes.
        response = requests.get(
            'http://localhost:5100/api/v1/resources/votes/top/{}'.format(
                num_of_posts),
        )
        # If the response was successful, no exception will be raised.
        response.raise_for_status()
    except HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')  # Python 3.6
    except Exception as err:
        print(f'Other error occurred: {err}')  # Python 3.6
    else:
        print('Success!')

    votes_json_response = response.json()

    # Collect the post ids and have the votes service sort them by score.
    post_ids = [vote['PostID'] for vote in votes_json_response]
    sorted_votes = requests.post(
        'http://localhost:5100/api/v1/resources/votes/list',
        data={'list': str(post_ids)})
    sorted_votes_json_response = sorted_votes.json()

    fg = FeedGenerator()
    fg.id('http://localhost:5200/api/rss/resources/posts/{}'.format(
        num_of_posts))
    fg.title('The top 25 posts to any community, sorted by score')
    fg.subtitle('Reddit')
    fg.language('en')
    fg.link(href='reddit.com')

    for vote in sorted_votes_json_response:
        # Resolve each vote's post details from the posts service.
        response = requests.get(
            'http://localhost:5000/api/v1/resources/posts/{}'.format(
                vote['PostID']))
        post_response = response.json()
        fe = fg.add_entry()
        fe.id(str(vote['PostID']))
        fe.title(post_response['title'])
        fe.author({'name': post_response['username']})
        fe.pubDate(post_response['date'] + '-7:00')

    # NOTE(review): mimetype 'rss+xml' lacks the 'application/' prefix —
    # confirm whether consumers rely on the current value before changing.
    rssfeed = fg.rss_str(pretty=True).decode()  # bytes -> str
    return Response(rssfeed, mimetype='rss+xml')
def test_summary_html_type(self):
    """A summary with type='html' must be emitted as an HTML summary element."""
    feed = FeedGenerator()
    feed.title('some title')
    feed.id('http://lernfunk.de/media/654322/1')

    entry = feed.add_entry()
    entry.id('http://lernfunk.de/media/654322/1')
    entry.title('some title')
    entry.link(href='http://lernfunk.de/media/654322/1')
    entry.summary('<p>summary</p>', type='html')

    xml = feed.atom_str()
    assert b'<summary type="html"><p>summary</p></summary>' in xml
def create_fg():
    """Create and return the base FeedGenerator for ACCRE's status feed."""
    feed = FeedGenerator()
    feed.id("http://www.accre.vanderbilt.edu")
    feed.title("ACCRE's Status Feed")
    feed.author({"name": "Josh Arnold", "email": "*****@*****.**"})
    feed.link(href="http://www.accre.vanderbilt.edu", rel="alternate")
    feed.logo("http://www.accre.vanderbilt.edu/"
              "wp-content/themes/ashford/favicon.ico")
    feed.subtitle("ACCRE's Status Feed")
    feed.language('en')
    return feed
def feed(request, slug):
    """
    Return an RSS feed

    :param request: The request object.
    :param slug: The slug for the requested feed.
    :return: The rendered feed.
    """
    out_feed = get_object_or_404(OutFeed, slug=slug)
    url = "{}://{}{}".format(request.scheme, request.get_host(),
                             reverse('posts', args=[slug]))

    generator = FeedGenerator()
    generator.id(url)
    generator.title(out_feed.title)
    generator.link(href=url, rel='alternate')
    generator.description(out_feed.description)
    generator.pubDate(out_feed.updated)

    # Collect enabled posts from every enabled input feed of this out-feed.
    in_feeds = InFeed.objects.filter(out_feed=out_feed, enabled=True)
    posts = Post.objects.filter(in_feed__in=[f.id for f in in_feeds],
                                enabled=True)
    for post in posts:
        # Prefer a non-empty manual override over the scraped description.
        if post.override_desc is not None and post.override_desc != '':
            description = post.override_desc
        else:
            description = post.description
        description = description + ' [<a href="{}">Continue reading...</a>]'.\
            format(post.link)

        entry = generator.add_entry()
        entry.id(post.id)
        entry.title(post.title)
        entry.description(description)
        entry.author({'name': post.author})
        entry.link(href=post.link)
        entry.guid(post.guid)

        # Manual publication-date override wins when present.
        if post.override_pub is not None:
            pub = post.override_pub
        else:
            pub = post.published
        entry.pubDate(pub)
        entry.updated(pub)

    data = generator.atom_str(pretty=True)
    response = HttpResponse(data, content_type='application/rss+xml')
    response['Content-Length'] = len(data)
    return response
def feed_header():
    """Return a FeedGenerator pre-populated with the Goodgame forum header."""
    feed = FeedGenerator()
    feed.id('https://goodgame.ru/forum/')
    feed.title('Goodgame forum')
    feed.description('forum')
    feed.author({
        'name': 'strayge',
        'email': '*****@*****.**',
    })
    feed.link(href='https://goodgame.ru/forum/', rel='alternate')
    feed.language('ru')
    return feed
def update_ics(self, data, rss=None):
    """Merge new events into the existing calendar and write the output.

    With ``rss`` set to "atom" or "rss", writes a feed file of the merged
    events instead of the .ics file; otherwise writes the .ics calendar
    (injecting an X-WR-CALNAME header after the VERSION line).

    :param data: raw source data passed to ``self.new_events``.
    :param rss: None (write .ics), "atom", or "rss".
    """
    c = Calendar(events=self.get_exist_events())
    for e in self.new_events(data):
        # remove by hash
        if e in c.events:
            c.events.remove(e)
        # add the newer one
        c.events.add(e)
    # print(c.events)
    if rss:
        assert rss in ["atom", "rss"]
        fg = FeedGenerator()
        fg.id(self.name)
        fg.title(f"Events of {self.cal_name}")
        for i, e in enumerate(c.events):  # type: Event
            fe = fg.add_entry()
            fe.id(e.uid)
            fe.title(e.name)
            fe.link(href=e.url)
            fe.updated(e.begin.datetime)
            # Event names look like "MARKET | ..."; the prefix selects
            # US-market entries for the enriched description.
            market = e.name.split("|")[0].strip()
            # only latest symbols
            if market == "US":  # and len(c.events) - i <= 5:
                # disable for timeout in now server
                info_html = get_ipo_info_html(e.uid, enable=False)
                link = f'<p><a href="https://www.nasdaq.com/symbol/{e.uid}">Goto NASDAQ detail page</a></p>'
                fe.description(
                    f"<p>{e.description}</p> {link} {info_html}")
            else:
                fe.description(e.description)
        rss_output = self.get_output_path(rss)
        if rss == "atom":
            fg.atom_file(rss_output)
            print(f"wrote {rss_output}")
        elif rss == "rss":
            fg.rss_file(rss_output)
            print(f"wrote {rss_output}")
    else:
        ics_output = self.get_output_path()
        with open(ics_output, "w") as f:
            # Inject the calendar-name header right after the VERSION line,
            # once, while streaming the serialized calendar to disk.
            wrote = False
            for l in c:
                f.write(l)
                if not wrote and l.startswith("VERSION"):
                    f.write(f"X-WR-CALNAME:{self.cal_name}\n")
                    wrote = True
        print(f"wrote {ics_output}")
def all_new_rss():
    """ RSS feed for /all/new """
    posts = misc.getPostList(misc.postListQueryBase(), "new", 1)

    feed = FeedGenerator()
    feed.id(request.url)
    feed.title("All new posts")
    feed.link(href=request.url_root, rel="alternate")
    feed.link(href=request.url, rel="self")

    populated = misc.populate_feed(feed, posts)
    return Response(populated.atom_str(pretty=True),
                    mimetype="application/atom+xml")
def create_feed():
    """Build a seminar-schedule feed and print it in both Atom and RSS.

    BUG FIXES: the original passed a ``UUID`` object to ``fg.id()`` where
    feedgen expects a string, and called ``fe.id()`` with no argument — which
    is a *getter*, leaving the entry without the id Atom requires (feedgen
    raises when serializing an entry with no id). Both now receive string
    UUIDs.
    """
    # date, speaker, papers
    fg = FeedGenerator()
    fg.id(str(uuid4()))
    fg.title("Seminar Schedule")
    fg.description("See ")
    fg.link(href="https://cgm.cs.ntust.edu.tw/seminar/")
    fg.language("en")
    fe = fg.add_entry()
    fe.id(str(uuid4()))
    print(fg.atom_str(pretty=True))
    print(fg.rss_str(pretty=True))
def init_feed(self, playlist):
    """Create a podcast FeedGenerator seeded from a playlist info dict.

    Title and description are only set when the playlist has a title.
    """
    feed = FeedGenerator()
    feed.load_extension('podcast')
    feed.id(playlist['webpage_url'])
    title = playlist['title']
    if title:
        feed.title(title)
        feed.description(title)
    feed.link({'href': playlist['webpage_url'], 'rel': 'alternate'})
    return feed
def construct_feed(links, id=''):
    """ Constructs a feed from a list of links """
    feed = FeedGenerator()
    feed.id('http://links.metadada.xyz/fb/' + id)
    feed.title('links')
    feed.subtitle(
        'links from your friends without the ads, memoir, or auto-playing videos'
    )
    feed.link(href='http://links.metadada.xyz/', rel='self')

    # Newest links first.
    newest_first = sorted(links, key=lambda item: item['created_time'],
                          reverse=True)
    for link in newest_first:
        add_to_feed(link, feed.add_entry(), id=id)
    return feed
def news_feed():
    """Build the Jazzband news Atom feed from the static news pages."""
    feed = FeedGenerator()
    feed.id("https://jazzband.co/news/feed")
    feed.link(href="https://jazzband.co/", rel="alternate")
    feed.title("Jazzband News Feed")
    feed.subtitle("We are all part of this.")
    feed.link(href=full_url(request.url), rel="self")

    # Updated timestamps of every page, used for the feed-level value.
    updates = []
    for page in news_pages:
        if page.path == "index":
            continue

        # Make the datetimes timezone-aware if needed.
        published = page.meta.get("published", None)
        if published and published.tzinfo is None:
            published = pytz.utc.localize(published)
        updated = page.meta.get("updated", published)
        if updated:
            if updated.tzinfo is None:
                updated = pytz.utc.localize(updated)
            updates.append(updated)

        summary = page.meta.get("summary", None)
        author = page.meta.get("author", None)
        author_link = page.meta.get("author_link", None)
        url = full_url(url_for("content.news", path=page.path))

        entry = feed.add_entry()
        entry.id(url)
        entry.title(page.meta["title"])
        entry.summary(summary)
        entry.content(content=str(page.html), type="html")
        if author is not None:
            author = {"name": author}
            if author_link is not None:
                author["uri"] = author_link
            entry.author(author)
        entry.link(href=url)
        entry.updated(updated)
        entry.published(published)

    # Feed 'updated' is the most recent page update, or now if none exist.
    sorted_updates = sorted(updates)
    feed.updated(sorted_updates[-1] if sorted_updates else datetime.utcnow())
    return Response(feed.atom_str(pretty=True), mimetype="application/atom+xml")
def feed(request, **kwargs):
    """Django view: podcast RSS feed for the Podcast identified by kwargs['pk'].

    Without a pk, every Podcast_Item is included.
    """
    pk = kwargs.get('pk')
    podcast = Podcast.objects.get(pk=pk)

    fg = FeedGenerator()
    fg.load_extension('podcast')
    # Feed id: "<id>-<title>" with spaces replaced by underscores.
    fg.id("{}-{}".format(podcast.id, podcast.title).replace(' ', '_'))
    fg.title(podcast.title)
    fg.author({'name': podcast.author, 'email': '*****@*****.**'})
    fg.link(href=request.build_absolute_uri(
        reverse('podcast_feed', kwargs={'pk': pk})))
    fg.image(url=podcast.cover,
             title=podcast.title,
             link=request.build_absolute_uri(
                 reverse('podcast_items', kwargs={'pk': pk})),
             width='800',
             height='600')
    fg.description(podcast.description)

    if pk:
        item_list = Podcast_Item.objects.filter(podcast__pk=pk)
    else:
        item_list = Podcast_Item.objects.all()

    for item in item_list:
        entry = fg.add_entry()
        entry.id("{}-{}".format(item.id, item.item.title.replace(' ', '_')))
        entry.title(item.item.title)
        entry.pubdate(item.pub_date)
        entry.link(href=item.item.link)
        entry.description(item.item.description)
        entry.enclosure(item.item.link, 0, 'audio/mpeg')

    return HttpResponse(fg.rss_str(pretty=False), content_type="text/xml")
def staticrss(request, podcast_id):
    """Django view: podcast RSS built from stored external audio links.

    BUG FIX: the original did ``if not podcast: return Http404`` — but
    ``Podcast.objects.get`` raises ``DoesNotExist`` rather than returning
    ``None`` (dead check), and ``Http404`` must be *raised*, not returned,
    for Django to serve a 404. Both are corrected.
    """
    try:
        podcast = Podcast.objects.get(id=podcast_id)
    except Podcast.DoesNotExist:
        raise Http404

    pods = podcast.pod_set.order_by('-audio_link')
    fg = FeedGenerator()
    fg.load_extension('podcast')
    # Canonical URL of this feed on the current site.
    channelURL = ''.join([
        'http://',
        get_current_site(request).domain,
        reverse('you2rss:staticrss', args=(podcast_id, ))
    ])
    fg.id(channelURL)
    fg.title(podcast.title_text)
    fg.author({'name': 'pon sko', 'email': '*****@*****.**'})
    fg.link(href=channelURL, rel='alternate')
    description = podcast.description_text
    if len(description) < 2:
        description = "no desc"  # feed readers need a non-empty description
    fg.subtitle(description)
    fg.description(description)
    fg.language('en')
    fg.logo(logo=podcast.thumbnail)
    fg.image(url=podcast.thumbnail, title=podcast.title_text)
    fg.podcast.itunes_image(podcast.thumbnail)

    for pod in pods:
        fe = fg.add_entry()
        fe.author(name=podcast.title_text)
        desc = pod.description_text
        if len(desc) < 2:
            desc = "no desc"
        fe.content(desc)
        # Enclosure points directly at the stored audio link.
        fileURL = pod.audio_link
        fe.enclosure(fileURL, pod.audio_size, pod.audio_type)
        fe.id(fileURL)
        fe.link(href=fileURL, rel='alternate')
        fe.podcast.itunes_image(podcast.thumbnail)
        fe.pubdate(pod.pub_date)
        fe.published(pod.pub_date)
        fe.title(pod.title_text)

    rssdata = fg.rss_str(pretty=True)
    response = HttpResponse(rssdata,
                            content_type='application/rss+xml; charset=UTF-8')
    response['Content-Length'] = len(rssdata)
    return response
def render_feed(text_paths, outpath):
    """Render the blog feed from post source files.

    Returns a ``(rss_bytes, atom_bytes)`` pair built from the same
    FeedGenerator; the self-link is swapped (replace=True) between the two
    serializations, so the order rss-then-atom matters.

    :param text_paths: paths of post source files; ones without a date are
        skipped.
    :param outpath: output path; 'python' in it selects the category feed's
        self-link.
    """
    # http://rhodesmill.org/brandon/feed
    # http://rhodesmill.org/brandon/category/python/feed
    # http://rhodesmill.org/brandon/feed/atom/
    t0 = datetime.min.time()

    def fix(d):
        # Promote a date to a timezone-aware US/Eastern midnight datetime.
        dt = datetime.combine(d, t0)
        return timezone('US/Eastern').localize(dt)

    posts = [post_info(path) for path in text_paths if date_of(path)]
    posts = sorted(posts, key=lambda post: post['date'])
    # NOTE(review): keeps only the single most recent post in the feed —
    # presumably intentional; confirm.
    posts = posts[-1:]
    most_recent_date = max(post['date'] for post in posts)

    def full(url):
        # Absolute site URL for a root-relative path.
        return 'http://rhodesmill.org/' + url.lstrip('/')

    fg = FeedGenerator()
    fg.id(full('/'))
    fg.author({'name': 'Brandon Rhodes'})
    fg.language('en')
    fg.link(href=full('/brandon/'), rel='alternate')
    if 'python' in outpath:
        fg.link(href=full('/brandon/category/python/feed/'), rel='self')
    else:
        fg.link(href=full('/brandon/feed/'), rel='self')
    fg.subtitle('Thoughts and ideas from Brandon Rhodes')
    fg.title("Let's Discuss the Matter Further")
    fg.updated(fix(most_recent_date))

    for post in posts:
        url = full(post['url_path'])
        excerpt = truncate_at_more(post['body_html'], url)
        fe = fg.add_entry()
        fe.content(excerpt, type='html')
        fe.guid(url, permalink=True)
        fe.id(url)
        fe.link({'href': url})
        fe.published(fix(post['date']))
        fe.title(post['title'])
        fe.updated(fix(post['date']))

    # Serialize RSS first, then repoint the self-link at the atom URL
    # before serializing Atom.
    rss = fg.rss_str(pretty=True)
    fg.link(href=full('/brandon/feed/atom/'), rel='self', replace=True)
    atom = fg.atom_str(pretty=True)
    return rss, atom
def feed_collection(region, local_url):
    """Build a minimal Atom feed for *region* containing a single entry."""
    region_url = local_url + region

    feed = FeedGenerator()
    feed.id(region_url)
    feed.title(region)
    feed.author({'name': 'yujie'})
    feed.subtitle('atom format')
    feed.language('en')

    entry = feed.add_entry()
    entry.id(region_url)
    entry.title(region)
    entry.link(href=region_url)
    return feed
def setup_feed():
    """Create the base podcast feed for the Potato saved-videos RSS."""
    feed = FeedGenerator()
    feed.load_extension('podcast')
    feed.language('en')
    feed.id('https://jdelman.me/potato')
    feed.author(name='Potato', email='*****@*****.**')
    feed.link(href='https://jdelman.me/potato', rel='alternate')
    feed.logo('https://jdelman.me/static/potato.jpg')
    feed.title("Potato - Josh's Saved Videos")
    feed.subtitle("Automatically generated RSS.")
    return feed
def _create_feed(self) -> FeedGenerator:
    """Assemble the blog's base feed from the [lw] configuration section."""
    cfg = self.config
    feed = FeedGenerator()
    feed.id(self.URL + "content")
    feed.title(cfg.get('lw', 'blogTitle'))
    feed.author({
        "name": cfg.get('lw', 'blogAuthor'),
        "email": cfg.get('lw', 'blogAuthorEmail'),
    })
    feed.link(href=self.URL, rel="alternate")
    feed.logo(self.URL + "image/favicon.ico")
    feed.subtitle(cfg.get('lw', 'blogSubTitle'))
    feed.link(href=self.URL + "content/rss.xml", rel="self")
    feed.language(cfg.get('lw', 'blogLang'))
    return feed
def __init__(
    self,
    id_: str,
    links: List[dict],
    title: str,
    updated: Optional[str],
):
    """Wrap a FeedGenerator configured with the given feed metadata.

    The generator string is blanked so no tool signature appears in output.
    """
    generator = FeedGenerator()
    generator.id(id_)
    generator.link(links)
    generator.title(title)
    generator.updated(updated)
    generator.generator("")
    self._feed_generator = generator