def out_rss(self, filename):
    """Serialize this board's scraped items as an alboPOP RSS file.

    :param filename: destination path for the generated RSS XML
    """
    fg = FeedGenerator()
    fg.register_extension('albopop', AlbopopExtension, AlbopopEntryExtension)
    # Channel-level metadata comes from state already collected on self.
    fg.id(self.url)
    fg.title(self.title)
    fg.description(self.title)
    fg.author({'name': 'alboPOP', 'email': ''})
    fg.link(href=self.url)
    fg.pubDate(formatdate())
    fg.webMaster(self.webMaster)
    fg.docs('https://github.com/mfortini/alboPOP_saga')
    fg.language('it')
    fg.albopop.categoryName(self.categoryName)
    fg.albopop.categoryType(self.categoryType)
    # One feed entry per scraped item.
    for record in self.items:
        entry = fg.add_entry()
        entry.id(record['link'])
        entry.category(term=record['tipo'])
        entry.pubdate(record['pubDate'])
        entry.link(href=record['link'])
        entry.title(record['title'])
        entry.description(record['description'])
        # UID is "<number>/<year>", e.g. "12/2020".
        entry.albopop.categoryUID(f"{record['numero']}/{record['anno']}")
    fg.rss_file(filename)
def get_weekly_jobs_rss():
    """Serve the cached weekly HEP jobs digest as an RSS feed.

    Reads the pre-rendered weekly email (title, HTML body, timestamp) from
    Redis and wraps it in a single-entry RSS document.
    """
    conf = current_app.config
    redis = StrictRedis.from_url(conf.get("CACHE_REDIS_URL"))
    cached = redis.hgetall(conf.get("WEEKLY_JOBS_EMAIL_REDIS_KEY"))
    # Redis hashes come back as bytes; decode the text fields explicitly.
    subject = cached[b"title"].decode("UTF-8")
    html_body = cached[b"html"].decode("UTF-8")
    timestamp = float(cached[b"timestamp"])
    sent_at = datetime.fromtimestamp(timestamp, tz=pytz.UTC)
    feed = FeedGenerator()
    feed.link(href=request.url_root)
    feed.title("INSPIRE Weekly HEP Jobs")
    feed.author({"name": "inspirehep.net"})
    feed.description("Feed for weekly HEP jobs from INSPIRE")
    feed.pubDate(sent_at)
    feed.lastBuildDate(sent_at)
    entry = feed.add_entry()
    # The send timestamp doubles as a unique, monotonically increasing id.
    entry.id(str(timestamp))
    entry.title(subject)
    entry.content(html_body)
    entry.published(sent_at)
    return Response(response=feed.rss_str(), mimetype="application/rss+xml")
def _create_feed(speaker, talks, file_name):
    """Generate a podcast RSS file for one speaker.

    :param speaker: speaker name used in the feed title, description and
        cover-art URL
    :param talks: talk dicts (newest first — talks[0]['time'] becomes the
        feed's pubDate/lastBuildDate); each supplies title, preview, html,
        audio_url, audio_size, uri, url and time
    :param file_name: destination path for the generated RSS XML
    """
    LOGGER.info("Creating feed for %s", speaker)
    updated = talks[0]['time']
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.language('en')
    fg.title(f'Talks By {speaker}')
    fg.link(href='http://philip.lundrigan.org/Speakercast/')
    fg.image(url=f'http://philip.lundrigan.org/Speakercast/covers/{urllib.parse.quote(speaker)}.jpg',
             title=f'General Conference talks by {speaker}.')
    fg.description(f'General Conference talks by {speaker}.')
    fg.author({'name': 'Philip Lundrigan', 'email': '*****@*****.**'})
    fg.generator('Speakercast')
    fg.pubDate(updated)
    fg.lastBuildDate(updated)
    fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')
    for talk in talks:
        fe = fg.add_entry()
        # BUG FIX: removed a leftover example id
        # ('http://lernfunk.de/media/654321/1/file.mp3') that was set here
        # and then immediately overwritten by fe.id(talk['uri']) below.
        fe.title(talk['title'])
        fe.description(talk['preview'])
        fe.content(talk['html'], type='CDATA')
        # Enclosure length must be a string for feedgen.
        fe.enclosure(talk['audio_url'], str(talk['audio_size']), 'audio/mpeg')
        fe.id(talk['uri'])
        fe.link(href=talk['url'])
        fe.published(talk['time'])
    fg.rss_file(file_name, pretty=True)
def generateRss(articlesList=exampleArticlesList, directoryPath='public_files',
                baseUrl='https://engn1931z99.pythonanywhere.com/'):
    """Build a podcast RSS feed from a list of article dictionaries.

    Creates one entry per article (mp3 enclosure, cover image, duration)
    and writes the result to <directoryPath>/podcast.xml.

    :param articlesList: article dicts with articleId, title, authors and
        abstract keys; matching <articleId>.mp3/.png files must exist in
        directoryPath
    :param directoryPath: directory holding the media files and output XML
    :param baseUrl: public base URL used to build file-download links
    """
    # Overall generation follows http://lkiesow.github.io/python-feedgen/
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title('arXiv.org EE SP RSS Podcast')
    fg.link(href=baseUrl)
    fg.language('en')
    # Use the newest .png (lexicographic sort, per the StackOverflow recipe
    # https://stackoverflow.com/questions/9788119) as the podcast cover.
    png_files = sorted(f for f in os.listdir(directoryPath) if f.endswith('.png'))
    latest_png = png_files[-1]
    fg.logo(f'{baseUrl}files?filename={latest_png}')
    fg.author({'name': 'Jason Webster', 'email': '*****@*****.**'})
    fg.podcast.itunes_owner('Jason Webster', '*****@*****.**')
    fg.podcast.itunes_category('Science & Medicine', 'Natural Sciences')
    fg.description('The Best Podcast in the Universe')
    fg.skipDays('Saturday', 'Sunday')
    build_time = datetime.datetime.now(tz=datetime.timezone.utc)
    fg.pubDate(build_time)
    for article in articlesList:
        entry = fg.add_entry()
        mp3_name = article['articleId'] + '.mp3'
        png_name = article['articleId'] + '.png'
        mp3_url = f'{baseUrl}files?filename={mp3_name}'
        entry.id(mp3_url)
        entry.title(article['title'])
        entry.enclosure(mp3_url,
                        str(os.path.getsize(directoryPath + '/' + mp3_name)),
                        'audio/mpeg')
        entry.description('Authors: ' + article['authors'] + '\n'
                          + 'Article Id: ' + article['articleId'] + '\n'
                          + 'Abstract: ' + article['abstract'] + '\n')
        entry.podcast.itunes_explicit('clean')
        entry.podcast.itunes_image(f'{baseUrl}files?filename={png_name}')
        entry.pubdate(build_time)
        # Read the mp3 duration from the file's metadata.
        audio = MP3(directoryPath + '/' + article['articleId'] + '.mp3')
        entry.podcast.itunes_duration(
            str(datetime.timedelta(seconds=int(audio.info.length))))
    fg.rss_file(directoryPath + '/podcast.xml', pretty=True)
def feed(request, slug):
    """
    Return an RSS feed

    :param request: The request object.
    :param slug: The slug for the requested feed.
    :return: The rendered feed.
    """
    out_feed = get_object_or_404(OutFeed, slug=slug)
    url = "{}://{}{}".format(request.scheme, request.get_host(),
                             reverse('posts', args=[slug]))
    fg = FeedGenerator()
    fg.id(url)
    fg.title(out_feed.title)
    fg.link(href=url, rel='alternate')
    fg.description(out_feed.description)
    fg.pubDate(out_feed.updated)
    in_feeds = InFeed.objects.filter(out_feed=out_feed, enabled=True)
    posts = Post.objects.filter(in_feed__in=[f.id for f in in_feeds],
                                enabled=True)
    for post in posts:
        # A non-empty manual override takes precedence over the original
        # description (empty string and None both fall through).
        if post.override_desc:
            description = post.override_desc
        else:
            description = post.description
        description = description + ' [<a href="{}">Continue reading...</a>]'.\
            format(post.link)
        fe = fg.add_entry()
        fe.id(post.id)
        fe.title(post.title)
        fe.description(description)
        fe.author({'name': post.author})
        fe.link(href=post.link)
        fe.guid(post.guid)
        # Manual publication-date override, when set, wins over the
        # scraped date.
        if post.override_pub is not None:
            fe.pubDate(post.override_pub)
            fe.updated(post.override_pub)
        else:
            fe.pubDate(post.published)
            fe.updated(post.published)
    # BUG FIX: this view documents, describes and serves an RSS feed
    # (content type below is application/rss+xml), but the original body
    # serialized Atom via fg.atom_str(); emit RSS to match the contract.
    data = fg.rss_str(pretty=True)
    response = HttpResponse(data, content_type='application/rss+xml')
    response['Content-Length'] = len(data)
    return response
def _build_feed(self):
    """Render the ten newest posts to RSS and Atom files on disk.

    Writes feed/index.xml and feed/index.html (RSS) plus
    feed/atom/index.xml and feed/atom/index.html (Atom) under the
    configured build directory.
    """
    router = Router(self._config)
    ph = filterPlaceholders
    cfg = self._config
    fg = FeedGenerator()
    fg.id(cfg.site_prefix)
    fg.title(cfg.site_name)
    fg.author({'name': ph(cfg.author), 'email': ph(cfg.email)})
    fg.link(href=cfg.domain_name + cfg.site_prefix, rel='alternate')
    fg.logo(ph(cfg.site_logo))
    fg.subtitle(ph(cfg.description))
    # RSS requires a non-empty description; fall back to a single space.
    fg.description(ph(cfg.description) or ' ')
    fg.language(ph(cfg.language))
    fg.lastBuildDate(moment.now().locale(cfg.locale).date)
    fg.pubDate(moment.now().locale(cfg.locale).date)
    # Only the ten most recent posts appear in the feed.
    for recent in self._posts[:10]:
        meta = recent.meta
        entry = fg.add_entry()
        entry.title(meta['title'])
        entry.link(href=cfg.domain_name + router.gen_permalink_by_meta(meta))
        entry.guid(router.gen_permalink_by_meta(meta), True)
        entry.pubDate(meta['date'].date)
        entry.author({'name': meta['author'],
                      'uri': ph(cfg.author_homepage),
                      'email': ph(cfg.email)})
        entry.content(recent.parsed)
    atom_dir = unify_joinpath(cfg.build_dir, 'feed/atom')
    if not os.path.exists(atom_dir):
        os.makedirs(atom_dir)
    # Each feed is written as both .xml and .html so static hosting can
    # serve it at either path.
    fg.rss_file(unify_joinpath(cfg.build_dir, 'feed/index.xml'))
    fg.rss_file(unify_joinpath(cfg.build_dir, 'feed/index.html'))
    fg.atom_file(unify_joinpath(cfg.build_dir, 'feed/atom/index.xml'))
    fg.atom_file(unify_joinpath(cfg.build_dir, 'feed/atom/index.html'))
def saveFeed(listings, title, path):
    """Write an Atom feed of NYC apartment listings to *path*.

    :param listings: listing tuples indexed as apt[0]=url, apt[1]=price,
        apt[2]=date, apt[4]=neighborhood, apt[5]=space-separated image
        URLs, apt[8]=description — inferred from usage; confirm against
        the scraper that produces them
    :param title: feed title, also used to build the canonical feed URL
    :param path: output file path for the Atom XML
    """
    url = githubRepoURL + title + ".xml"
    # Create a feed generator
    fg = FeedGenerator()
    fg.id(url)
    fg.title(title)
    fg.author({'name': 'Ben Snell'})
    fg.description("NYC 2BR Apartment Listings in " + title)
    fg.link(href=url, rel='alternate')
    fg.language('en')
    # NOTE(review): naive local time plus a hard-coded -05:00 offset is
    # wrong during DST — consider a timezone-aware datetime.
    time = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "-05:00"
    fg.pubDate(time)
    fg.updated(time)
    for apt in listings:
        e = fg.add_entry()
        e.id(apt[0])
        e.title("$" + apt[1] + " // " + apt[4])
        e.link(href=apt[0])
        text = ""
        if apt[5] != "":
            # Inline every photo, placing the description right after the
            # first image so it appears above the fold in readers.
            for i, img in enumerate(apt[5].split(" ")):
                text += "<img src=\"" + img + "\" /> "
                if i == 0:
                    text += "<p>" + apt[8] + "</p>"
        else:
            text += "<p>" + apt[8] + "</p>"
        e.content(type="html", content=text)
        # This doesn't seem to work:
        e.pubDate(datetime2RSSString(clDate(apt[2])))
        e.updated(datetime2RSSString(clDate(apt[2])))
    # BUG FIX: removed a stray fg.atom_str(pretty=True) call whose return
    # value was discarded — it serialized the whole feed a second time
    # for nothing; atom_file() below does the real write.
    fg.atom_file(path)
def render_rss(self, post_list):
    """Write RSS and Atom feeds for the ten newest posts in *post_list*.

    Output goes to feed/index.{xml,html} and feed/atom/index.{xml,html}
    under the configured build directory.
    """
    router = Router(self._config)
    conf = self._config
    fg = FeedGenerator()
    fg.id(conf.site_prefix)
    fg.title(conf.site_name)
    fg.author({'name': conf.author, 'email': conf.email})
    fg.link(href=conf.site_prefix, rel='alternate')
    fg.logo(conf.site_logo)
    fg.subtitle(conf.description)
    # Chinese-language site; timestamps use the Shanghai locale.
    fg.language('zh-CN')
    fg.lastBuildDate(moment.now().locale('Asia/Shanghai').date)
    fg.pubDate(moment.now().locale('Asia/Shanghai').date)
    for post in post_list[:10]:
        meta = post.meta
        item = fg.add_entry()
        item.title(meta['title'])
        item.link(href=router.gen_permalink_by_meta(meta))
        item.guid(router.gen_permalink_by_meta(meta), True)
        item.pubDate(meta['date'].date)
        item.author({'name': meta['author'],
                     'uri': conf.author_homepage,
                     'email': conf.email})
        item.content(post.parsed)
    atom_dir = Utils.unify_joinpath(conf.build_dir, 'feed/atom')
    if not os.path.exists(atom_dir):
        os.makedirs(atom_dir)
    # Both .xml and .html copies are written so static servers can serve
    # the feed at either path.
    fg.rss_file(Utils.unify_joinpath(conf.build_dir, 'feed/index.xml'))
    fg.rss_file(Utils.unify_joinpath(conf.build_dir, 'feed/index.html'))
    fg.atom_file(Utils.unify_joinpath(conf.build_dir, 'feed/atom/index.xml'))
    fg.atom_file(Utils.unify_joinpath(conf.build_dir, 'feed/atom/index.html'))
def saveFeed(listings, title, path):
    """Write an Atom feed of art-show open-call opportunities to *path*.

    :param listings: dicts with keys "ID", "Title", "Application Deadline"
        and "url"; the entry body comes from getHtmlFormattedListing
    :param title: feed title, also used to build the canonical feed URL
    :param path: output file path for the Atom XML
    """
    url = githubRepoURL + title + ".xml"
    # Create a feed generator
    fg = FeedGenerator()
    fg.id(url)
    fg.title(title)
    fg.author({'name': 'Ben Snell'})
    fg.description("Art Show Open Call Opportunities")
    fg.link(href=url, rel='alternate')
    fg.language('en')
    # NOTE(review): naive local time plus a hard-coded -05:00 offset is
    # wrong during DST — consider a timezone-aware datetime.
    time = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "-05:00"
    fg.pubDate(time)
    fg.updated(time)
    for item in listings:
        e = fg.add_entry()
        e.id(item["ID"])
        # Prefix the application deadline so entries sort meaningfully
        # in feed readers.
        thisTitle = getShortDate(item["Application Deadline"]) + item["Title"]
        e.title(thisTitle)
        e.link(href=item["url"])
        e.content(type="html", content=getHtmlFormattedListing(item))
        # This doesn't seem to work:
        # e.pubDate( datetime2RSSString(clDate(apt[2])) )
        # e.updated( datetime2RSSString(clDate(apt[2])) )
    # BUG FIX: removed a stray fg.atom_str(pretty=True) call whose return
    # value was discarded — it serialized the whole feed a second time
    # for nothing; atom_file() below does the real write.
    fg.atom_file(path)
def generate_feed(input_file, output_file):
    """Build the podcast RSS feed from *input_file* and write it to *output_file*.

    :param input_file: path consumed by read_items(); items supply guid,
        title, description, link and pubDate keys
    :param output_file: destination path for the generated RSS XML
    """
    fg = FeedGenerator()
    fg.load_extension('podcast', rss=True)
    ## RSS tags
    # Required
    fg.title(TITLE)
    fg.link(href=LINK)
    fg.description(DESCRIPTION)
    # Optional
    fg.language('en')
    fg.image(url=IMAGE_URL, title=TITLE, link=LINK)
    fg.ttl(720)
    # NOTE(review): RSS <webMaster> is conventionally an email address;
    # this passes the contact *name* — confirm intent before changing.
    fg.webMaster(CONTACT['name'])
    now = datetime.datetime.now()
    tz = pytz.timezone('Europe/Amsterdam')
    fg.pubDate(tz.localize(now))
    # iTunes
    fg.podcast.itunes_author('Dan LeBatard')
    fg.podcast.itunes_category(itunes_category='Sports & Recreation',
                               itunes_subcategory='Professional')
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit='clean')
    fg.podcast.itunes_owner(name=CONTACT['name'], email=CONTACT['email'])
    # Add items
    items = read_items(input_file)
    for item in items:
        ## RSS tags
        fe = fg.add_entry()
        fe.id(item['guid'])
        fe.title(item['title'])
        fe.description(item['description'])
        # Enclosure length is unknown up front; 0 is accepted by clients.
        fe.enclosure(item['link'], 0, 'audio/mpeg')
        fe.pubdate(item['pubDate'])
    # BUG FIX: removed a stray fg.rss_str(pretty=True) call whose return
    # value was discarded; rss_file() below performs the real serialization.
    fg.rss_file(output_file)
def initialize_feed(logger, new_feed_elements):
    """Create and pre-populate a FeedGenerator from feed metadata.

    :param logger: custom logger used for debug output
    :type logger: logger object
    :param new_feed_elements: dict with 'feed_title', 'feed_link',
        'feed_description' and, optionally, 'feed_pubdate'
    :type new_feed_elements: dictionary
    :return: the initialized FeedGenerator instance
    :rtype: object
    """
    generator = FeedGenerator()
    generator.title(new_feed_elements['feed_title'])
    generator.link(href=new_feed_elements['feed_link'], rel='alternate')
    generator.description(new_feed_elements['feed_description'])
    # The publication date is the only optional element.
    if 'feed_pubdate' in new_feed_elements:
        generator.pubDate(new_feed_elements['feed_pubdate'])
    logger.debug('Feed initialized for {0}'.format(
        new_feed_elements['feed_title']))
    return generator
# Channel-level metadata for the cheat-sheet update feed.
feed_generator.title("OWASP Cheat Sheet Series update")
feed_generator.description("List of the last updates on the content")
feed_generator.author({
    "name": "Core team",
    "email": "*****@*****.**"
})
feed_generator.link({
    "href": "https://cheatsheetseries.owasp.org",
    "rel": "self"
})
feed_generator.link({
    "href": "https://github.com/OWASP/CheatSheetSeries",
    "rel": "alternate"
})
feed_generator.language("en")
feed_generator.pubDate(current_date)
feed_generator.lastBuildDate(current_date)
for pull_request in pull_requests:
    # Take only merged PR
    if pull_request["merged_at"] is None:
        continue
    # Convert merge date from 2019-08-25T06:36:35Z to the RFC 822 form
    # used by RSS, e.g. Sun, 19 May 2002 15:21:36 GMT
    merge_date_src = pull_request["merged_at"]
    # BUG FIX: %B renders the full month name ("August"); RFC 822 dates
    # require the abbreviated form ("Aug"), so use %b instead.
    merge_date_dst = datetime.strptime(
        merge_date_src,
        "%Y-%m-%dT%H:%M:%SZ").strftime("%a, %d %b %Y %H:%M:%S GMT")
    feed_entry = feed_generator.add_entry()
    feed_entry.id(pull_request["html_url"])
    feed_entry.title(pull_request["title"])
    feed_entry.link({"href": pull_request["html_url"], "rel": "self"})
    feed_entry.link({"href": pull_request["url"], "rel": "alternate"})
def setUp(self):
    """Build a FeedGenerator populated with every supported channel field.

    The raw input values are also stored on ``self`` so test methods can
    compare the serialized feed output against the exact inputs.
    """
    fg = FeedGenerator()
    # XML namespaces used when asserting on the serialized output.
    self.nsAtom = "http://www.w3.org/2005/Atom"
    self.nsRss = "http://purl.org/rss/1.0/modules/content/"
    # Channel-level fixture values.
    self.feedId = 'http://lernfunk.de/media/654321'
    self.title = 'Some Testfeed'
    self.authorName = 'John Doe'
    self.authorMail = '*****@*****.**'
    self.author = {'name': self.authorName, 'email': self.authorMail}
    self.linkHref = 'http://example.com'
    self.linkRel = 'alternate'
    self.logo = 'http://ex.com/logo.jpg'
    self.subtitle = 'This is a cool feed!'
    self.link2Href = 'http://larskiesow.de/test.atom'
    self.link2Rel = 'self'
    self.language = 'en'
    self.categoryTerm = 'This category term'
    self.categoryScheme = 'This category scheme'
    self.categoryLabel = 'This category label'
    # rssCloud publish/subscribe endpoint parameters.
    self.cloudDomain = 'example.com'
    self.cloudPort = '4711'
    self.cloudPath = '/ws/example'
    self.cloudRegisterProcedure = 'registerProcedure'
    self.cloudProtocol = 'SOAP 1.1'
    self.icon = "http://example.com/icon.png"
    self.contributor = {
        'name': "Contributor Name",
        'uri': "Contributor Uri",
        'email': 'Contributor email'
    }
    self.copyright = "The copyright notice"
    self.docs = 'http://www.rssboard.org/rss-specification'
    self.managingEditor = '*****@*****.**'
    self.rating = '(PICS-1.1 "http://www.classify.org/safesurf/" ' + \
        '1 r (SS~~000 1))'
    self.skipDays = 'Tuesday'
    self.skipHours = 23
    self.textInputTitle = "Text input title"
    self.textInputDescription = "Text input description"
    self.textInputName = "Text input name"
    self.textInputLink = "Text input link"
    self.ttl = 900
    self.webMaster = '*****@*****.**'
    # Apply every fixture value to the generator under test.
    fg.id(self.feedId)
    fg.title(self.title)
    fg.author(self.author)
    fg.link(href=self.linkHref, rel=self.linkRel)
    fg.logo(self.logo)
    fg.subtitle(self.subtitle)
    fg.link(href=self.link2Href, rel=self.link2Rel)
    fg.language(self.language)
    fg.cloud(domain=self.cloudDomain, port=self.cloudPort,
             path=self.cloudPath,
             registerProcedure=self.cloudRegisterProcedure,
             protocol=self.cloudProtocol)
    fg.icon(self.icon)
    fg.category(term=self.categoryTerm, scheme=self.categoryScheme,
                label=self.categoryLabel)
    fg.contributor(self.contributor)
    fg.copyright(self.copyright)
    fg.docs(docs=self.docs)
    fg.managingEditor(self.managingEditor)
    fg.rating(self.rating)
    fg.skipDays(self.skipDays)
    fg.skipHours(self.skipHours)
    fg.textInput(title=self.textInputTitle,
                 description=self.textInputDescription,
                 name=self.textInputName, link=self.textInputLink)
    fg.ttl(self.ttl)
    fg.webMaster(self.webMaster)
    fg.updated('2017-02-05 13:26:58+01:00')
    fg.pubDate('2017-02-05 13:26:58+01:00')
    fg.generator('python-feedgen', 'x', uri='http://github.com/lkie...')
    # NOTE(review): 'Example Inage' looks like a typo for 'Image', but it is
    # a runtime string other tests may assert on, so it is left unchanged.
    fg.image(url=self.logo, title=self.title, link=self.link2Href,
             width='123', height='123', description='Example Inage')
    self.fg = fg
def setUp(self):
    """Build a FeedGenerator populated with every supported channel field.

    Near-duplicate of the other setUp fixture in this file; raw input
    values are stored on ``self`` so test methods can compare serialized
    output against the exact inputs.
    """
    fg = FeedGenerator()
    # XML namespaces used when asserting on the serialized output.
    self.nsAtom = "http://www.w3.org/2005/Atom"
    self.nsRss = "http://purl.org/rss/1.0/modules/content/"
    # Channel-level fixture values.
    self.feedId = 'http://lernfunk.de/media/654321'
    self.title = 'Some Testfeed'
    self.authorName = 'John Doe'
    self.authorMail = '*****@*****.**'
    self.author = {'name': self.authorName, 'email': self.authorMail}
    self.linkHref = 'http://example.com'
    self.linkRel = 'alternate'
    self.logo = 'http://ex.com/logo.jpg'
    self.subtitle = 'This is a cool feed!'
    self.link2Href = 'http://larskiesow.de/test.atom'
    self.link2Rel = 'self'
    self.language = 'en'
    self.categoryTerm = 'This category term'
    self.categoryScheme = 'This category scheme'
    self.categoryLabel = 'This category label'
    # rssCloud publish/subscribe endpoint parameters.
    self.cloudDomain = 'example.com'
    self.cloudPort = '4711'
    self.cloudPath = '/ws/example'
    self.cloudRegisterProcedure = 'registerProcedure'
    self.cloudProtocol = 'SOAP 1.1'
    self.icon = "http://example.com/icon.png"
    self.contributor = {'name': "Contributor Name",
                        'uri': "Contributor Uri",
                        'email': 'Contributor email'}
    self.copyright = "The copyright notice"
    self.docs = 'http://www.rssboard.org/rss-specification'
    self.managingEditor = '*****@*****.**'
    self.rating = '(PICS-1.1 "http://www.classify.org/safesurf/" ' + \
        '1 r (SS~~000 1))'
    self.skipDays = 'Tuesday'
    self.skipHours = 23
    self.textInputTitle = "Text input title"
    self.textInputDescription = "Text input description"
    self.textInputName = "Text input name"
    self.textInputLink = "Text input link"
    self.ttl = 900
    self.webMaster = '*****@*****.**'
    # Apply every fixture value to the generator under test.
    fg.id(self.feedId)
    fg.title(self.title)
    fg.author(self.author)
    fg.link(href=self.linkHref, rel=self.linkRel)
    fg.logo(self.logo)
    fg.subtitle(self.subtitle)
    fg.link(href=self.link2Href, rel=self.link2Rel)
    fg.language(self.language)
    fg.cloud(domain=self.cloudDomain, port=self.cloudPort,
             path=self.cloudPath,
             registerProcedure=self.cloudRegisterProcedure,
             protocol=self.cloudProtocol)
    fg.icon(self.icon)
    fg.category(term=self.categoryTerm, scheme=self.categoryScheme,
                label=self.categoryLabel)
    fg.contributor(self.contributor)
    fg.copyright(self.copyright)
    fg.docs(docs=self.docs)
    fg.managingEditor(self.managingEditor)
    fg.rating(self.rating)
    fg.skipDays(self.skipDays)
    fg.skipHours(self.skipHours)
    fg.textInput(title=self.textInputTitle,
                 description=self.textInputDescription,
                 name=self.textInputName, link=self.textInputLink)
    fg.ttl(self.ttl)
    fg.webMaster(self.webMaster)
    fg.updated('2017-02-05 13:26:58+01:00')
    fg.pubDate('2017-02-05 13:26:58+01:00')
    fg.generator('python-feedgen', 'x', uri='http://github.com/lkie...')
    # NOTE(review): 'Example Inage' looks like a typo for 'Image', but it is
    # a runtime string other tests may assert on, so it is left unchanged.
    fg.image(url=self.logo, title=self.title, link=self.link2Href,
             width='123', height='123', description='Example Inage')
    self.fg = fg