def generate_feed(output_file, exclude_highlights=True):
    """Build the LeBatard Show podcast feed from the ESPN RSS source.

    Args:
        output_file: path the rendered RSS XML is written to.
        exclude_highlights: when True, skip entries whose duration exceeds
            one hour (3600 seconds).
    """
    # Parse the upstream RSS feed.
    d = feedparser.parse(ESPN_RSS_FEED)
    IMAGE_URL = d.feed.image["href"]

    # RSS feed generation
    fg = FeedGenerator()
    fg.load_extension("podcast", rss=True)

    ## RSS tags
    # Required
    fg.title(d.feed.title)
    fg.link(href="https://github.com/aaearon/lebatard-show-rss")
    fg.description(d.feed.description)
    # Optional
    fg.language(d.feed.language)
    fg.image(IMAGE_URL)
    fg.subtitle(d.feed.subtitle)
    # iTunes
    fg.podcast.itunes_author(d.feed.author)
    fg.podcast.itunes_category(itunes_category=d.feed.category)
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit="clean")
    fg.podcast.itunes_owner(name=CONTACT["name"], email=CONTACT["email"])

    tz = pytz.timezone("America/Los_Angeles")

    for e in d.entries:
        # Guard clause replaces the original `if ...: pass / else:` block:
        # skip over-an-hour entries when highlights are excluded.
        if exclude_highlights and episode_duration_string_to_int(e["itunes_duration"]) > 3600:
            continue

        fe = fg.add_entry()
        fe.id(e.id)
        fe.title(e.title)
        fe.description(e.description)
        fe.enclosure(url=e.enclosures[0]["href"],
                     length=e.enclosures[0]["length"],
                     type=e.enclosures[0]["type"])
        fe.podcast.itunes_summary(e.description)
        fe.podcast.itunes_subtitle(e.description)
        fe.podcast.itunes_duration(e["itunes_duration"])

        dt = datetime.fromtimestamp(time.mktime(e.published_parsed))
        date = tz.localize(dt)

        # Stagger publish times by hour number so hours of the same show
        # sort chronologically in podcast clients.
        if "Show: " in e.title:
            fe.published(date)
        elif "Hour 1" in e.title:
            fe.published(date + timedelta(hours=1))
        elif "Hour 2" in e.title:
            fe.published(date + timedelta(hours=2))
        elif "Hour 3" in e.title:
            fe.published(date + timedelta(hours=3))
        else:
            fe.published(date + timedelta(hours=-1))

    fg.rss_str(pretty=True)
    fg.rss_file(output_file)
def export_feed(self, output):
    """Write the combined JW.ORG magazines podcast feed to CACHE_DIR/output."""
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')
    fg.podcast.itunes_image("%s/icon.png" % URL_BASE)
    fg.title('JW.ORG Magazines')
    fg.description('Combined Feed of Watchtower (public), Watchtower (study), and Awake! in English from jw.org.')
    fg.link(href="%s/%s" % (URL_BASE, output), rel='self')

    manifest = self._load()

    # Flatten the manifest (language -> mnemonic -> issue) into (issue, data) pairs.
    entries = [
        (issue, data)
        for mnemonics in manifest.values()
        for issues in mnemonics.values()
        for issue, data in issues.items()
    ]

    # Newest issue first.
    entries.sort(key=lambda pair: pair[0], reverse=True)
    for _issue, entry in entries:
        fe = fg.add_entry()
        fe.id(entry['hash'])
        fe.title(entry['title'])
        fe.description(entry['title'])
        fe.published(pytz.utc.localize(entry['created_on']))
        url = "%s/%s" % (URL_BASE, os.path.basename(entry['file']))
        mime = 'audio/mpeg'
        fe.enclosure(url, str(entry['duration']), mime)
        fe.link(href=url, type=mime)

    fg.rss_str(pretty=True)
    fg.rss_file(os.path.join(CACHE_DIR, output))
def write_podcast(show, podcast_dir, base_public_url, showlocal_tz):
    """Create the podcast file."""
    feed = FeedGenerator()
    feed.load_extension('podcast')

    show_url = "{}{}.xml".format(base_public_url, show.id)
    feed.id(show_url.split('.')[0])
    feed.title(show.name)
    feed.image(show.image_url)
    feed.description(show.description)
    feed.link(href=show_url, rel='self')

    # One RSS item per mp3 recorded for this show.
    pattern = os.path.join(podcast_dir, "{}_*.mp3".format(show.id))
    for mp3_path in glob.glob(pattern):
        basename = os.path.basename(mp3_path)
        episode_date = _get_date_from_mp3_path(mp3_path, showlocal_tz)

        entry = feed.add_entry()
        entry.id(basename.split('.')[0])
        entry.pubdate(episode_date)
        entry.title("Programa del {0:%d}/{0:%m}/{0:%Y}".format(episode_date))
        entry.enclosure(base_public_url + basename,
                        str(os.stat(mp3_path).st_size),
                        'audio/mpeg')

    feed.rss_str(pretty=True)
    feed.rss_file(os.path.join(podcast_dir, '{}.xml'.format(show.id)))
class TestExtensionSyndication(unittest.TestCase):
    """Exercise the 'syndication' extension's channel-level RSS elements."""

    SYN_NS = {'sy': 'http://purl.org/rss/1.0/modules/syndication/'}

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('syndication')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def _sy_nodes(self, tag):
        # Render the current feed and return the matching channel-level nodes.
        root = etree.fromstring(self.fg.rss_str())
        return root.xpath('/rss/channel/' + tag, namespaces=self.SYN_NS)

    def test_update_period(self):
        for period in ('hourly', 'daily', 'weekly', 'monthly', 'yearly'):
            self.fg.syndication.update_period(period)
            assert self._sy_nodes('sy:UpdatePeriod')[0].text == period

    def test_update_frequency(self):
        for frequency in (1, 100, 2000, 100000):
            self.fg.syndication.update_frequency(frequency)
            assert self._sy_nodes('sy:UpdateFrequency')[0].text == str(frequency)

    def test_update_base(self):
        base = '2000-01-01T12:00+00:00'
        self.fg.syndication.update_base(base)
        assert self._sy_nodes('sy:UpdateBase')[0].text == base
class TestExtensionTorrent(unittest.TestCase):
    """Exercise the 'torrent' extension's per-item elements."""

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('torrent')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_podcastEntryItems(self):
        entry = self.fg.add_item()
        entry.title('y')

        # Set every torrent field, then read each one back via its getter.
        expected = {'filename': 'file.xy', 'infohash': '123',
                    'contentlength': '23', 'seeds': '1',
                    'peers': '2', 'verified': '1'}
        for field, value in expected.items():
            getattr(entry.torrent, field)(value)
        for field, value in expected.items():
            assert getattr(entry.torrent, field)() == value

        # The filename must appear in the rendered RSS XML.
        ns = {'torrent': 'http://xmlns.ezrss.it/0.1/dtd/'}
        root = etree.fromstring(self.fg.rss_str())
        filename = root.xpath('/rss/channel/item/torrent:filename/text()',
                              namespaces=ns)
        assert filename == ['file.xy']
def add_feed(self, feed_id, yt_playlist):
    """Register and return a podcast feed built from a YouTube playlist resource."""
    snippet = yt_playlist["snippet"]

    feed = FeedGenerator()
    feed.load_extension("podcast")
    feed.id(feed_id)
    feed.title(snippet["title"])
    feed.author({"name": snippet["channelTitle"]})
    feed.description(snippet["description"])
    feed.logo(snippet["thumbnails"]["standard"]["url"])
    feed.link(href="https://www.youtube.com/playlist?list=%s" % (yt_playlist["id"]))
    feed.rss_str(pretty=True)
    # Force a refresh on first access.
    feed.last_updated = 0

    self.feeds[feed_id] = feed
    return feed
def podcast_feed():
    """Serve the WPC Coders Podcast RSS feed.

    Builds the feed from a hard-coded episode list and returns it as an
    'application/rss+xml' HTTP response.
    """
    logo_url = url_for("static", filename="wpclogo_big.png", _external=True)

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.podcast.itunes_image(logo_url)
    fg.author({'name': 'Nathan Kellert', 'email': '*****@*****.**'})
    fg.link(href='http://watchpeoplecode.com/podcast_feed.xml', rel='self')
    fg.title('WPC Coders Podcast')
    fg.description('WPC Coders Podcast is a weekly peek into the lives of developers and the WatchPeopleCode community. Our goal is to keep our listeners entertained by giving them new and interesting insights into our industry as well as awesome things happening within our own community. Here, you can expect hear about some of the latest news, tools, and opportunities for developers in nearly every aread of our industry. Most importantly, we hope to have some fun and a few laughs in ways only other nerds know how.')  # NOQA

    # (file, title, UTC air date, description) per episode, oldest first.
    # Fix: the original used leading-zero literals (e.g. 02, 03) in the
    # datetime calls, which are invalid syntax in Python 3.
    episodes = [('ep1.mp3', 'Episode 1', datetime(2015, 2, 21, 23), 'Learn all about the WPC hosts, and where we came from in Episode 1!'),
                ('ep2.mp3', 'Episode 2', datetime(2015, 2, 28, 23), 'This week we cover your news, topics and questions in episode 2!'),
                ('ep3.mp3', 'Episode 3', datetime(2015, 3, 7, 23), "On todays podcast we talk to WatchPeopleCode's founder Alex Putilin. Hear about how the reddit search engine thousands watched him write. Also, hear the inside scoop of how WatchPeopleCode got started!"),  # NOQA
                ('ep4.mp3', 'Episode 4', datetime(2015, 3, 14, 23), "This week we talk to FreeCodeCamps Quincy Larson(http://www.freecodecamp.com) about their project that combines teaching new developers how to code and completing projects for non-profits! Lets find out how this group of streamers code with a cause!")]  # NOQA

    # Feed readers expect newest-first, so iterate the list reversed.
    for epfile, eptitle, epdate, epdescription in episodes[::-1]:
        epurl = "https://s3.amazonaws.com/wpcpodcast/{}".format(epfile)
        fe = fg.add_entry()
        fe.id(epurl)
        fe.title(eptitle)
        fe.description(epdescription)
        fe.podcast.itunes_image(logo_url)
        fe.pubdate(epdate.replace(tzinfo=pytz.UTC))
        fe.enclosure(epurl, 0, 'audio/mpeg')

    return Response(response=fg.rss_str(pretty=True), status=200,
                    mimetype='application/rss+xml')
def get_feed(query, title, description, link, image):
    """Get an RSS feed from the results of a query to the YouTube API."""
    service = _get_youtube_client()
    videos = service.search().list(part='snippet', **query, order='date',
                                   type='video', safeSearch='none').execute()

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title(title)
    fg.description(description)
    fg.link(href=link, rel='alternate')
    fg.image(image)

    youtube_plugin = get_plugin_from_settings()
    for video in videos['items']:
        video_id = video['id']['videoId']
        snippet = video['snippet']
        try:
            video_url = youtube_plugin.extract_link(
                "https://www.youtube.com/watch?v=" + video_id)
        except PluginException:
            # Skip videos whose direct media link cannot be resolved.
            continue
        entry = fg.add_entry()
        entry.id(video_id)
        entry.title(snippet['title'])
        entry.description(snippet['description'])
        entry.pubdate(dateutil.parser.parse(snippet['publishedAt']))
        entry.podcast.itunes_image(snippet['thumbnails']['high']['url'])
        headers = requests.head(video_url).headers
        entry.enclosure(video_url, headers['Content-Length'],
                        headers['Content-Type'])
    return fg.rss_str(pretty=True)
def makeRss(self):
    """Render the current track as a single RSS <item> and write it to disk.

    The generated item is extracted from a full feed rendering and appended
    to the existing feed via updateRss().
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://hypecast.blackmad.com/' + self.mode)
    fg.title('Hype Machine Robot Radio: ' + self.mode)
    fg.author( {'name':'David Blackmad','email':'*****@*****.**'} )
    fg.logo('http://dump.blackmad.com/the-hype-machine.jpg')
    fg.language('en')
    fg.link(href='http://hypecast.blackmad.com/' + self.mode)
    fg.description('Hype Machine Robot Radio: ' + self.mode)

    # Numbered tracklist, one song per line.
    description = ' <br/>'.join(['%s. %s' % (index + 1, self.mk_song_id(s)) for index, s in enumerate(self.songs)])

    fe = fg.add_entry()
    fe.title(self.track_name)
    fe.description(description)
    fe.id(self.filename)
    # add length
    print(self.relative_dir)
    print(self.filename)
    fe.enclosure(url = 'http://hypecast.blackmad.com/%s' % (self.filename), type="audio/mpeg")

    rss_str = fg.rss_str()
    newItem = ET.fromstring(rss_str)[0].find('item')
    # Fix: use a context manager so the file is closed even if the write fails.
    # NOTE(review): ET.tostring returns bytes on Python 3; writing it to a
    # text-mode file assumes Python 2 — confirm before porting.
    with open(self.get_filename('xml'), 'w') as out:
        out.write(ET.tostring(newItem))
    self.updateRss()
def get_feed(atom=False):
    """Build the blog feed; returns Atom XML when atom is True, RSS otherwise."""
    fg = FeedGenerator()
    domain = get_domain()
    items = get_posts({"limit": "10"}, full=True)["results"]

    fg.id("http://%s/" % domain)
    fg.title("Blog do MatrUFSC2")
    fg.description("Feed do blog do MatrUFSC2, onde noticias e novos recursos sao anunciados primeiro!")
    fg.language('pt-BR')
    fg.link({"href": "/blog/feed", "rel": "self"})
    fg.updated(items[0]["posted_at"].replace(tzinfo=pytz.UTC))

    strip_tags = Cleaner(allow_tags=[])
    for item in items:
        posted_at = item["posted_at"].replace(tzinfo=pytz.UTC)

        entry = fg.add_entry()
        entry.title(item["title"])

        # Plain-text summary: strip every tag from the HTML summary.
        cleaned = strip_tags.clean_html(html.fromstring(item["summary"]))
        entry.description(cleaned.text_content(), True)

        entry.link({"href": item["link"], "rel": "self"})
        entry.content(item["body"])
        entry.published(posted_at)
        entry.updated(posted_at)
        entry.category({"label": item["category"]["title"],
                        "term": item["category"]["slug"]})
        entry.id(item["id"])

    return fg.atom_str(pretty=True) if atom else fg.rss_str(pretty=True)
def feed(self, feed_title, title, content, url, published=None, summary=None,
         enclosure=None, media_thumbnail=None):
    """Render a single-entry RSS feed string for tests, with optional extras."""
    feed = FeedGenerator()
    feed.title(feed_title)
    feed.description(faker.sentence())
    feed.link({'href': WP_FEED_URL})

    entry = feed.add_entry()
    entry.title(title)
    entry.link({'href': url})
    entry.author(name=faker.name())
    entry.content(content, type="cdata")

    # Optional bits are only emitted when the caller supplies them.
    if summary:
        entry.description(summary)
    if enclosure:
        entry.enclosure(url=enclosure['url'], type=enclosure['type'],
                        length=str(faker.pyint()))
    if media_thumbnail:
        feed.load_extension('media')
        entry.media.thumbnail({'url': media_thumbnail})

    tz = pytz.timezone(faker.timezone())
    when = published or faker.date_time(tzinfo=tz)
    entry.published(when)
    entry.updated(faker.date_time_between(start_date=when, tzinfo=tz))

    return feed.rss_str().decode('utf8')
def GET(self):
    """Serve the mailbox-backed news RSS feed."""
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"

    fg = FeedGenerator()
    #TODO create icon
    # fg.icon('http://www.det.ua.pt')
    fg.id(config.get('rss', 'id'))
    fg.title(config.get('rss', 'title'))
    fg.subtitle(config.get('rss', 'subtitle'))
    fg.description(config.get('rss', 'description'))
    fg.author({'name': config.get('rss', 'author_name'),
               'email': config.get('rss', 'author_email')})
    fg.language(config.get('rss', 'language'))
    fg.link(href=config.get('rss', 'href'), rel='related')

    client = EmailClient()
    max_items = config.getint('rss', 'maxitems')
    message_ids = client.listBox(config.get('imap', 'mailbox'))[:max_items]
    base_url = config.get("main", "baseurl")

    # Oldest message last in the listing, so walk it backwards.
    for msgn in reversed(message_ids):
        cherrypy.log("RSS Entry: " + msgn)
        em = client.getEMail(msgn)
        entry = fg.add_entry()
        entry.title(em['subject'])
        entry.author({'name': em['From']['name'], 'email': em['From']['email']})
        entry.guid(base_url + 'news/' + msgn)
        entry.link({'href': base_url + 'news/' + msgn, 'rel': 'alternate'})
        entry.pubdate(em['date'])
        entry.content(em['body'])
    return fg.rss_str(pretty=True)
def generate_rss(language, since):
    """Scrape a GitHub Trending page and publish it as an RSS file on S3.

    Args:
        language: dict with 'url', 'key' and 'name' describing the page.
        since: period selector (e.g. 'daily', 'weekly') appended to the query.
    """
    url = "{0}?since={1}".format(language["url"], since)
    file_name = "github_trends_{0}_{1}.rss".format(language["key"], since)
    title = "GitHub Trends - {0} - {1}".format(language["name"], since.capitalize())

    print(url)
    page = requests.get(url)
    tree = html.fromstring(page.content)
    lis = tree.cssselect("ol.repo-list li")

    fg = FeedGenerator()
    fg.title(title)
    fg.link(href="http://github-trends.ryotarai.info/rss/{0}".format(file_name))
    fg.description(title)

    # One feed entry per repository, ranked in page order.
    # Idiom fix: enumerate replaces the original manual `index` counter.
    for index, li in enumerate(lis, start=1):
        a = li.cssselect("h3 a")[0]

        description = ""
        ps = li.cssselect("p")
        if len(ps) > 0:
            description = ps[0].text_content().strip()

        fe = fg.add_entry()
        fe.link(href="https://github.com{0}".format(a.get("href")))
        fe.title("{0} (#{1} - {2} - {3})".format(
            a.text_content().strip().replace(" / ", "/"),
            index,
            language["name"],
            since.capitalize(),
        ))
        fe.description(description)

    rssfeed = fg.rss_str(pretty=True)
    s3.Object(bucket, 'rss/{0}'.format(file_name)).put(
        Body=rssfeed, ContentType="application/xml")
def latestRss(userID):
    """Return an RSS feed of a user's unseen, already-aired episodes."""
    userID = userID.lower()
    today = date.today().strftime('%Y-%m-%d')

    shows = {}
    episodes = []
    for showID in series.getUserShowList(userID):
        shows[showID] = series.getShowInfo(userID, showID, withEpisodes=True,
                                           onlyUnseen=True)
        for episode in shows[showID]['episodes']:
            # Keep only episodes that have already aired.
            if episode['airdate'] and airdateKey(episode['airdate']) < today:
                episodes.append((showID, episode))
    episodes.sort(key=episodeAirdateKey, reverse=True)

    feed = FeedGenerator()
    feed.id(userID)
    feed.title('%s\'s shows' % userID)
    feed.description('Unseen episodes')
    feed.link(href=request.url_root)
    feed.language('en')

    for showID, episode in episodes:
        entry = feed.add_entry()
        entry.id('%s/%s' % (showID, episode['episode_id']))
        entry.title('%s S%02dE%02d: %s' % (shows[showID]['name'],
                                           episode['season'],
                                           episode['episode'],
                                           episode['title']))
    return feed.rss_str(pretty=True)
class TestExtensionDc(unittest.TestCase):
    """Exercise the Dublin Core ('dc') feed extension."""

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('dc')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_entryLoadExtension(self):
        entry = self.fg.add_item()
        try:
            entry.load_extension('dc')
        except ImportError:
            pass  # Extension already loaded

    def test_elements(self):
        dc = self.fg.dc
        # Every dc_* setter should echo its value back as a one-element list.
        for name in dir(dc):
            if not name.startswith('dc_'):
                continue
            accessor = getattr(dc, name)
            accessor(name)
            assert accessor() == [name]

        self.fg.id('123')
        assert self.fg.atom_str()
        assert self.fg.rss_str()
def rss():
    """Serve the 20 most recent visible blog posts as an RSS feed."""
    config = public_app.config['feed']

    fg = FeedGenerator()
    fg.id('%s/blog' % Config.BASE_URL)
    fg.title(config['title'])
    fg.author({'name': config['author'], 'email': config['email']})
    fg.description(config['desc'])
    fg.link(href=Config.BASE_URL, rel='alternate')

    query = {
        'id': {'$regex': 'blog'},
        'current': True,
        'meta.hide': {'$ne': True},
    }
    posts = db.pages.find(query).sort('meta.created', -1)[:20]

    for post in posts:
        meta = post['meta']
        fe = fg.add_entry()
        fe.title(meta['title'])
        # Prefer the per-post author; fall back to the feed-wide one.
        if 'author' in meta:
            fe.author({'name': meta['author'], 'email': config['email']})
        else:
            fe.author({'name': config['author'], 'email': config['email']})
        fe.description(do_truncate(post['content'], 300))
        fe.link(href="%s/%s" % (Config.BASE_URL, post['id']), rel='alternate')
        fe.pubdate(utc.localize(meta['created']))
        fe.content(post['content'])

    response.headers['Content-Type'] = 'application/rss+xml'
    return fg.rss_str(pretty=True)
def generate_empty_rss_feed(group_name):
    """Return an RSS feed string for a group with no schedule entries yet.

    Used as the initial feed body before any schedule-change notifications
    have been published for the group.
    """
    fg = FeedGenerator()
    # Channel title/description are in Serbian: "VTS schedule - group X" /
    # "A notification is sent automatically when the group schedule changes."
    fg.title("VTS Raspored - Grupa " + group_name)
    fg.author( {'name':'Veselin Romic','email':'*****@*****.**'} )
    fg.language('sr')
    fg.description("Automatski se salje notifikacija kad se promeni grupni raspored.")
    fg.link(href='https://eref.vts.su.ac.rs/')
    return fg.rss_str(pretty=True)
def main(argv):
    """Entry point: parse CLI arguments, build the feed, print it to stdout."""
    ap = argparse.ArgumentParser(
        description='''
        Render RSS and Atom feeds from a CSV of food inspection data.
        ''')
    ap.add_argument(
        '-v', '--verbose', action='count', dest='verbosity', default=0,
        help='increase global logging verbosity; can be used multiple times')
    ap.add_argument(
        '-f', '--format', choices=['rss', 'atom'], default='atom',
        # Fix: the original help string contained a stray "')" sequence.
        help='''
        specify the format to use when rendering the feed
        (default: %(default)s)
        ''')
    ap.add_argument(
        '-n', '--num_incidents', metavar='<num>', type=int, default=10,
        help='render <num> recent incidents in the feed (default: %(default)s)')
    ap.add_argument(
        'flavor', nargs='?', default='all', choices=['all', 'failures'],
        help='select the flavor of feed to render (default: %(default)s)')
    args = ap.parse_args()

    logging.basicConfig(
        level=logging.ERROR - args.verbosity * 10,
        style='{',
        format='{}: {{message}}'.format(ap.prog))

    fg = FeedGenerator()
    fg.id('http://pgriess.github.io/dallas-foodscores/')
    fg.link(href=fg.id(), rel='self')
    fg.title('Dallas Food Inspection Scores')
    fg.subtitle('''
        Food inspection scores from the official City of Dallas dataset;
        updated daily
        ''')
    fg.description(fg.subtitle())
    fg.language('en')
    fg.author(
        name='Peter Griess',
        email='*****@*****.**',
        uri='https://twitter.com/pgriess')

    for i in get_inspections_to_feed(sys.stdin, args.num_incidents,
                                     args.flavor):
        fe = fg.add_entry()
        fe.title('{name} at {address} scored {score}'.format(
            name=i.name, address=i.address, score=i.score))
        fe.id(fg.id() + '#!/' + str(abs(hash(i))))
        fe.link(href=fe.id(), rel='alternate')
        fe.content(fe.title())
        fe.published(TZ.localize(i.date))

    if args.format == 'atom':
        print(fg.atom_str(pretty=True))
    else:
        print(fg.rss_str(pretty=True))
def generate_feed(channel_dict, file_metadatas):
    """Build a podcast RSS string: channel info plus one entry per file."""
    feed = FeedGenerator()
    feed.load_extension("podcast")
    feed.link(href=channel_dict["url"], rel="self")
    feed.title(channel_dict["title"])
    feed.description(channel_dict["description"])

    for metadata in file_metadatas:
        add_entry(feed, metadata)

    return feed.rss_str(pretty=True)
def rss(request):
    """Serve the podcast RSS feed, reporting the pageview to Google Analytics."""
    # Anonymous-but-stable GA client id derived from the requester's IP.
    angrates_uuid = uuid.UUID('f93c5388-f60b-5159-bbfc-d08d6f7b401f')
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    cid = uuid.uuid5(angrates_uuid, ip)

    # Google Analytics Measurement Protocol pageview hit.
    requests.post('https://www.google-analytics.com/collect', data={
        'v': 1,                              # protocol version
        'tid': 'UA-19269567-1',              # tracking / property id
        'cid': cid,                          # anonymous client id
        't': 'pageview',                     # hit type
        'dh': 'armstrongandgettybingo.com',  # document hostname
        'dp': '/rss/',                       # page
        'dt': 'Podcast',                     # title
    })

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://www.armstrongandgettybingo.com/rss')
    fg.podcast.itunes_category('News & Politics', 'Conservative (Right)')
    fg.podcast.itunes_explicit('no')
    fg.title('The Armstrong and Getty Show (Bingo)')
    fg.author( {'name':'Ben Friedland','email':'*****@*****.**'} )
    fg.link(href='http://www.armstrongandgettybingo.com', rel='alternate')
    fg.logo('https://s3-us-west-1.amazonaws.com/bencast/bingologo.png')
    fg.subtitle('Armstrong and Getty Bingo')
    fg.description('The Armstrong and Getty Show - Unofficial Feed including Archives back to 2001.')
    fg.link(href='http://www.armstrongandgettybingo.com/rss', rel='self')
    fg.language('en')

    pacific = pytz.timezone('America/Los_Angeles')
    for hour in Hour.objects.all().order_by('-pub_date'):
        fe = fg.add_entry()
        fe.id(hour.link)
        fe.title(hour.title)
        fe.description(hour.description)
        fe.enclosure(hour.link, 0, 'audio/mpeg')
        fe.published(pacific.localize(hour.pub_date))

    return HttpResponse(fg.rss_str(pretty=True),
                        content_type='application/rss+xml')
def make_feed(filename='epistles.xml'):
    """Build the Daily Epistles podcast feed and write it to *filename*.

    Fix: the original accepted *filename* but always wrote to the
    hard-coded path 'epistles.xml', ignoring the parameter.
    """
    fg = FeedGenerator()
    fg.title('Daily Epistles')
    fg.author({'name': 'Tim Hopper'})
    fg.subtitle('Listen to the New Testament epistles each month.')
    fg.language('en')
    fg.link(href='http://www.crossway.com', rel='alternate')

    # One entry per division, published on successive days of this month.
    for day, division in enumerate(get_divisons(), 1):
        entry = fg.add_entry()
        entry.id(division)
        entry.title(division)
        now = datetime.datetime.now()
        pubdate = datetime.datetime(year=now.year, month=now.month, day=day,
                                    hour=pubhour, tzinfo=tz)
        entry.published(pubdate)
        entry.enclosure(get_url(division), 0, 'audio/mpeg')

    fg.rss_str(pretty=True)
    fg.rss_file(filename)
def generate_feed(input_file, output_file):
    """Render the podcast feed from pre-parsed items and write it to disk."""
    fg = FeedGenerator()
    fg.load_extension('podcast', rss=True)

    ## RSS tags
    # Required channel-level tags.
    fg.title(TITLE)
    fg.link(href=LINK)
    fg.description(DESCRIPTION)
    # Optional channel-level tags.
    fg.language('en')
    fg.image(url=IMAGE_URL, title=TITLE, link=LINK)
    fg.ttl(720)
    fg.webMaster(CONTACT['name'])
    amsterdam = pytz.timezone('Europe/Amsterdam')
    fg.pubDate(amsterdam.localize(datetime.datetime.now()))

    # iTunes-specific tags.
    fg.podcast.itunes_author('Dan LeBatard')
    fg.podcast.itunes_category(itunes_category='Sports & Recreation',
                               itunes_subcategory='Professional')
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit='clean')
    fg.podcast.itunes_owner(name=CONTACT['name'], email=CONTACT['email'])

    # One entry per parsed item.
    for item in read_items(input_file):
        fe = fg.add_entry()
        fe.id(item['guid'])
        fe.title(item['title'])
        fe.description(item['description'])
        fe.enclosure(item['link'], 0, 'audio/mpeg')
        fe.pubdate(item['pubDate'])

    # Finish off the file
    fg.rss_str(pretty=True)
    fg.rss_file(output_file)
def render_feed(text_paths, outpath):
    """Build the blog's RSS and Atom feeds; returns (rss_bytes, atom_bytes).

    Feeds are served at:
    http://rhodesmill.org/brandon/feed
    http://rhodesmill.org/brandon/category/python/feed
    http://rhodesmill.org/brandon/feed/atom/
    """
    t0 = datetime.min.time()

    def fix(d):
        # Promote a bare date to a timezone-aware US/Eastern midnight datetime.
        dt = datetime.combine(d, t0)
        return timezone('US/Eastern').localize(dt)

    # Only dated posts qualify; keep just the single most recent one.
    posts = [post_info(path) for path in text_paths if date_of(path)]
    posts = sorted(posts, key=lambda post: post['date'])
    posts = posts[-1:]
    most_recent_date = max(post['date'] for post in posts)

    def full(url):
        # Make a site-absolute URL from a path fragment.
        return 'http://rhodesmill.org/' + url.lstrip('/')

    fg = FeedGenerator()
    fg.id(full('/'))
    fg.author({'name': 'Brandon Rhodes'})
    fg.language('en')
    fg.link(href=full('/brandon/'), rel='alternate')
    # The self link depends on which feed (python-only vs all) is being built.
    if 'python' in outpath:
        fg.link(href=full('/brandon/category/python/feed/'), rel='self')
    else:
        fg.link(href=full('/brandon/feed/'), rel='self')
    fg.subtitle('Thoughts and ideas from Brandon Rhodes')
    fg.title("Let's Discuss the Matter Further")
    fg.updated(fix(most_recent_date))

    for post in posts:
        url = full(post['url_path'])
        # Entry body is the post truncated at its "read more" marker.
        excerpt = truncate_at_more(post['body_html'], url)
        fe = fg.add_entry()
        fe.content(excerpt, type='html')
        fe.guid(url, permalink=True)
        fe.id(url)
        fe.link({'href': url})
        fe.published(fix(post['date']))
        fe.title(post['title'])
        fe.updated(fix(post['date']))

    # Render RSS first; then swap the self link to the Atom URL before
    # rendering the Atom variant (order matters here).
    rss = fg.rss_str(pretty=True)
    fg.link(href=full('/brandon/feed/atom/'), rel='self', replace=True)
    atom = fg.atom_str(pretty=True)

    return rss, atom
def rss(self, name):
    """Serve the RSS feed for one configured source."""
    cherrypy.response.headers['Content-Type'] = 'application/rss+xml'

    fg = FeedGenerator()
    cursor.execute("SELECT * FROM RSS WHERE source = %s ORDER BY filedate DESC;", (name,))

    rss_url = config.get('Options', 'rss_url')
    fg.id(rss_url + name)
    fg.title(config.get(name, 'rss_title'))
    fg.description(config.get(name, 'rss_desc'))
    fg.link(href=rss_url + name, rel='alternate')

    prepend = config.get(name, 'feed_entry_prepend')
    postpend = config.get(name, 'feed_entry_postpend')
    for row in cursor:
        # row[1] is the Google Drive file id, row[2] the display name.
        download_url = 'https://drive.google.com/uc?id=' + row[1] + '&export=download'
        fe = fg.add_entry()
        fe.id(download_url)
        fe.title(prepend + row[2] + postpend)
        fe.description(row[2] + ' - ' + download_url)

    return fg.rss_str(pretty=True)
def get(self):
    """Render a small fixed two-item example feed as RSS."""
    fg = FeedGenerator()
    fg.id("http://test.ts")
    fg.title("My Test Feed")
    fg.icon("https://avatars1.githubusercontent.com/u/715660?v=3&s=32")
    fg.author({'name': "The Author", 'email': "*****@*****.**"})
    fg.link(href="http://example.org/index.atom?page=2", rel="next")
    fg.link(href="http://test.ts", rel="alternate")
    fg.logo("https://avatars1.githubusercontent.com/u/715660?v=3&s=32")
    fg.description("Este é o monstro do lago 1")
    fg.subtitle("This is an example feed!")
    fg.language("en-us")
    # TODO: emit sy:updatePeriod (hourly) / sy:updateFrequency (1),
    # presumably via the syndication extension.
    fg.lastBuildDate(datetime.now(pytz.timezone("America/Sao_Paulo")))

    first = fg.add_item()
    first.id("http://test.ts/id/1", )
    first.title("Monstro do Lago 1")
    first.description("Este é o monstro do lago 1")
    first.comments("http://test.ts/id/1/comments")
    first.pubdate(datetime.now(pytz.timezone("America/Sao_Paulo")))

    second = fg.add_item()
    second.id("http://test.ts/id/2")
    second.title("Monstro do Lago 2")
    second.description("Este é o monstro do lago 2")
    second.pubdate(datetime.now(pytz.timezone("America/Sao_Paulo")))

    rss_str = fg.rss_str(pretty=True)
    self.set_header("Content-Type", 'application/xml; charset="utf-8"')
    self.write(rss_str)

    if self.is_browser_mobile():
        print("buu")
    else:
        print(self.request.headers["User-Agent"])
def make_rss(user, link=False):
    """Build (and cache to disk) an RSS feed of a user's recent tweets.

    Args:
        user: Twitter screen name.
        link: when True, only tweets containing URLs are included and each
            entry's summary lists the expanded links.

    Returns:
        The RSS document string, or an (error message, HTTP status) tuple.
    """
    api = twitter.Api(**secrets)
    if link:
        filename = os.path.join(os.path.dirname(__file__), 'rss', user + '_links.rss')
        try:
            statuses = [s for s in api.GetUserTimeline(None, user, count=50)
                        if len(s.urls) > 0]
        except twitter.TwitterError as e:
            return str(e), 404
    else:
        filename = os.path.join(os.path.dirname(__file__), 'rss', user + '.rss')
        try:
            statuses = api.GetUserTimeline(None, user)
        except twitter.TwitterError as e:
            return str(e), 404

    if len(statuses) == 0:
        return "No Tweets", 416

    fg = FeedGenerator()
    fg.title(user + ' on twitter')
    fg.description('RSS feed from a twitter stream')
    fg.link(href='http://twitter.com/' + statuses[0].GetUser().screen_name, rel='self')

    for status in statuses:
        fe = fg.add_entry()
        fe.title(status.GetUser().screen_name+': '+status.GetText())
        # Fix: build the status URL from this status's own user rather than
        # statuses[0]'s (equivalent on a single-user timeline, but now
        # consistent with the title line above and correct in general).
        statusurl = 'http://twitter.com/' + status.GetUser().screen_name + '/status/' + status.GetIdStr()
        fe.guid(statusurl)
        fe.pubdate(status.created_at)
        fe.link(href=statusurl, rel='alternate')
        if link:
            #fe.link(href=status.urls[0].expanded_url, rel='alternate')
            urlsummary = '<br/> <ul>'
            for url in status.urls:
                urlsummary += '<a href="{0}">{0}</a> <br/>'.format(url.expanded_url)
            urlsummary += '</ul>'
            fe.summary(status.GetText() + '\n' + urlsummary)
        else:
            fe.summary(status.GetText())

    fg.rss_file(filename)
    return fg.rss_str()
def daily_to_rss(daily):
    """Convert a daily activity log object into an RSS feed string."""
    fg = FeedGenerator()
    fg.id(daily.url)
    fg.link(href=daily.url, rel='alternate')
    fg.description(u'RSS feed generated from: {}'.format(daily.url))
    fg.title(u'Daily Activity Log: {}'.format(daily.url))
    fg.language('en')

    for entry in daily.entries():
        item = fg.add_entry()
        item.title(u'{}: {}'.format(entry.type, entry.subject))
        item.description(description=rss_description_from(entry))
        item.pubdate(entry.date_rfc2822())

    return fg.rss_str(pretty=True)
def test_categoryHasDomain(self):
    """An RSS category's scheme must be emitted as its domain attribute."""
    fg = FeedGenerator()
    fg.title('some title')
    fg.link(href='http://www.dontcare.com', rel='alternate')
    fg.description('description')

    entry = fg.add_entry()
    entry.id('http://lernfunk.de/media/654321/1')
    entry.title('some title')
    entry.category([{
        'term': 'category',
        'scheme': 'http://www.somedomain.com/category',
        'label': 'Category',
    }])

    assert b'domain="http://www.somedomain.com/category"' in fg.rss_str()
def GET(self, query):
    """Serve DIGBT search results for *query* as an RSS feed."""
    engine = DIGBTSearchEngine(query=query)

    feed = FeedGenerator()
    feed.title(query)
    feed.link(href=engine.url)
    feed.description(query)

    for torrent in engine.torrents:
        item = feed.add_entry()
        item.id(torrent['id'])
        item.title(torrent['title'])
        item.link(href=torrent['url'])

    web.header('Content-Type', 'application/rss+xml')
    return feed.rss_str()
def build(self, rss=False):
    """Render the static site (and optionally an RSS feed) into self.basedir.

    Args:
        rss: when True, also generate rss.xml with one entry per published post.
    """
    if rss:
        self.site.rss_url = '/rss.xml'

    fg = FeedGenerator()
    fg.title(self.site.name)
    fg.author({'name': self.site.author})
    fg.link(href=self.site.base_url, rel='alternate')
    fg.subtitle(self.site.description)

    start = time.time()
    getLogger().info("Copy Assets")
    self.output.copyAssets(self.basedir)

    getLogger().info("Start Build of static content")
    posts = []
    site_tz = pytz.timezone(self.site.timezone)
    for p in self.source.getPosts():
        if not p.published:
            # Typo fix: message previously read "Ingnoring".
            getLogger().info("Ignoring draft Post %s (%s)", p.title, p.slug)
            continue
        posts.append(p)
        if rss:
            fe = fg.add_entry()
            fe.id("%s/%s" % (self.site.base_url, p.permalink))
            fe.link(href=fe.id(), rel='alternate')
            fe.title(p.title)
            # pytz fix: replace(tzinfo=pytz.timezone(...)) attaches the
            # zone's raw LMT offset; localize() applies the correct offset.
            fe.published(site_tz.localize(p.created_at))
            fe.category([{'term': t} for t in p.tags])
            fe.content(p.content)
        Output.storeData(os.path.join(self.basedir, p.permalink),
                         self.output.render(self.site, post=p))
        getLogger().debug("Adding Post \"%s\" (%s)", p.title, p.slug)

    # Home page lists all published posts, newest first.
    posts = sorted(posts, key=lambda k: k.created_at, reverse=True)
    Output.storeData(os.path.join(self.basedir, 'index.html'),
                     self.output.render(self.site, posts=posts, post=None,
                                        is_home=True, pagination=None))

    if rss:
        Output.storeData(os.path.join(self.basedir, 'rss.xml'),
                         fg.rss_str(pretty=True))
        getLogger().debug("You awesome RSS feed has been generated")

    getLogger().info("It took %d seconds to generate your awesome blog" % (time.time() - start))
class TestExtensionMedia(unittest.TestCase):
    """Tests for the Media RSS (mrss) feed extension."""

    NS = {'media': 'http://search.yahoo.com/mrss/',
          'a': 'http://www.w3.org/2005/Atom'}

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('media')
        self.fg.id('id')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_media_content(self):
        entry = self.fg.add_item()
        entry.id('id')
        entry.title('title')
        entry.content('content')
        entry.media.content(url='file1.xy')
        entry.media.content(url='file2.xy')
        entry.media.content(url='file1.xy', group=2)
        entry.media.content(url='file2.xy', group=2)
        entry.media.content(url='file.xy', group=None)

        # Grouped items appear under media:group in the RSS serialization.
        root = etree.fromstring(self.fg.rss_str())
        url = root.xpath(
            '/rss/channel/item/media:group/media:content[1]/@url',
            namespaces=self.NS)
        assert url == ['file1.xy', 'file1.xy']

        # The item added with group=None sits directly on the item element.
        url = root.xpath('/rss/channel/item/media:content[1]/@url',
                         namespaces=self.NS)
        assert url == ['file.xy']

        # Same grouping must show up in the Atom serialization.
        root = etree.fromstring(self.fg.atom_str())
        url = root.xpath(
            '/a:feed/a:entry/media:group/media:content[1]/@url',
            namespaces=self.NS)
        assert url == ['file1.xy', 'file1.xy']

        # Replacing with an empty list clears every media:content element.
        entry.media.content(content=[], replace=True)
        assert entry.media.content() == []

    def test_media_thumbnail(self):
        entry = self.fg.add_item()
        entry.id('id')
        entry.title('title')
        entry.content('content')
        entry.media.thumbnail(url='file1.xy')
        entry.media.thumbnail(url='file2.xy')
        entry.media.thumbnail(url='file1.xy', group=2)
        entry.media.thumbnail(url='file2.xy', group=2)
        entry.media.thumbnail(url='file.xy', group=None)

        # Grouped thumbnails appear under media:group in the RSS output.
        root = etree.fromstring(self.fg.rss_str())
        url = root.xpath(
            '/rss/channel/item/media:group/media:thumbnail[1]/@url',
            namespaces=self.NS)
        assert url == ['file1.xy', 'file1.xy']

        # The thumbnail added with group=None sits directly on the item.
        url = root.xpath('/rss/channel/item/media:thumbnail[1]/@url',
                         namespaces=self.NS)
        assert url == ['file.xy']

        # Same grouping must show up in the Atom serialization.
        root = etree.fromstring(self.fg.atom_str())
        url = root.xpath(
            '/a:feed/a:entry/media:group/media:thumbnail[1]/@url',
            namespaces=self.NS)
        assert url == ['file1.xy', 'file1.xy']

        # Replacing with an empty list clears every media:thumbnail element.
        entry.media.thumbnail(thumbnail=[], replace=True)
        assert entry.media.thumbnail() == []
class TestExtensionPodcast(unittest.TestCase):
    """Tests for the iTunes podcast feed extension."""

    NS = {'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd'}

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('podcast')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def _assert_categories(self):
        # Shared check: category/subcategory must round-trip into the XML.
        root = etree.fromstring(self.fg.rss_str())
        cat = root.xpath('/rss/channel/itunes:category/@text',
                         namespaces=self.NS)
        scat = root.xpath('/rss/channel/itunes:category/itunes:category/@text',
                          namespaces=self.NS)
        assert cat[0] == 'Technology'
        assert scat[0] == 'Podcasting'

    def test_category_new(self):
        # New-style API: a list of {'cat': ..., 'sub': ...} dicts.
        self.fg.podcast.itunes_category([{'cat': 'Technology',
                                          'sub': 'Podcasting'}])
        self.fg.podcast.itunes_explicit('no')
        self.fg.podcast.itunes_complete('no')
        self.fg.podcast.itunes_new_feed_url('http://example.com/new-feed.rss')
        self.fg.podcast.itunes_owner('John Doe', '*****@*****.**')
        self._assert_categories()

    def test_category(self):
        # Old-style API: positional category and subcategory strings.
        self.fg.podcast.itunes_category('Technology', 'Podcasting')
        self.fg.podcast.itunes_explicit('no')
        self.fg.podcast.itunes_complete('no')
        self.fg.podcast.itunes_new_feed_url('http://example.com/new-feed.rss')
        self.fg.podcast.itunes_owner('John Doe', '*****@*****.**')
        self._assert_categories()

    def test_podcastItems(self):
        # Feed-level itunes properties round-trip through their getters.
        fg = self.fg
        fg.podcast.itunes_author('Lars Kiesow')
        fg.podcast.itunes_block('x')
        fg.podcast.itunes_complete(False)
        fg.podcast.itunes_explicit('no')
        fg.podcast.itunes_image('x.png')
        fg.podcast.itunes_subtitle('x')
        fg.podcast.itunes_summary('x')
        assert fg.podcast.itunes_author() == 'Lars Kiesow'
        assert fg.podcast.itunes_block() == 'x'
        assert fg.podcast.itunes_complete() == 'no'
        assert fg.podcast.itunes_explicit() == 'no'
        assert fg.podcast.itunes_image() == 'x.png'
        assert fg.podcast.itunes_subtitle() == 'x'
        assert fg.podcast.itunes_summary() == 'x'

        # The author must also appear in the serialized XML.
        root = etree.fromstring(self.fg.rss_str())
        author = root.xpath('/rss/channel/itunes:author/text()',
                            namespaces=self.NS)
        assert author == ['Lars Kiesow']

    def test_podcastEntryItems(self):
        # Entry-level itunes properties round-trip through their getters.
        fe = self.fg.add_item()
        fe.title('y')
        fe.podcast.itunes_author('Lars Kiesow')
        fe.podcast.itunes_block('x')
        fe.podcast.itunes_duration('00:01:30')
        fe.podcast.itunes_explicit('no')
        fe.podcast.itunes_image('x.png')
        fe.podcast.itunes_is_closed_captioned('yes')
        fe.podcast.itunes_order(1)
        fe.podcast.itunes_subtitle('x')
        fe.podcast.itunes_summary('x')
        assert fe.podcast.itunes_author() == 'Lars Kiesow'
        assert fe.podcast.itunes_block() == 'x'
        assert fe.podcast.itunes_duration() == '00:01:30'
        assert fe.podcast.itunes_explicit() == 'no'
        assert fe.podcast.itunes_image() == 'x.png'
        assert fe.podcast.itunes_is_closed_captioned()
        assert fe.podcast.itunes_order() == 1
        assert fe.podcast.itunes_subtitle() == 'x'
        assert fe.podcast.itunes_summary() == 'x'

        # The author must also appear on the serialized item.
        root = etree.fromstring(self.fg.rss_str())
        author = root.xpath('/rss/channel/item/itunes:author/text()',
                            namespaces=self.NS)
        assert author == ['Lars Kiesow']
def feed(uri, verif):
    """Serve a re-generated RSS feed for the HMAC-verified feed *uri*.

    *uri* and *verif* arrive urlsafe-base64 encoded; *verif* must be the
    BLAKE2s HMAC of the raw uri bytes under ``HMAC_KEY``, otherwise 403.
    The upstream feed is fetched with conditional-GET headers taken from a
    JSON-pickle cache, the cache is refreshed asynchronously, and the
    entries are rewritten so enclosures point at transcoded local files.

    :returns: a Flask response with ``application/xml`` content.
    :raises: aborts with 403 on a bad MAC, 404 on a failed fetch.
    """
    uri = base64.urlsafe_b64decode(uri.encode('utf8'))
    verif = base64.urlsafe_b64decode(verif.encode('utf8'))
    mac = hmac.new(HMAC_KEY, uri, digestmod=pyblake2.blake2s).digest()
    if not hmac.compare_digest(verif, mac):
        abort(403)
    uri = uri.decode('utf8')
    verify_uri(uri)

    # Load conditional-GET state (etag / last-modified) from the cache file.
    cachefile = pathfor(uri, '.picklejson', FEED_DIR)
    modified = etag = None
    cached = None
    if os.path.isfile(cachefile):
        try:
            with open(cachefile, 'rb') as f:
                cached = jsonpickle.decode(f.read())
            app.logger.debug("Loaded cache from cachefile:%r", cachefile)
            etag = cached.etag if 'etag' in cached else None
            modified = cached.modified if 'modified' in cached else None
        except Exception as e:
            # Best-effort cache: a corrupt file only costs us a full refetch.
            # logger.warn is deprecated; use warning().
            app.logger.warning("Could not load cache:%r", e)

    app.logger.debug("Parse feed: %r; etag:%r; modified:%r",
                     uri, etag, modified)
    parsed = feedparser.parse(uri, etag=etag, modified=modified)
    app.logger.debug("Parsed feed: %r; %r",
                     uri, 'status' in parsed and parsed.status)
    # BUG FIX: parsed.status raises AttributeError when feedparser never got
    # an HTTP response (network error); treat a missing status as a failure.
    status = parsed.get('status')
    if status is None or status < 200 or status >= 400:
        app.logger.warning("Non okay status code, 404?")
        abort(404)

    # A 304 Not Modified (or an empty body) yields no entries: fall back to
    # the cached parse.
    if cached and not parsed.entries:
        parsed = cached

    def save_to_cache():
        # Write-then-rename so readers never observe a partial cache file.
        with tempfile.NamedTemporaryFile(delete=False, dir=FEED_DIR) as f:
            encoded = jsonpickle.encode(parsed)
            indented = json.dumps(json.loads(encoded),
                                  indent=4, sort_keys=True)
            # BUG FIX: NamedTemporaryFile defaults to binary mode, so writing
            # the str raised TypeError and the cache was never saved.
            f.write(indented.encode('utf8'))
            f.flush()
        os.rename(f.name, cachefile)
        os.chmod(cachefile, 0o644)
        app.logger.debug("Saved cache to cachefile:%r", cachefile)

    pool.submit(save_to_cache)

    feed = FeedGenerator()
    feed.id(uri)
    feed.title(parsed.feed.get('title', None) or '???')
    feed.link(href=parsed.feed.get('link', None) or 'about:blank')
    feed.description(parsed.feed.get('description', None) or '???')
    if 'image' in parsed.feed and 'href' in parsed.feed.image:
        feed.image(parsed.feed.image.href)

    for e in parsed.entries:
        entry = feed.add_entry(order='append')
        entry_id = e.id if 'id' in e else None
        for link in (e.links if 'links' in e else []):
            if link.rel == 'enclosure' and 'href' in link:
                if not entry_id:
                    entry_id = link.href
                # Point the enclosure at our transcoded copy of the media.
                storename = transcoded_href(link.href)
                entry.enclosure(urljoin(request.url, storename),
                                link.get('size', None),
                                link.get('type', OPUS_TYPE))
            elif link.rel == 'alternate' and 'href' in link:
                entry.link(**link)
        for c in (e.content if 'content' in e else []):
            if 'type' in c and c.type.startswith('text/html'):
                entry.content(content=c.value, type='html')
            else:
                entry.content(content=c.value, type='text')
        entry.id(entry_id)
        entry.title(e.get('title', None) or '???')
        entry.description(e.get('description', None) or '???')
        if 'updated_parsed' in e and e.updated_parsed:
            entry.updated(
                datetime.fromtimestamp(mktime(e.updated_parsed), pytz.UTC))
        if 'published_parsed' in e and e.published_parsed:
            entry.published(
                datetime.fromtimestamp(mktime(e.published_parsed), pytz.UTC))

    resp = make_response(feed.rss_str(pretty=True))
    resp.headers['content-type'] = 'application/xml'
    return resp