class TestExtensionSyndication(unittest.TestCase):
    """Tests for feedgen's 'syndication' RSS extension (sy: namespace)."""

    # XPath namespace map for the RSS 1.0 syndication module.
    SYN_NS = {'sy': 'http://purl.org/rss/1.0/modules/syndication/'}

    def setUp(self):
        # Minimal valid RSS feed with the syndication extension loaded.
        self.fg = FeedGenerator()
        self.fg.load_extension('syndication')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_update_period(self):
        # Each allowed period value must round-trip into sy:UpdatePeriod.
        for period_type in ('hourly', 'daily', 'weekly', 'monthly', 'yearly'):
            self.fg.syndication.update_period(period_type)
            root = etree.fromstring(self.fg.rss_str())
            a = root.xpath('/rss/channel/sy:UpdatePeriod',
                           namespaces=self.SYN_NS)
            assert a[0].text == period_type

    def test_update_frequency(self):
        # Frequencies are serialized as their decimal string form.
        for frequency in (1, 100, 2000, 100000):
            self.fg.syndication.update_frequency(frequency)
            root = etree.fromstring(self.fg.rss_str())
            a = root.xpath('/rss/channel/sy:UpdateFrequency',
                           namespaces=self.SYN_NS)
            assert a[0].text == str(frequency)

    def test_update_base(self):
        # The base timestamp string is emitted verbatim.
        base = '2000-01-01T12:00+00:00'
        self.fg.syndication.update_base(base)
        root = etree.fromstring(self.fg.rss_str())
        a = root.xpath('/rss/channel/sy:UpdateBase', namespaces=self.SYN_NS)
        assert a[0].text == base
def gen_feed(title, author, feed_url, url, subtitle, logo, categories=None, album=False, licenses=False):
    """Build a podcast-enabled FeedGenerator with channel-level metadata.

    :param title: Feed title.
    :param author: Feed author (dict or string as accepted by feedgen).
    :param feed_url: Self URL of the feed (also used as the feed id).
    :param url: Alternate link to the site/resource the feed describes.
    :param subtitle: Feed subtitle/description line.
    :param logo: Logo image URL.
    :param categories: Optional list of category names (only applied for albums).
    :param album: When true (and categories given), set iTunes + RSS categories.
    :param licenses: Optional iterable of license strings joined into fg.rights.
    :return: The configured FeedGenerator (entries are added by the caller).
    """
    fg = FeedGenerator()
    fg.load_extension("podcast")
    fg.id(feed_url)
    fg.title(title)
    fg.author(author)
    fg.link(href=url)
    fg.link(href=feed_url, rel="self")
    fg.logo(logo)
    fg.subtitle(subtitle)
    fg.language("en")
    # Generator string advertises the reel2bits instance serving this feed.
    fg.generator(generator="reel2bits",
                 uri=f"https://{current_app.config['AP_DOMAIN']}",
                 version=g.cfg["REEL2BITS_VERSION"])
    if album and categories:
        # First category becomes the iTunes category; all become RSS terms.
        fg.podcast.itunes_category(categories[0])
        fg.category([{"term": c, "label": c} for c in categories])
    if licenses:
        fg.rights("See individual tracks: " + ", ".join(licenses))
    return fg
def generate_rss(podcast_title, podcast_description, podcast_url, files_path, rss_filename, rss_name):
    """Generate an RSS file for all media files found under *files_path*.

    Entry ids are deterministic (uuid5 of the basename), so regenerating the
    feed keeps stable guids. Publication dates come from file mtimes
    interpreted as UTC.
    """
    print('generating rss...')
    files = get_media_files(files_path=files_path)
    fg = FeedGenerator()
    fg.title(podcast_title)
    fg.description(podcast_description)
    fg.link(href=podcast_url, rel='alternate')
    fg.load_extension('podcast')
    for file in files:
        name, ext = file.rsplit('.', 1)
        # Public enclosure URL: <base>/<rss_name>/<file>, URL-escaped.
        url = urljoin(
            urljoin(EXTERNAL_URL_TO_FILES, quote(rss_name)) + '/',
            quote(file))
        mod_time_unix = os.path.getmtime(os.path.join(files_path, file))
        # NOTE(review): mtime is naive local time but tagged as UTC here —
        # confirm this offset is acceptable for the deployment.
        mod_time = datetime.fromtimestamp(mod_time_unix).replace(
            tzinfo=timezone.utc)
        entry_id = str(uuid.uuid5(uuid.NAMESPACE_URL, name))
        fe = fg.add_entry()
        fe.id(entry_id)
        fe.published(mod_time)
        fe.title(name)
        fe.description(name)
        # Length 0: actual file size is not advertised.
        fe.enclosure(url, 0, 'audio/mpeg')
    fg.rss_file(rss_filename, pretty=True)
    print('generated')
def podcast_feed():
    """Serve the WPC Coders Podcast RSS feed.

    Builds the feed from a hard-coded episode list (newest first in the
    output, since the list is iterated reversed) and returns it as an
    'application/rss+xml' response.

    Fixes:
      * ``datetime(2015, 02, 21, 23)``-style literals: leading zeros in
        decimal integer literals are a SyntaxError in Python 3.
      * The ep4 description contained a raw line break inside a
        single-quoted string (SyntaxError); it is now an explicit ``\\n``.
    """
    logo_url = url_for("static", filename="wpclogo_big.png", _external=True)
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.podcast.itunes_image(logo_url)
    fg.author({'name': 'Nathan Kellert', 'email': '*****@*****.**'})
    fg.link(href='http://watchpeoplecode.com/podcast_feed.xml', rel='self')
    fg.title('WPC Coders Podcast')
    fg.description('WPC Coders Podcast is a weekly peek into the lives of developers and the WatchPeopleCode community. Our goal is to keep our listeners entertained by giving them new and interesting insights into our industry as well as awesome things happening within our own community. Here, you can expect hear about some of the latest news, tools, and opportunities for developers in nearly every aread of our industry. Most importantly, we hope to have some fun and a few laughs in ways only other nerds know how.')  # NOQA
    # (file, title, publication datetime, description) per episode.
    episodes = [('ep1.mp3', 'Episode 1', datetime(2015, 2, 21, 23), 'Learn all about the WPC hosts, and where we came from in Episode 1!'),
                ('ep2.mp3', 'Episode 2', datetime(2015, 2, 28, 23), 'This week we cover your news, topics and questions in episode 2!'),
                ('ep3.mp3', 'Episode 3', datetime(2015, 3, 7, 23), "On todays podcast we talk to WatchPeopleCode's founder Alex Putilin. Hear about how the reddit search engine thousands watched him write. Also, hear the inside scoop of how WatchPeopleCode got started!"),  # NOQA
                ('ep4.mp3', 'Episode 4', datetime(2015, 3, 14, 23), "This week we talk to FreeCodeCamps Quincy Larson(http://www.freecodecamp.com) about their project that combines teaching new developers how to code and completing projects for non-profits! \nLets find out how this group of streamers code with a cause!")]  # NOQA
    for epfile, eptitle, epdate, epdescription in episodes[::-1]:
        epurl = "https://s3.amazonaws.com/wpcpodcast/{}".format(epfile)
        fe = fg.add_entry()
        fe.id(epurl)
        fe.title(eptitle)
        fe.description(epdescription)
        fe.podcast.itunes_image(logo_url)
        # Naive datetimes are tagged as UTC before serialization.
        fe.pubdate(epdate.replace(tzinfo=pytz.UTC))
        fe.enclosure(epurl, 0, 'audio/mpeg')
    return Response(response=fg.rss_str(pretty=True), status=200,
                    mimetype='application/rss+xml')
def write_podcast(show, podcast_dir, base_public_url, showlocal_tz):
    """Create the podcast file.

    Builds an RSS feed from every ``<show.id>_*.mp3`` in *podcast_dir* and
    writes it to ``<podcast_dir>/<show.id>.xml``.

    Args:
        show: Object with ``.id``, ``.name``, ``.image_url``, ``.description``.
        podcast_dir: Directory holding the mp3s and receiving the XML file.
        base_public_url: Public URL prefix for the feed and enclosure links.
        showlocal_tz: Timezone used by ``_get_date_from_mp3_path``.

    Fix: the previous ``fg.rss_str(pretty=True)`` call discarded its result
    — pure dead work before ``rss_file`` — and has been removed.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    url = "{}{}.xml".format(base_public_url, show.id)
    fg.id(url.split('.')[0])
    fg.title(show.name)
    fg.image(show.image_url)
    fg.description(show.description)
    fg.link(href=url, rel='self')

    # collect all mp3s for the given show
    all_mp3s = glob.glob(os.path.join(podcast_dir, "{}_*.mp3".format(show.id)))

    for filepath in all_mp3s:
        filename = os.path.basename(filepath)
        mp3_date = _get_date_from_mp3_path(filepath, showlocal_tz)
        mp3_size = os.stat(filepath).st_size
        mp3_url = base_public_url + filename
        mp3_id = filename.split('.')[0]
        title = "Programa del {0:%d}/{0:%m}/{0:%Y}".format(mp3_date)

        # build the rss entry
        fe = fg.add_entry()
        fe.id(mp3_id)
        fe.pubdate(mp3_date)
        fe.title(title)
        fe.enclosure(mp3_url, str(mp3_size), 'audio/mpeg')

    fg.rss_file(os.path.join(podcast_dir, '{}.xml'.format(show.id)))
def build(self):
    """Render this feed object to RSS and store the XML in ``self.body``.

    Reads channel metadata from instance attributes (id, title, author,
    link, logo, subtitle, language, slug) and one entry per ``self.items``
    element; enclosure URLs are composed from ``settings.S3_HTTP_PREFIX``.
    """
    fg = FeedGenerator()
    fg.id(self.id)
    fg.title(self.title)
    # TODO: Get actual author, categories, etc details from the feed
    fg.author({'name': self.author, 'email': '*****@*****.**'})
    fg.link(href=self.link, rel='alternate')
    fg.logo(self.logo)
    fg.subtitle(self.subtitle)
    fg.language(self.language)
    fg.load_extension('podcast', rss=True)
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.podcast.itunes_summary(self.subtitle)
    # TODO: Add podcast image covers, both standard RSS and Itune-compatible
    for item in self.items:
        fe = fg.add_entry()
        fe.id(item.id)
        fe.title(item.title)
        fe.description(item.description)
        # fe.description('Enjoy our first episode.')
        fe.pubdate(item.published_at)
        # Enclosure points at the S3 copy; size is read from the local file.
        file_name = os.path.basename(item.file)
        file_location = "%s/%s/%s" % (settings.S3_HTTP_PREFIX, self.slug,
                                      file_name)
        file_size = str(os.path.getsize(item.file)).encode("utf-8").decode("utf-8")
        fe.enclosure(file_location, file_size, 'audio/mpeg')
    self.body = fg.rss_str(pretty=True).decode("utf-8")
class TestExtensionTorrent(unittest.TestCase):
    """Tests for feedgen's 'torrent' RSS extension (ezrss namespace)."""

    def setUp(self):
        # Minimal valid RSS feed with the torrent extension loaded.
        self.fg = FeedGenerator()
        self.fg.load_extension('torrent')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_podcastEntryItems(self):
        # Setters must echo their values back through the getters...
        fe = self.fg.add_item()
        fe.title('y')
        fe.torrent.filename('file.xy')
        fe.torrent.infohash('123')
        fe.torrent.contentlength('23')
        fe.torrent.seeds('1')
        fe.torrent.peers('2')
        fe.torrent.verified('1')
        assert fe.torrent.filename() == 'file.xy'
        assert fe.torrent.infohash() == '123'
        assert fe.torrent.contentlength() == '23'
        assert fe.torrent.seeds() == '1'
        assert fe.torrent.peers() == '2'
        assert fe.torrent.verified() == '1'

        # Check that we have the item in the resulting XML
        ns = {'torrent': 'http://xmlns.ezrss.it/0.1/dtd/'}
        root = etree.fromstring(self.fg.rss_str())
        filename = root.xpath('/rss/channel/item/torrent:filename/text()',
                              namespaces=ns)
        assert filename == ['file.xy']
def create_podcast_from_channel(url, podcast_root_url, items_to_process):
    """Mirror a YouTube channel RSS feed as an audio podcast.

    Downloads (at most *items_to_process*) items' audio via
    ``create_audio_file`` into /y2p/output, skipping files that already
    exist, and writes feed.xml alongside them.
    """
    feed = feedparser.parse(url)
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title(feed['channel']['title'])
    fg.link(href=feed['channel']['href'], rel='alternate')
    fg.description('Audio tracks from YouTube Channel "{}"'.format(feed['channel']['title']))
    # fg.image(url=feed.image.href, width=feed.image.width, height=feed.image.height)

    output_dir = '/y2p/output'

    for item in feed['items'][:items_to_process]:
        sys.stdout.write(item['link'] + '\n')
        # Stable per-video filename derived from the video URL.
        our_id = hashlib.md5(item['link'].encode()).hexdigest()
        audio_fname = our_id + '.m4a'
        audio_fullpath = os.path.join(output_dir, audio_fname)
        if not os.path.exists(audio_fullpath):
            create_audio_file(audio_fullpath, our_id, item['link'])
        p_entry = fg.add_entry()
        p_entry.id(item['id'])
        p_entry.title(item['title'])
        p_entry.description(item['summary'])
        # NOTE(review): 'audio/m4a' is nonstandard; 'audio/mp4' is the
        # registered type for .m4a — confirm clients accept this.
        p_entry.enclosure(podcast_root_url + '/' + audio_fname, 0, 'audio/m4a')
        p_entry.published(item['published'])

    # NOTE(review): rss_str result is discarded — dead work before rss_file.
    fg.rss_str(pretty=True)
    fg.rss_file(os.path.join(output_dir, 'feed.xml'))
class Rss:
    """Small convenience wrapper around FeedGenerator for podcast feeds."""

    def __init__(self, title='', link='', logo='', description='none'):
        """Configure the underlying feed with channel-level metadata."""
        self.title = title
        generator = FeedGenerator()
        generator.load_extension('podcast')
        generator.title(title)
        generator.description(description)
        generator.link(href=link, rel='alternate')
        generator.generator('rss-it')
        if logo:
            generator.logo(logo)
        self.fg = generator

    def add_video(self, link, title, pubdate, description=''):
        """Append one item; the media URL doubles as the item id."""
        # add more formats if needed (usually works with audio/mpeg by default)
        mimetype = 'audio/mp4' if link.endswith('.m4a') else 'audio/mpeg'
        entry = self.fg.add_entry()
        entry.id(link)
        entry.title(title)
        entry.description(description)
        entry.pubDate(pubdate)
        entry.enclosure(link, '0', mimetype)

    def export(self):
        """Serialize the feed to pretty-printed RSS bytes."""
        return self.fg.rss_str(pretty=True)
def generate_feed(self):
    """Build a FeedGenerator from this object's MAPPINGS and its episodes.

    Each MAPPINGS row pairs attribute names with a dotted method path;
    attribute values are looked up on ``self`` (or the episode) and applied
    to the resolved feedgen method. Episode rows are skipped when any
    mapped value is None.

    Fixes: Python 2 ``print x`` statements (SyntaxError under Python 3)
    are now ``print(...)`` calls; the dead ``values = []`` initializers
    were dropped and the append loop became a comprehension.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    for field in self.MAPPINGS:
        value_names = field[0]
        methods = field[1]
        # collect the values from self
        values = [getattr(self, value_name) for value_name in value_names]
        # descend the attribute tree
        method = get_method(methods, fg)
        # apply the values to the found method
        method(*values)

    for episode in self.episodes.all():
        # This is the same pattern as above, I wonder if I can DRY this out.
        entry = fg.add_entry()
        value_names, method_names = zip(*episode.MAPPINGS)
        for ind, value_name in enumerate(value_names):
            print(value_name)
            values = [getattr(episode, v) for v in value_name]
            if None not in values:
                print(values)
                method = get_method(method_names[ind], entry)
                method(*values)
    print("DONE")
    return fg
def serialize(podcast: Podcast, pretty: bool = False) -> str:
    """Render *podcast* (channel + episodes) to an RSS XML string.

    :param podcast: Podcast model exposing title, description, author,
        atom_url, logo_url, episodes and episode_audio_url().
    :param pretty: When True, pretty-print the XML.
    :return: The RSS document decoded as UTF-8 text.
    """
    fg = FeedGenerator()
    fg.load_extension("podcast")
    # fg.podcast.itunes_author(podcast.author.name)
    # fg.podcast.itunes_category("Technology", "Podcasting")
    # fg.podcast.itunes_explicit("no")
    # fg.podcast.itunes_complete("no")
    # fg.podcast.itunes_new_feed_url("http://example.com/new-feed.rss")
    # fg.podcast.itunes_owner("John Doe", "*****@*****.**")
    # fg.podcast.itunes_summary("")
    fg.title(podcast.title)
    fg.description(podcast.description)
    fg.author(podcast.author.to_dict())
    fg.link(href=podcast.atom_url, rel="self")
    fg.logo(podcast.logo_url)
    for episode in podcast.episodes:
        fe = fg.add_entry()
        # The audio URL serves as the stable episode id.
        fe.id(podcast.episode_audio_url(episode))
        fe.title(episode.title)
        fe.description(episode.description)
        fe.enclosure(podcast.episode_audio_url(episode), 0, "audio/mpeg")
        # fe.author(**podcast.author.to_dict())
        # fe.podcast.itunes_author(podcast.author.name)
    return fg.rss_str(pretty=pretty).decode("utf-8")
def generate_feed(config: Config, songs: List[Dict]) -> None:
    """Write an RSS feed (index.xml) for *songs* into config.out_dir.

    Songs are emitted in reverse list order; authors are the de-duplicated
    set of song artists.
    """
    assert config.base_url, "Base URL is required to generate a feed!"
    feed_name = "index.xml"
    base_url = config.base_url
    fg = FeedGenerator()
    fg.load_extension("podcast")
    fg.id(base_url)
    fg.title(config.title)
    fg.subtitle(config.description)
    fg.link(href=base_url, rel="alternate")
    # Unique artist names become the feed's author list.
    artists = {s["artist"] for s in songs if s.get("artist")}
    fg.author([{"name": artist} for artist in artists])
    fg.logo(urljoin(base_url, "favicon.ico"))
    fg.link(href=urljoin(base_url, feed_name), rel="self")
    fg.language("en")
    for song in songs[::-1]:
        fe = fg.add_entry()
        song_url = urljoin(base_url, song["src"])
        fe.id(song_url)
        fe.title(song["title"])
        fe.description(DESCRIPTION_TEMPLATE.render(song=song).strip())
        # Deep link to the song anchor on the site.
        fe.link(href=urljoin(base_url, f'#{song["src"]}'))
        fe.enclosure(song_url, song["filesize"], "audio/mpeg")
        fe.published(entry_date(song["date"]))
    fg.rss_file(os.path.join(config.out_dir, feed_name))
def generate(self): url = self.configuration['network']['host'] + ":" + str(self.configuration['network']['port']) #Creating RSS feed fg = FeedGenerator() fg.load_extension('podcast') fg.title(self.configuration['rss']['title']) fg.author(dict(name=self.configuration['rss']['author'],email=self.configuration['rss']['email'])) fg.logo(url + "/" + self.configuration['rss']['logo']) fg.description(self.configuration['rss']['description']) #fg.subtitle(self.configuration['settings']['subtitle']) fg.link(href=url + "/" + self.configuration['rss']['file_name'], rel='self') fg.language(self.configuration['rss']['language']) for file_name in os.listdir(self.configuration['system']['media_folder']): if file_name[0] != "." and file_name != self.configuration['rss']['file_name'] and file_name[-3:] != "jpg": #Skip hidden files and RSS file itself #for i in range(len(self.data_dict)): #if (self.data_dict[i]['title'] == file_name[:-4]): fe = fg.add_entry() #fe.id((url + "/" + self.configuration['system']['media_folder'] + "/" + file_name).replace(" ", "%20")) fe.id((url + "/" + file_name).replace(" ", "%20")) fe.title(file_name[:-4]) #fe.description("\n".join(self.data_dict[i]['links'])) fe.description(file_name[:-4]) #Get time and date of file modification, add timezone offset = timezone(timedelta(hours=1)) pub_date = datetime.fromtimestamp(pathlib.Path(self.configuration['system']['media_folder'] + "/" + file_name).stat().st_mtime,tz=offset) fe.pubDate(pub_date) #file_size = os.path.getsize(self.configuration['system']['media_folder'] + "/" + file_name) fe.enclosure((url + "/" + file_name).replace(" ", "%20"), 0, 'audio/mpeg') fg.rss_str(pretty=True) fg.rss_file(self.configuration['system']['media_folder'] + "/" + self.configuration['rss']['file_name']) return True
def get_feed(request):
    """Build the "What's New" podcast feed from recent audio articles.

    Includes articles published in the last 30 days that have an audio
    file link. Returns the FeedGenerator (serialization is the caller's
    responsibility).
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')
    fg.podcast.itunes_image(
        request.build_absolute_uri(static('whatsnew/icon.png')))
    # fg.id(request.build_absolute_uri())
    fg.title("JW.ORG - What's New")
    fg.description(
        "See what has been recently added to jw.org, the official website of Jehovah's Witnesses."
    )
    fg.link(href=request.build_absolute_uri(), rel='self')

    # Include all articles with audio in the past 30 days
    date_threshold = timezone.now() - timedelta(days=30)
    articles = Article.objects\
        .filter(pub_date__gte=date_threshold)\
        .filter(audio_file_link__isnull=False)\
        .all()
    for article in articles:
        fe = fg.add_entry()
        fe.id(article.guid)
        fe.title(article.title)
        fe.description(article.description)
        fe.updated(article.pub_date)
        fe.published(article.pub_date)
        fe.enclosure(article.audio_file_link,
                     str(article.audio_file_duration),
                     article.audio_file_mimetype)
        fe.link(href=article.audio_file_link,
                type=article.audio_file_mimetype)
    return fg
def feed(self, feed_title, title, content, url, published=None, summary=None, enclosure=None, media_thumbnail=None):
    """Build a single-entry RSS string for tests, padding with faker data.

    Optional pieces (summary, enclosure, media thumbnail, published date)
    are only emitted when supplied; the 'media' extension is loaded lazily
    when a thumbnail is requested.
    """
    feed = FeedGenerator()
    feed.title(feed_title)
    feed.description(faker.sentence())
    feed.link({'href': WP_FEED_URL})

    entry = feed.add_entry()
    entry.title(title)
    entry.link({'href': url})
    entry.author(name=faker.name())
    entry.content(content, type="cdata")
    if summary:
        entry.description(summary)
    if enclosure:
        entry.enclosure(url=enclosure['url'], type=enclosure['type'],
                        length=str(faker.pyint()))
    if media_thumbnail:
        feed.load_extension('media')
        entry.media.thumbnail({'url': media_thumbnail})
    # Updated is always after published (both in a random faker timezone).
    tz = pytz.timezone(faker.timezone())
    published = published or faker.date_time(tzinfo=tz)
    entry.published(published)
    entry.updated(faker.date_time_between(start_date=published, tzinfo=tz))
    return feed.rss_str().decode('utf8')
def makeRss(self):
    """Write this cast's single <item> XML fragment and refresh the feed.

    Builds a one-entry feed, extracts the rendered <item> element from the
    serialized RSS, writes it to the per-cast XML file, then calls
    ``updateRss`` to rebuild the aggregate feed.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://hypecast.blackmad.com/' + self.mode)
    fg.title('Hype Machine Robot Radio: ' + self.mode)
    fg.author( {'name':'David Blackmad','email':'*****@*****.**'} )
    fg.logo('http://dump.blackmad.com/the-hype-machine.jpg')
    fg.language('en')
    fg.link(href='http://hypecast.blackmad.com/' + self.mode)
    fg.description('Hype Machine Robot Radio: ' + self.mode)
    # Numbered tracklist, one song id per line.
    description = ' <br/>'.join(['%s. %s' % (index + 1, self.mk_song_id(s)) for index, s in enumerate(self.songs)])

    fe = fg.add_entry()
    fe.title(self.track_name)
    fe.description(description)
    fe.id(self.filename)
    # add length
    print(self.relative_dir)
    print(self.filename)
    fe.enclosure(url = 'http://hypecast.blackmad.com/%s' % (self.filename), type="audio/mpeg")

    rss_str = fg.rss_str()
    # Grab the <item> node out of the channel element.
    newItem = ET.fromstring(rss_str)[0].find('item')
    # NOTE(review): ET.tostring returns bytes on Python 3 while the file is
    # opened in text mode — this looks like Python 2 era code; confirm.
    out = open(self.get_filename('xml'), 'w')
    out.write(ET.tostring(newItem))
    out.close()
    self.updateRss()
def create_feed(self):
    """Build and write a podcast.xml feed from ``self.infos`` (yt-dlp style).

    One entry per ``infos['entries']`` item; enclosure URLs point at
    ``<podcast_url>/<playlist title>/<item title>.mp3``. Returns True.

    Available keys in ``self.infos``:
        _type entries id title uploader uploader_id uploader_url
        extractor webpage_url webpage_url_basename extractor_key
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Podcasting')
    fg.title(self.infos['title'])
    fg.description('none')
    fg.link(href=self.args.podcast_url, rel='self')
    for item in self.infos['entries']:
        """
        #infos['entries'] keys
        id uploader uploader_id uploader_url channel_id channel_url
        upload_date license creator title alt_title thumbnail description
        categories tags subtitles automatic_captions duration
        """
        fe = fg.add_entry()
        fe.id(item['id'])
        fe.title(item['title'])
        fe.description(item['description'])
        item_full_path = self.args.podcast_url + '/' + self.infos[
            'title'] + '/' + item['title'] + '.mp3'
        # NOTE(review): length field carries the duration (seconds), not the
        # byte size the RSS spec expects — confirm this is intentional.
        fe.enclosure(item_full_path, str(item['duration']), 'audio/mpeg')
    # NOTE(review): rss_str result is discarded — dead work before rss_file.
    fg.rss_str(pretty=True)
    # create folder of feed if it doesn't exists
    os.makedirs(self.args.dir + '/' + self.infos['title'], exist_ok=True)
    fg.rss_file(self.args.dir + '/' + self.infos['title'] + '/podcast.xml')
    return True
def main(output='whats-new.atom'):
    """Generate the "What's New" feed file into CACHE_DIR/*output*.

    Only articles that have an audio file become entries; each article's
    title is also printed as progress output.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')
    fg.podcast.itunes_image("%s/icon.png" % URL_BASE)
    fg.title("JW.ORG - What's New")
    fg.description(
        "See what has been recently added to jw.org, the official website of Jehovah's Witnesses."
    )
    fg.link(href="{}{}".format(URL_BASE, output), rel='self')
    for article in list_articles():
        print(article.title)
        audio_file = article.audio_file
        if audio_file:
            fe = fg.add_entry()
            fe.id(article.guid)
            fe.title(article.title)
            fe.description(article.description)
            fe.updated(article.pub_date)
            fe.published(article.pub_date)
            # Enclosure 'length' carries the duration here, not byte size.
            fe.enclosure(audio_file['file']['url'],
                         str(audio_file['duration']),
                         audio_file['mimetype'])
            fe.link(href=audio_file['file']['url'],
                    type=audio_file['mimetype'])
    # NOTE(review): rss_str result is discarded — dead work before rss_file.
    fg.rss_str(pretty=True)
    fg.rss_file(os.path.join(CACHE_DIR, output))
class TestExtensionDc(unittest.TestCase):
    """Tests for feedgen's Dublin Core ('dc') extension."""

    def setUp(self):
        # Minimal valid feed with the dc extension loaded.
        self.fg = FeedGenerator()
        self.fg.load_extension('dc')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_entryLoadExtension(self):
        # Loading the extension again on an entry must not blow up.
        fe = self.fg.add_item()
        try:
            fe.load_extension('dc')
        except ImportError:
            pass  # Extension already loaded

    def test_elements(self):
        # Every dc_* setter should echo its value back as a list,
        # and the feed must still serialize to both Atom and RSS.
        for method in dir(self.fg.dc):
            if method.startswith('dc_'):
                m = getattr(self.fg.dc, method)
                m(method)
                assert m() == [method]
        self.fg.id('123')
        assert self.fg.atom_str()
        assert self.fg.rss_str()
def generate_rss(show_info, show_uri, country_code):
    """Build an RSS string for a Spotify show, paging episodes 50 at a time.

    Enclosures point at the 30s preview clips (rehosted scdn URL); the
    episode Spotify URI serves as both guid and id.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.description(show_info['description'])
    fg.author({'name': show_info['publisher']})
    fg.title(show_info['name'])
    fg.link({'href': show_info['external_urls']['spotify']})
    fg.id(show_uri)
    # NOTE(review): crashes when the show has no images — confirm upstream
    # always supplies at least one.
    fg.image(show_info.get('images')[0]['url'])
    total_episodes = show_info['episodes']['total']
    added_episodes = 0
    # NOTE(review): loops forever if the API ever returns fewer episodes
    # than 'total' claims — consider a defensive break.
    while added_episodes != total_episodes:
        episodes = sp.show_episodes(show_id=show_uri, limit=50,
                                    offset=added_episodes,
                                    market=country_code)
        for episode in episodes['items']:
            ent = fg.add_entry()
            ent.podcast.itunes_duration(int(episode['duration_ms'] / 1000))
            ent.title(episode.get('name'))
            ent.guid(episode['uri'])
            ent.published(
                datetime.strptime(episode['release_date'],
                                  '%Y-%m-%d').replace(tzinfo=timezone.utc))
            ent.description(episode['description'])
            ent.id(episode['uri'])
            ent.enclosure(
                url=
                f"https://anon-podcast.scdn.co/{episode['audio_preview_url'].split('/')[-1]}",
                length=0,
                type='audio/mpeg')
            added_episodes += 1
    return fg.rss_str().decode('utf-8')
def generate_feed(self):
    """Build a FeedGenerator from this object's MAPPINGS and its episodes.

    Each MAPPINGS row pairs attribute names with a dotted method path;
    attribute values are looked up on ``self`` (or the episode) and applied
    to the resolved feedgen method. Episode rows are skipped when any
    mapped value is None.

    Fixes: Python 2 ``print x`` statements (SyntaxError under Python 3)
    are now ``print(...)`` calls; the dead ``values = []`` initializers
    were dropped and the append loop became a comprehension.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    for field in self.MAPPINGS:
        value_names = field[0]
        methods = field[1]
        # collect the values from self
        values = [getattr(self, value_name) for value_name in value_names]
        # decend the attribute tree
        method = get_method(methods, fg)
        # apply the values to the found method
        method(*values)

    for episode in self.episodes.all():
        # This is the same pattern as above, I wonder if I can DRY this out.
        entry = fg.add_entry()
        value_names, method_names = zip(*episode.MAPPINGS)
        for ind, value_name in enumerate(value_names):
            print(value_name)
            values = [getattr(episode, v) for v in value_name]
            if None not in values:
                print(values)
                method = get_method(method_names[ind], entry)
                method(*values)
    print("DONE")
    return fg
def export_feed(self, output):
    """Write the combined magazines feed to CACHE_DIR/*output*.

    Flattens the manifest (lang -> mnemonic -> issue -> data) and emits
    entries newest-issue-first.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')
    fg.podcast.itunes_image("%s/icon.png" % URL_BASE)
    fg.title('JW.ORG Magazines')
    fg.description('Combined Feed of Watchtower (public), Watchtower (study), and Awake! in English from jw.org.')
    fg.link(href="%s/%s" % (URL_BASE, output), rel='self')
    manifest = self._load()
    entries = []
    # Flatten the nested manifest into (issue, data) pairs.
    for lang, mnemonics in manifest.items():
        for mnemonic, issues in mnemonics.items():
            for issue, data in issues.items():
                entries.append((issue, data))
    for issue, entry in sorted(entries, key=lambda i: i[0], reverse=True):
        fe = fg.add_entry()
        fe.id( entry['hash'] )
        fe.title( entry['title'] )
        fe.description( entry['title'] )
        fe.published( pytz.utc.localize( entry['created_on'] ) )
        url = "%s/%s" % (URL_BASE, os.path.basename(entry['file']))
        mime = 'audio/mpeg'
        # Enclosure 'length' carries the duration here, not byte size.
        fe.enclosure(url, str(entry['duration']), mime)
        fe.link(href=url, type=mime)
    # NOTE(review): rss_str result is discarded — dead work before rss_file.
    fg.rss_str(pretty=True)
    fg.rss_file(os.path.join(CACHE_DIR, output))
def generate_feed(output_file, exclude_highlights=True):
    """Regenerate the Le Batard Show RSS feed into *output_file*.

    Re-parses the upstream ESPN feed, copies channel/iTunes metadata, and
    re-dates each episode to a local-hour-accurate pubDate based on its
    title ("Hour 1/2/3", full "Show: " or highlight).

    :param output_file: Path the RSS file is written to.
    :param exclude_highlights: Skip entries longer than an hour (full-show
        dumps) when True.

    Fixes: the ``if ...: pass / else: <body>`` construct is now a guard
    ``continue``; the discarded ``fg.rss_str(pretty=True)`` call was
    removed as dead work.
    """
    # Parse RSS feed
    d = feedparser.parse(ESPN_RSS_FEED)
    IMAGE_URL = d.feed.image["href"]

    # RSS feed generation
    fg = FeedGenerator()
    fg.load_extension("podcast", rss=True)

    ## RSS tags
    # Required
    fg.title(d.feed.title)
    fg.link(href="https://github.com/aaearon/lebatard-show-rss")
    fg.description(d.feed.description)
    # Optional
    fg.language(d.feed.language)
    fg.image(IMAGE_URL)
    fg.subtitle(d.feed.subtitle)
    # iTunes
    fg.podcast.itunes_author(d.feed.author)
    fg.podcast.itunes_category(itunes_category=d.feed.category)
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit="clean")
    fg.podcast.itunes_owner(name=CONTACT["name"], email=CONTACT["email"])

    tz = pytz.timezone("America/Los_Angeles")

    for e in d.entries:
        # Guard clause: skip long "highlight" episodes when excluded.
        if exclude_highlights and episode_duration_string_to_int(e["itunes_duration"]) > 3600:
            continue

        fe = fg.add_entry()
        fe.id(e.id)
        fe.title(e.title)
        fe.description(e.description)
        fe.enclosure(url=e.enclosures[0]["href"],
                     length=e.enclosures[0]["length"],
                     type=e.enclosures[0]["type"])
        fe.podcast.itunes_summary(e.description)
        fe.podcast.itunes_subtitle(e.description)
        fe.podcast.itunes_duration(e["itunes_duration"])

        dt = datetime.fromtimestamp(time.mktime(e.published_parsed))
        date = tz.localize(dt)

        # Local hour
        if "Show: " in e.title:
            fe.published(date)
        elif "Hour 1" in e.title:
            fe.published(date + timedelta(hours=1))
        elif "Hour 2" in e.title:
            fe.published(date + timedelta(hours=2))
        elif "Hour 3" in e.title:
            fe.published(date + timedelta(hours=3))
        else:
            fe.published(date + timedelta(hours=-1))

    fg.rss_file(output_file)
def get_feed(query, title, description, link, image):
    """Get an RSS feed from the results of a query to the YouTube API.

    Each found video is resolved to a direct media URL through the
    configured plugin; videos the plugin cannot extract are skipped.
    Returns the pretty-printed RSS bytes.
    """
    service = _get_youtube_client()
    videos = service.search().list(part='snippet', **query, order='date',
                                   type='video', safeSearch='none').execute()
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title(title)
    fg.description(description)
    fg.link(href=link, rel='alternate')
    fg.image(image)
    youtube_plugin = get_plugin_from_settings()
    for video in videos['items']:
        try:
            video_url = youtube_plugin.extract_link(
                "https://www.youtube.com/watch?v=" + video['id']['videoId'])
        except PluginException:
            # Unextractable video: leave it out of the feed.
            continue
        fe = fg.add_entry()
        fe.id(video['id']['videoId'])
        fe.title(video['snippet']['title'])
        fe.description(video['snippet']['description'])
        fe.pubdate(dateutil.parser.parse(video['snippet']['publishedAt']))
        fe.podcast.itunes_image(video['snippet']['thumbnails']['high']['url'])
        # HEAD request supplies the real enclosure size and content type.
        video_info = requests.head(video_url)
        fe.enclosure(video_url, video_info.headers['Content-Length'],
                     video_info.headers['Content-Type'])
    return fg.rss_str(pretty=True)
def _gen_rss(event):
    """Generate RSS feed file and place in bucket when file uploaded to S3.

    Walks the source bucket for valid files (oldest modification first),
    makes each object public, adds it as a feed entry, and writes rss.xml
    to the destination bucket.

    Fix: ``obj.key.rstrip('.mp3')`` strips any trailing run of the
    characters {'.', 'm', 'p', '3'} — e.g. ``'comp.mp3'`` became ``'co'``.
    Replaced with ``os.path.splitext`` to drop only the extension.
    """
    region = os.getenv('BUCKET_REGION')
    s3 = boto3.resource('s3')
    sbucket = s3.Bucket(event.bucket)
    dbucket = s3.Bucket(os.getenv('RSS_BUCKET'))
    fg = FeedGenerator()
    fg.title('Lesscast Uploads')
    fg.description('Created by lesscast')
    fg.link(href='https://{}.s3.{}.amazonaws.com/rss.xml'.format(
        dbucket.name, region))
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Technology', 'Podcasting')

    keyfunc = operator.attrgetter('last_modified')
    iterator = filter(_get_valid_files, sbucket.objects.all())
    for objsum in sorted(iterator, key=keyfunc):
        app.log.info('Adding %s to feed', objsum.key)
        pub_url = 'https://{}.s3.{}.amazonaws.com/{}'.format(
            sbucket.name, region, objsum.key)
        obj = objsum.Object()
        # Enclosures must be fetchable by podcast clients.
        acl = obj.Acl()
        acl.put(ACL='public-read')
        fe = fg.add_entry()
        fe.id(pub_url)
        fe.link(href=pub_url)
        # Title is the basename without its extension.
        fe.title(os.path.splitext(os.path.basename(obj.key))[0])
        fe.description('added by lesscast')
        fe.enclosure(pub_url, 0, 'audio/mpeg')

    rss_content = fg.rss_str(pretty=True)
    dbucket.put_object(ACL='public-read', Key='rss.xml', Body=rss_content)
    app.log.info('Complete')
def generate_feed_from_episodes(episodes):
    """Write seinfeld.xml from a list of local episode file names.

    Pub dates are synthetic: one day per episode starting 1999-01-01 UTC,
    preserving episode order in podcast clients.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://dannyshaw.github.io/podcast-feeds')
    fg.title('Seinfeld Complete Audio')
    fg.link(href='http://dannyshaw.github.io/podcast-feeds', rel='alternate')
    fg.subtitle('I\'ve seen them enough, audio is all I need.')
    fg.link(
        href=
        'https://raw.githubusercontent.com/dannyshaw/podcast-feeds/master/podcast-feeds/seinfeld.xml',
        rel='self')
    fg.language('en')
    for index, ep in enumerate(episodes):
        file_size = getsize(join(FILES, ep))
        fe = fg.add_entry()
        fe.id(f'https://s3.amazonaws.com/danny.podcasts.seinfeld/{ep}')
        fe.title(ep)
        fe.description(ep)
        # Synthetic date keeps client-side ordering stable.
        pub_date = datetime(1999, 1, 1, tzinfo=timezone.utc) + timedelta(index)
        fe.pubDate(pub_date)
        fe.link(href=f'https://s3.amazonaws.com/danny.podcasts.seinfeld/{ep}')
        fe.enclosure(f'https://s3.amazonaws.com/danny.podcasts.seinfeld/{ep}',
                     f'{file_size}', 'audio/mpeg')
    # NOTE(review): rss_str result is discarded — dead work before rss_file.
    fg.rss_str(pretty=True)
    fg.rss_file('seinfeld.xml')
def export_feed(self, output):
    """Write the combined magazines feed to CACHE_DIR/*output*.

    Flattens the manifest (lang -> mnemonic -> issue -> data) and emits
    entries newest-issue-first. Near-duplicate of the other export_feed
    in this file.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')
    fg.podcast.itunes_image("%s/icon.png" % URL_BASE)
    fg.title('JW.ORG Magazines')
    fg.description(
        'Combined Feed of Watchtower (public), Watchtower (study), and Awake! in English from jw.org.'
    )
    fg.link(href="%s/%s" % (URL_BASE, output), rel='self')
    manifest = self._load()
    entries = []
    # Flatten the nested manifest into (issue, data) pairs.
    for lang, mnemonics in manifest.items():
        for mnemonic, issues in mnemonics.items():
            for issue, data in issues.items():
                entries.append((issue, data))
    for issue, entry in sorted(entries, key=lambda i: i[0], reverse=True):
        fe = fg.add_entry()
        fe.id(entry['hash'])
        fe.title(entry['title'])
        fe.description(entry['title'])
        fe.published(pytz.utc.localize(entry['created_on']))
        url = "%s/%s" % (URL_BASE, os.path.basename(entry['file']))
        mime = 'audio/mpeg'
        # Enclosure 'length' carries the duration here, not byte size.
        fe.enclosure(url, str(entry['duration']), mime)
        fe.link(href=url, type=mime)
    # NOTE(review): rss_str result is discarded — dead work before rss_file.
    fg.rss_str(pretty=True)
    fg.rss_file(os.path.join(CACHE_DIR, output))
def run(folder, url):
    """Build rss.xml for an audiobook folder of mp3 files.

    NOTE(review): this is Python 2 code (``tail.decode`` on a str path,
    ``u""`` literal); under Python 3 ``os.path.split`` returns str and
    ``.decode`` would fail — confirm target interpreter before touching.
    """
    from feedgen.feed import FeedGenerator

    fg = FeedGenerator()
    # Folder name (decoded) becomes both the book and every episode title.
    head, tail = os.path.split(folder)
    title = tail.decode("utf-8")
    fg.id(str(uuid.uuid4()))
    fg.title(title)
    fg.link(href="{0}/rss.xml".format(url), rel="self")
    fg.description(u"Audiobook `{0}` generated with rssbook".format(title))
    fg.load_extension("podcast")
    for item in sorted(os.listdir(folder)):
        if os.path.splitext(item)[1] == ".mp3":
            get_node(os.path.join(folder, item))
            fullpath = os.path.join(folder, item)
            fe = fg.add_entry()
            fe.id(str(uuid.uuid4()))
            fe.title(title)
            fe.description(item)
            fe.link(
                href="{0}/{1}".format(url, item), rel="enclosure",
                type="audio/mpeg", length=str(os.stat(fullpath).st_size)
            )
    fg.rss_file(os.path.join(folder, "rss.xml"))
def build_rss_feeds(blog_posts):
    """Write the WhoTracksMe blog RSS feed (_site/blog/feed.xml).

    Only posts flagged ``publish`` are included; thumbnails use the
    'media' extension.
    """
    feed = FeedGenerator()
    feed.load_extension("media", rss=True, atom=True)
    base = "https://whotracks.me"
    for post in blog_posts:
        if post["publish"]:
            entry = feed.add_entry()
            entry.id(f'{base}/blog/{post["filename"]}.html')
            entry.title(post["title"])
            entry.link(link={"href": f"{base}/blog/{post['filename']}.html"})
            entry.author({"name": post["author"]})
            # Dates in front matter are naive; pinned to CET here.
            entry.pubDate(
                datetime.strptime(post["date"],
                                  "%Y-%m-%d").replace(tzinfo=timezone("CET")))
            entry.description(post["subtitle"])
            entry.media.thumbnail(
                url=f'https://whotracks.me/static/img/{post["header_img"]}')
    feed.title("WhoTracksMe blog")
    feed.description("By the Ghostery tech team")
    feed.link(link={"href": f"{base}/blog.html"})
    feed.id("wtm")
    feed.language("en")
    feed.logo(f"{base}/static/img/who-tracksme-logo.png")
    feed.rss_file("_site/blog/feed.xml")
def __init__(self, rss_filename, json_filename):
    """Load previously-seen videos and prepare the DVTV feed skeleton.

    Restores the video set from *json_filename* (if present) and advances
    the download cutoff to one week before the newest known video, so
    reruns only fetch recent items.
    """
    self.videos = set()
    self.rss_filename = rss_filename
    self.json_filename = json_filename
    # NOTE(review): passing a pytz timezone as tzinfo= yields the zone's
    # LMT offset, not CET/CEST — localize() is the usual fix; confirm.
    self.start_date = datetime(2017, 5, 1, tzinfo = timezone('Europe/Prague'))

    if os.path.exists(json_filename):
        with open(json_filename, 'r') as ifile:
            for i in json.load(ifile):
                self.videos.add(Video(json = i))

    if len(self.videos) > 0:
        # Re-scan a one-week overlap behind the newest stored video.
        latest_date = max(map(lambda x: x.date, self.videos)) - timedelta(days = 7)
        if latest_date > self.start_date:
            self.start_date = latest_date

    logging.info('Downloading videos younger than: ' + datetime.strftime(self.start_date, datetime_format))

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.id('marxin-dvtv')
    fg.title('DVTV')
    fg.author({'name': 'Martin Liška', 'email': '*****@*****.**' })
    fg.language('cs-CZ')
    fg.link(href = 'http://video.aktualne.cz/dvtv/', rel = 'self')
    fg.logo(urljoin(root_url, 'podcasts/cover.jpg'))
    fg.description('DVTV')
    self.feed_generator = fg
def create_feed():
    """Creates feed from items from db.

    Every video row becomes one entry whose enclosure is the local
    /download/<id>.mp3 endpoint; the feed is written to download/feed.xml.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title('My feed')
    fg.link(href=baseurl, rel='alternate')
    fg.description('Some description')
    fg.author({"name": "makovako", "email": "*****@*****.**"})
    fg.podcast.itunes_owner(name='makovako', email='*****@*****.**')
    fg.podcast.itunes_author("makovako")
    videos = get_all_videos()
    for video in videos:
        fe = fg.add_entry()
        fe.id(baseurl + 'download/' + video['youtube_id'])
        fe.title(video['title'])
        fe.description(video['description'])
        fe.podcast.itunes_author(video['uploader'])
        fe.podcast.itunes_image(video['thumbnail'])
        fe.enclosure(baseurl + 'download/' + video['youtube_id'] + '.mp3',
                     0, 'audio/mpeg')
    # NOTE(review): rss_str result is discarded — dead work before rss_file.
    fg.rss_str(pretty=True)
    fg.rss_file('download/feed.xml')
def gen_feed(endpoint):
    """Generate one RSS file per audiobook found under ./audiobooks.

    Each book becomes its own feed saved as ./gen/<book>.xml; enclosure
    URLs are <endpoint> + the file path relative to ./audiobooks.
    """
    # Make sure we have somewhere to save the files
    if not os.path.isdir('./gen'):
        print('There is no gen directory. Create ./gen')
    # Uses parse_dir.py to get the books and files
    books = parse_dir.getbooks_r('./audiobooks')
    for (book, files) in books:
        # Creates a new feed for each book
        fg = FeedGenerator()
        fg.load_extension('podcast')
        fg.podcast.itunes_category('Audiobook')
        for (file_name, file_path) in files:
            # the 1: removes the period because the base dir is ./audiobooks
            url = endpoint + file_path[1:]
            fe = fg.add_entry()
            fe.id(url)
            fe.title(file_name)
            fe.description(file_name)
            fe.enclosure(requote_uri(url), str(os.path.getsize(file_path)),
                         'audio/mpeg')
        fg.title(book)
        fg.link(href=endpoint, rel='self')
        fg.description(book)
        # NOTE(review): rss_str result is discarded — dead work.
        fg.rss_str(pretty=True)
        # Saves the file
        rss_file_path = os.path.join('./gen/', book + '.xml')
        ensure_dir(rss_file_path)
        logging.info("generate feed: %s" % rss_file_path)
        fg.rss_file(rss_file_path)
def _create_feed(speaker, talks, file_name):
    """Write an RSS feed of General Conference talks by *speaker* to *file_name*.

    Args:
        speaker: Speaker display name.
        talks: Talk dicts (title, preview, html, audio_url, audio_size, uri,
            url, time); talks[0]['time'] is used as the feed timestamp.
        file_name: Output path for the RSS XML.
    """
    LOGGER.info("Creating feed for %s", speaker)
    updated = talks[0]['time']
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.language('en')
    fg.title(f'Talks By {speaker}')
    fg.link(href='http://philip.lundrigan.org/Speakercast/')
    fg.image(url=f'http://philip.lundrigan.org/Speakercast/covers/{urllib.parse.quote(speaker)}.jpg',
             title=f'General Conference talks by {speaker}.')
    fg.description(f'General Conference talks by {speaker}.')
    fg.author({'name': 'Philip Lundrigan', 'email': '*****@*****.**'})
    fg.generator('Speakercast')
    fg.pubDate(updated)
    fg.lastBuildDate(updated)
    fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')
    for talk in talks:
        fe = fg.add_entry()
        # BUG FIX: a leftover placeholder id ('http://lernfunk.de/media/...')
        # was set here and immediately overwritten by fe.id(talk['uri']) below;
        # it has been removed.
        fe.title(talk['title'])
        fe.description(talk['preview'])
        fe.content(talk['html'], type='CDATA')
        fe.enclosure(talk['audio_url'], str(talk['audio_size']), 'audio/mpeg')
        fe.id(talk['uri'])
        fe.link(href=talk['url'])
        fe.published(talk['time'])
    fg.rss_file(file_name, pretty=True)
def init_feed_generator(feed):
    """Return a podcast FeedGenerator seeded from a parsed feed's metadata."""
    generator = FeedGenerator()
    generator.load_extension('podcast')
    generator.title("PocketCast")
    generator.link(href=feed.feed.link, rel='alternate')
    # Fall back to a fixed subtitle when the source feed has no description.
    generator.subtitle(feed.feed.description or 'PocketCast')
    return generator
class PodcastBaseItemExporter(BaseItemExporter, metaclass=abc.ABCMeta):
    """Abstract item exporter that renders scraped episodes as an RSS feed.

    Feed construction is delegated to feedgen; concrete subclasses decide
    where the rendered XML ends up by implementing save_to_storage.
    """

    def __init__(self, uri, title, description, url, image_url, **kwargs):
        """Set up the feed-level metadata.

        Args:
            uri: Where to save the feed.
            title: Podcast title.
            description: Description of the podcast.
            url: Url of the podcast.
            image_url: Main image of the podcast.
            **kwargs: Forwarded verbatim to BaseItemExporter.
        """
        super().__init__(**kwargs)
        self.uri = uri
        self.fg = FeedGenerator()
        self.fg.load_extension('podcast')
        self.fg.title(title)
        self.fg.description(description)
        self.fg.link(href=url)
        self.fg.image(image_url)
        # Clear the (name-mangled) private attribute so no <lastBuildDate> is
        # emitted; Plex otherwise confuses pubDate with lastBuildDate.
        self.fg._FeedGenerator__rss_lastBuildDate = None

    def export_item(self, item):
        """Append one PodcastEpisodeItem to the feed.

        Args:
            item: A PodcastEpisodeItem.
        """
        entry = self.fg.add_entry()
        entry.title(item.get('title'))
        entry.description(item.get('description'))
        entry.published(item.get('publication_date'))
        entry.enclosure(item.get('audio_url'), 0, 'audio/mpeg')
        entry.guid(item.get('guid'))

    def finish_exporting(self):
        """Render the feed as RSS and hand the content to save_to_storage."""
        self.save_to_storage(self.fg.rss_str(pretty=True))

    @abc.abstractmethod
    def save_to_storage(self, rss_content):
        """Persist *rss_content*; subclasses must implement this."""
        pass
def write_manifest(articles, out_file):
    """Render *articles* into a podcast RSS file at *out_file*."""
    feed = FeedGenerator()
    feed.load_extension('podcast')
    write_meta_info(feed)
    for entry in articles:
        write_article(entry, feed)
    feed.rss_file(out_file)
def run(feeds):
    """Merge *feeds* into one podcast RSS feed and write it to stdout."""
    fgen = FeedGenerator()
    fgen.load_extension('podcast')
    # BUG FIX: fgen was previously discarded and a bare FeedGenerator()
    # (without the podcast extension) was used as the reduce seed.
    result = reduce(merge, map(feed_parse, feeds), fgen)
    result.rss_file(sys.stdout)
def __init_feed_generator(self):
    """Build a podcast FeedGenerator pre-populated with this feed's metadata."""
    generator = FeedGenerator()
    generator.load_extension('podcast')
    generator.title(self.title)
    # NOTE(review): link() receives a positional value; feedgen expects a dict
    # (or list of dicts) there — presumably __generate_podcast_link returns
    # one. Confirm against its definition.
    generator.link(self.__generate_podcast_link(self.link))
    generator.description(self.description)
    generator.podcast.itunes_category('Religion & Spirituality', 'Judaism')
    return generator
def _get_header(self):
    """Return a dc-extended FeedGenerator carrying placeholder channel metadata."""
    feed = FeedGenerator()
    feed.load_extension('dc')
    feed.title('Feed title: %s' % self._rss_path)
    feed.link(href=self.__URL, rel='self')
    feed.description('Feed description')
    return feed
def _get_header(self):
    """Build the feed header from the channel details; caches the uploads id."""
    title, desc, self.__uploads_id = self.__get_channel_details(self._rss_path)
    feed = FeedGenerator()
    feed.load_extension('dc')
    feed.title(title)
    feed.link(href=self.__PLAYLIST_URL % self.__uploads_id, rel='self')
    # Fall back to the title when the channel has no description.
    feed.description(desc or title)
    return feed
def generate_feed(channel_dict, file_metadatas):
    """Return a pretty-printed RSS string for the channel and its files."""
    feed = FeedGenerator()
    feed.load_extension("podcast")
    feed.link(href=channel_dict["url"], rel="self")
    feed.title(channel_dict["title"])
    feed.description(channel_dict["description"])
    for metadata in file_metadatas:
        add_entry(feed, metadata)
    return feed.rss_str(pretty=True)
def rss(request):
    """Django view: serve the podcast RSS feed, recording a GA pageview first.

    Google Analytics Measurement Protocol fields:
      v=1 (version), tid (property id), cid (anonymous client id),
      t=pageview, dh (hostname), dp (page), dt (title).
    """
    angrates_uuid = uuid.UUID('f93c5388-f60b-5159-bbfc-d08d6f7b401f')
    # Derive a stable anonymous client id from the caller's IP
    # (first hop of X-Forwarded-For when behind a proxy).
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    cid = uuid.uuid5(angrates_uuid, ip)
    data = {
        'v': 1,
        'tid': 'UA-19269567-1',
        'cid': cid,
        't': 'pageview',
        'dh': 'armstrongandgettybingo.com',
        'dp': '/rss/',
        'dt': 'Podcast',
    }
    # ROBUSTNESS FIX: analytics is best-effort — previously a GA outage or a
    # hung connection would break (or stall) the feed view. Bound the request
    # and swallow request-level failures.
    try:
        requests.post('https://www.google-analytics.com/collect', data=data,
                      timeout=5)
    except requests.RequestException:
        pass
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://www.armstrongandgettybingo.com/rss')
    fg.podcast.itunes_category('News & Politics', 'Conservative (Right)')
    fg.podcast.itunes_explicit('no')
    fg.title('The Armstrong and Getty Show (Bingo)')
    fg.author({'name': 'Ben Friedland', 'email': '*****@*****.**'})
    fg.link(href='http://www.armstrongandgettybingo.com', rel='alternate')
    fg.logo('https://s3-us-west-1.amazonaws.com/bencast/bingologo.png')
    fg.subtitle('Armstrong and Getty Bingo')
    fg.description('The Armstrong and Getty Show - Unofficial Feed including Archives back to 2001.')
    fg.link(href='http://www.armstrongandgettybingo.com/rss', rel='self')
    fg.language('en')
    pacific = pytz.timezone('America/Los_Angeles')
    for hour in Hour.objects.all().order_by('-pub_date'):
        fe = fg.add_entry()
        fe.id(hour.link)
        fe.title(hour.title)
        fe.description(hour.description)
        fe.enclosure(hour.link, 0, 'audio/mpeg')
        # pub_date is stored naive; localize to Pacific for the feed.
        fe.published(pacific.localize(hour.pub_date))
    return HttpResponse(fg.rss_str(pretty=True), content_type='application/rss+xml')
def setup_feed():
    """Return a podcast FeedGenerator pre-filled with the Potato feed metadata."""
    feed = FeedGenerator()
    feed.load_extension("podcast")
    feed.language("en")
    feed.id("https://jdelman.me/potato")
    feed.author(name="Potato", email="*****@*****.**")
    feed.link(href="https://jdelman.me/potato", rel="alternate")
    feed.logo("https://jdelman.me/static/potato.jpg")
    feed.title("Potato - Josh's Saved Videos")
    feed.subtitle("Automatically generated RSS.")
    return feed
def get_feedgenerator(self):
    """Create the W-Me podcast FeedGenerator with its static channel metadata."""
    feed = FeedGenerator()
    feed.load_extension('podcast')
    feed.id('http://pod.w-me.net')
    feed.title('W-Me Podcast')
    feed.description('W-Me podcast')
    feed.author({'name': 'Alex Dai', 'email': '*****@*****.**'})
    feed.link(href='http://pod.w-me.net', rel='alternate')
    feed.logo('http://pandodaily.files.wordpress.com/2012/08/shutterstock_58664.jpg')
    feed.link(href='http://pod.w-me.net/feed.atom', rel='self')
    feed.language('en')
    feed.podcast.itunes_category('Technology', 'Podcasting')
    return feed
def _get_header(self):
    """Build the feed header by copying channel tags from the source RSS XML."""
    feed = FeedGenerator()
    feed.load_extension('dc')
    # Pull title/description/link out of the original document's <channel>.
    channel = self.__get_xml_dict(
        self.__rss_xml.find('channel'),
        ['title', 'description', 'link']
    )
    feed.title(channel['title'])
    feed.link(href=channel['link'], rel='self')
    feed.description(channel['description'])
    return feed
def add_feed(self, feed_id, yt_playlist):
    """Register and return a new feed built from a YouTube playlist resource."""
    snippet = yt_playlist["snippet"]
    feed = FeedGenerator()
    feed.load_extension("podcast")
    feed.id(feed_id)
    feed.title(snippet["title"])
    feed.author({"name": snippet["channelTitle"]})
    feed.description(snippet["description"])
    feed.logo(snippet["thumbnails"]["standard"]["url"])
    feed.link(href="https://www.youtube.com/playlist?list=%s" % (yt_playlist["id"]))
    # Result discarded — presumably kept for its fail-fast effect on missing
    # required channel fields; verify before removing.
    feed.rss_str(pretty=True)
    feed.last_updated = 0
    self.feeds[feed_id] = feed
    return feed
def xml():
    """Flask view: render all items across channels as a podcast RSS document."""
    items = Item.query.join(Channel, Channel.id == Item.channel_id).add_columns(
        Item.update, Item.number, Item.file_url, Channel.title, Channel.name,
        Channel.personality, Channel.text, Channel.copyright,
        Channel.image_url).order_by(Item.update.desc()).all()
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.link(href='http://www.onsen.ag/', rel='alternate')
    fg.title(u'音泉 for Podcast')
    fg.subtitle(u'音泉 アニメ・ゲーム・声優系ラジオ')
    # Hoisted out of the loop: the zone object is loop-invariant.
    tz = pytz.timezone('Asia/Tokyo')
    for item in items:
        fe = fg.add_entry()
        fe.id(item.file_url)
        fe.title(u'[{0}][{1}]{2}'.format(item.update.strftime('%Y/%m/%d'), item.number, item.title))
        fe.description(item.text)
        fe.enclosure(item.file_url, 0, 'audio/mpeg')
        # BUG FIX: replace(tzinfo=pytz_zone) attaches the zone's LMT offset
        # (+09:19 for Asia/Tokyo, not +09:00); pytz zones must be applied
        # via localize().
        d = tz.localize(item.update)
        fe.pubdate(d.isoformat())
    xml = fg.rss_str(pretty=True)
    return Response(xml, mimetype='text/xml')
class Feed:
    # Builds a podcast RSS feed from a directory tree of audio files.
    # NOTE: Python 2 code — see the print statement in addEntry.

    def __init__(self, baseURL, audioDir):
        # baseURL: public URL prefix for enclosure links (used with '+', so it
        #          is expected to end in '/'); audioDir: local directory walked
        #          recursively for audio files.
        self.baseURL = baseURL
        self.dir = audioDir
        self.fg = FeedGenerator()
        self.fg.load_extension('podcast')
        self.fg.id(baseURL)
        self.fg.title('Yesterdays Baseball')
        self.fg.author( name='MLB' )
        self.fg.link( href=baseURL, rel='alternate' )
        # NOTE(review): these URLs point at a wiki page fragment, not an image
        # file — probably not a valid logo/icon target; confirm.
        self.fg.logo('http://en.wikipedia.org/wiki/Major_League_Baseball_logo#/media/File:Major_League_Baseball.svg')
        self.fg.icon('http://en.wikipedia.org/wiki/Major_League_Baseball_logo#/media/File:Major_League_Baseball.svg')
        self.fg.subtitle("Awright, 'arry? See that ludicrous display last night?")
        self.fg.link( href=baseURL+'podcast.xml', rel='self' )
        self.fg.language('en')
        self.fg.podcast.itunes_explicit('no')
        self.fg.podcast.itunes_complete('no')
        self.fg.podcast.itunes_new_feed_url(baseURL+'podcast.xml')
        self.fg.podcast.itunes_summary("Awright, 'arry? See that ludicrous display last night?")
        # Populate the feed immediately on construction.
        self.addAllEntries()

    def __repr__(self):
        # The feed's rendered XML doubles as its repr.
        return self.fg.rss_str(pretty=True)

    def addAllEntries(self):
        # Walk the audio directory and add every file whose extension is a
        # known key of MIME_TYPES.
        for root, dirs, files in os.walk(self.dir):
            for f in files:
                if os.path.splitext(f)[1] in MIME_TYPES.keys():
                    self.addEntry(root,f)

    def addEntry(self,root,f):
        # Add one file as a feed entry, titled from its ID3 tags.
        path = os.path.join(root,f)
        fileName, fileExtension = os.path.splitext(f)
        print "Adding...",path
        fe = self.fg.add_entry()
        # NOTE(review): the id/enclosure URL joins baseURL with the bare file
        # name only, not its subdirectory path — files with the same name in
        # different subdirectories would collide; confirm the layout is flat.
        fe.id(self.baseURL+f)
        mediafile = ID3(path)
        fe.title(mediafile['TIT2'].text[0] + " " + fileName)  # TIT2 = track title frame
        fe.summary(mediafile['TPE1'].text[0])                 # TPE1 = artist frame
        fe.content(mediafile['TPE1'].text[0])
        fe.enclosure(self.baseURL+f, 0, MIME_TYPES[fileExtension])
def createFeed(links, titles):
    """Build the series videocast feed, save it to rss.xml and return the XML.

    Args:
        links: Episode video URLs, parallel with *titles*.
        titles: Episode titles.
    """
    # Create the feed file.
    feed = FeedGenerator()
    feed.load_extension("podcast")
    feed.id("http://twitter.com/dorukcankisin")
    feed.title(DIZI_TITLE)
    feed.author({"name": "dorukcan kisin", "email": "*****@*****.**"})
    feed.link(href="http://twitter.com/dorukcankisin", rel="alternate")
    feed.logo(DIZI_LOGO)
    feed.subtitle(DIZI_TITLE + " videocast")
    feed.language("en")
    for idx, url in enumerate(links):
        entry = feed.add_entry()
        entry.id(url)
        entry.enclosure(url, 0, "video/mp4")
        entry.title(titles[idx])
        entry.description(titles[idx])
    feed.rss_file("rss.xml")
    return feed.rss_str(pretty=True)
def create_feed(episodes, output_filepath=None):
    """Build an Atom feed of 'Willkommen Österreich' episodes.

    Args:
        episodes: Episode objects exposing .page, .num, .date, .description
            and .videos (iterable of video URLs).
        output_filepath: If given, write the feed there; otherwise print it.
    """
    woe_feed = FeedGenerator()
    woe_feed.load_extension('podcast', atom=True)
    woe_feed.title(u"Willkommen Österreich")
    woe_feed.id(EPISODES_SCRAPING_URL)
    woe_feed.link(href=BASE_URL, rel='self')
    woe_feed.description(u"Inoffizieller RSS-Feed für 'Willkommen Österreich'-Episoden")
    woe_feed.language('de')
    for episode in episodes:
        episode_entry = woe_feed.add_entry()
        episode_entry.id(episode.page)
        episode_entry.link(href=episode.page, rel='alternate')
        episode_entry.title(u"Folge {0} - {1}: {2}".format(episode.num, episode.date, episode.description))
        for video in episode.videos:
            # BUG FIX: the enclosure type must be a MIME type; the bare 'mp4'
            # previously emitted is invalid.
            episode_entry.enclosure(url=video, length=0, type='video/mp4')
    if output_filepath:
        woe_feed.atom_file(output_filepath)
    else:
        print(woe_feed.atom_str(pretty=True))
def generate_feed(channel_dict, file_metadatas):
    """Return a pretty-printed RSS string for the channel and its files.

    Args:
        channel_dict: Requires 'url', 'title', 'description'; may optionally
            carry 'category'/'subcategory' for the iTunes category tag.
        file_metadatas: Items passed one by one to add_entry().
    """
    fg = FeedGenerator()
    fg.load_extension("podcast")
    fg.link(href=channel_dict["url"], rel="self")
    fg.title(channel_dict["title"])
    fg.description(channel_dict["description"])
    # Idiom fix: dict.get() replaces the two try/except KeyError blocks, and
    # the itunes_category call is skipped when no category is configured
    # (previously it was invoked with None/None).
    category = channel_dict.get("category")
    subcategory = channel_dict.get("subcategory")
    if category is not None:
        fg.podcast.itunes_category(category, subcategory)
    for file_metadata in file_metadatas:
        add_entry(fg, file_metadata)
    return fg.rss_str(pretty=True)
class TestExtensionGeo(unittest.TestCase):
    """Tests for the feedgen 'geo' extension."""

    GEO_NS = {'georss': 'http://www.georss.org/georss'}

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('geo')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_geoEntryItems(self):
        entry = self.fg.add_item()
        entry.title('y')
        entry.geo.point('42.36 -71.05')
        assert entry.geo.point() == '42.36 -71.05'
        # The point must survive serialization into the RSS document.
        root = etree.fromstring(self.fg.rss_str())
        point = root.xpath('/rss/channel/item/georss:point/text()',
                           namespaces=self.GEO_NS)
        assert point == ['42.36 -71.05']
def generate_feed(input_file, output_file):
    """Read items from *input_file* and write the podcast RSS to *output_file*."""
    fg = FeedGenerator()
    fg.load_extension('podcast', rss=True)

    ## RSS tags
    # Required
    fg.title(TITLE)
    fg.link(href=LINK)
    fg.description(DESCRIPTION)
    # Optional
    fg.language('en')
    fg.image(url=IMAGE_URL, title=TITLE, link=LINK)
    fg.ttl(720)
    # BUG FIX: RSS <webMaster> is the *email address* of the person
    # responsible for technical issues; it previously carried the name.
    fg.webMaster(CONTACT['email'])
    now = datetime.datetime.now()
    tz = pytz.timezone('Europe/Amsterdam')
    fg.pubDate(tz.localize(now))
    # iTunes
    fg.podcast.itunes_author('Dan LeBatard')
    fg.podcast.itunes_category(itunes_category='Sports & Recreation', itunes_subcategory='Professional')
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit='clean')
    fg.podcast.itunes_owner(name=CONTACT['name'], email=CONTACT['email'])

    # Add items
    items = read_items(input_file)
    for item in items:
        fe = fg.add_entry()
        fe.id(item['guid'])
        fe.title(item['title'])
        fe.description(item['description'])
        fe.enclosure(item['link'], 0, 'audio/mpeg')
        fe.pubdate(item['pubDate'])

    # Write the file (the former dead rss_str() call was removed; rss_file
    # performs the same rendering).
    fg.rss_file(output_file)
def create_mixed_feed(remix_feed, location, output_dir, just_rss=False):
    """Create an rss feed for mixed sessions.

    location is the hostname and folder, like 'http://abc.com/remix/
    output_dir is the folder to write mixed sessions to
    """
    feed = FeedGenerator()
    feed.load_extension('podcast')
    feed.id(location)
    feed.title(remix_feed.title)
    feed.subtitle('this is only a remix')
    feed.link(href=os.path.join(location, 'rss.xml'), rel='self')
    # Prepare a clean output directory (unless only the RSS is regenerated).
    if os.path.exists(output_dir):
        if not just_rss:
            print('output directory exists, overwriting...')
            shutil.rmtree(output_dir)
            os.mkdir(output_dir)
    else:
        os.mkdir(output_dir)
    for session in remix_feed.sessions:
        entry = feed.add_entry()
        if not just_rss:
            mixed = mix_session(session)
            mixed.export(os.path.join(output_dir, with_mp3_ext(session.title)),
                         format='mp3')
        enclosure_url = os.path.join(location,
                                     urlparse.quote(with_mp3_ext(session.title)))
        entry.id(enclosure_url)
        entry.title(session.title)
        entry.description(session.description)
        entry.enclosure(enclosure_url, 0, 'audio/mpeg')
    feed.rss_file(os.path.join(output_dir, 'rss.xml'), pretty=True)
def makePassThroughRss(self):
    """Build and write the Hype Machine pass-through podcast for this mode."""
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://hypecast.blackmad.com/' + self.mode)
    fg.title('Hype Machine PassThru Radio: ' + self.mode)
    fg.author({'name': 'David Blackmad', 'email': '*****@*****.**'})
    fg.logo('http://themelkerproject.com/wp-content/uploads/2013/10/the-hype-machine.jpg')
    fg.language('en')
    fg.link(href='http://hypecast.blackmad.com/' + self.mode)
    fg.description('Hype Machine PassThru: ' + self.mode)
    for s in self.songs:
        fe = fg.add_entry()
        fe.title(self.mk_song_id(s))
        fe.id(s['mediaid'])
        fe.description(s['description'])
        fe.podcast.itunes_image(s['thumb_url'])
        # BUG FIX: RSS requires a 'length' attribute on <enclosure> (the old
        # '# add length' TODO); the true byte size is unknown here, so use the
        # conventional '0' placeholder.
        fe.enclosure(url='http://hypecast.blackmad.com/%s/%s' % ('hypecasts', s['filename']),
                     length='0',
                     type="audio/mpeg")
    podcast_xml_file = os.path.join(self.output_dir, 'podcast.xml')
    fg.rss_file(podcast_xml_file)