def genero_feed(episodesList):
    """Build the 'All You Can Dance' podcast feed and write it to ``rssfile``.

    Each entry of ``episodesList`` is indexed as: [1] title, [2] page link,
    [3] media URL, [4] publication date. Does nothing when the list is empty.
    """
    if not episodesList:
        return

    # Static, show-level metadata for the feed.
    p = Podcast()
    p.name = "All You Can Dance by Dino Brawn"
    p.description = "Feed Podcast non ufficiale di All You Can Dance by Dino Brown - Powered By Andrea Draghetti"
    p.website = "https://onedance.fm/"
    p.explicit = True
    p.image = "https://rss.draghetti.it/allyoucandance_image.jpg"
    p.feed_url = "https://rss.draghetti.it/allyoucandance.xml"
    p.copyright = "One Dance"
    p.language = "it-IT"

    for details in episodesList:
        entry = Episode()
        entry.title = details[1].encode("ascii", "ignore")
        entry.link = details[2].encode("ascii", "ignore")
        # The size (30 MB) is a statistical estimate based on past episodes.
        entry.media = Media(details[3], 30000000, type="audio/x-m4a", duration=None)
        entry.publication_date = details[4]
        p.episodes.append(entry)

    # Serialize the feed to the module-level output path.
    p.rss_file(rssfile, minimize=False)
def genero_feed(puntateList):
    """Build the 'Il Ruggito del Coniglio' podcast feed and write it to ``rssfile``.

    Each entry of ``puntateList`` is indexed as: [0] title, [1] page link,
    [2] date string "DD/MM/YYYY" (may be empty), [3] media URL, [4] media size.
    Does nothing when the list is empty.
    """
    if not puntateList:
        return

    # Static, show-level metadata for the feed.
    p = Podcast()
    p.name = "Il Ruggito del Coniglio"
    p.description = "Il Ruggito del Coniglio, il programma cult di Radio 2 condotto da Marco Presta e Antonello Dose, racconta l'attualita con folgorante ironia."
    p.website = "http://www.raiplayradio.it/programmi/ilruggitodelconiglio/"
    p.explicit = True
    p.image = "https://rss.draghetti.it/ruggitodelconiglio_image.jpg"
    p.feed_url = "https://rss.draghetti.it/ruggitodelconiglio.xml"
    p.copyright = "Rai Radio 2"
    p.language = "it-IT"

    for puntata in puntateList:
        episode = Episode()
        episode.title = puntata[0].encode("ascii", "ignore")
        episode.link = puntata[1]
        # The file size is approximate.
        episode.media = Media(puntata[3], puntata[4])
        if puntata[2]:
            # Date arrives as DD/MM/YYYY; split once instead of three times.
            parts = puntata[2].split("/")
            episode.publication_date = datetime.datetime(
                int(parts[2]), int(parts[1]), int(parts[0]),
                10, 00, tzinfo=pytz.utc)
        else:
            # No date on the page: fall back to "now" in UTC.
            episode.publication_date = pytz.utc.localize(datetime.datetime.utcnow())
        p.episodes.append(episode)

    p.rss_file(rssfile, minimize=False)
def genero_feed(episodesList):
    """Build the unofficial 'NECST Tech Time' podcast feed and write it to ``rssfile``.

    Each entry of ``episodesList`` is indexed as: [1] title, [2] page link,
    [3] media URL, [4] publication date. Does nothing when the list is empty.
    """
    if not episodesList:
        return

    # Static, show-level metadata for the feed.
    feed = Podcast()
    feed.name = "NECST Tech Time"
    feed.description = "Feed Podcast non ufficiale di NECST Tech Time - Powered By Andrea Draghetti"
    feed.website = "http://www.poliradio.it/podcast/programmi/34/necst-tech-time"
    feed.explicit = True
    feed.image = "https://rss.draghetti.it/necst_image.jpg"
    feed.feed_url = "https://rss.draghetti.it/necstpodcast.xml"
    feed.copyright = "Poli Radio"
    feed.language = "it-IT"

    for item in episodesList:
        ep = Episode()
        ep.title = item[1].encode("ascii", "ignore")
        ep.link = item[2].encode("ascii", "ignore")
        # The size (30 MB) is a statistical estimate based on past episodes.
        ep.media = Media(item[3], 30000000, type="audio/x-m4a", duration=None)
        ep.publication_date = item[4]
        feed.episodes.append(ep)

    # Serialize the feed to the module-level output path.
    feed.rss_file(rssfile, minimize=False)
def genero_feed(puntateList):
    """Build the 'Pascal' (Rai Radio 2) podcast feed and write it to ``rssfile``.

    Each entry of ``puntateList`` is indexed as: [0] title, [1] page link,
    [2] date string "DD/MM/YYYY" (may be empty), [3] media URL, [4] media size.
    Does nothing when the list is empty.
    """
    if not puntateList:
        return

    # Static, show-level metadata for the feed.
    p = Podcast()
    p.name = "Pascal Rai Radio 2"
    p.description = "Pascal un programma di Matteo Caccia in onda su Radio2 che racconta storie di vita. Episodi grandi o piccoli, stravolgenti o minuti, momenti che hanno modificato per sempre la nostra vita o che, anche se di poco, l'hanno indirizzata. Storie che sono il termometro della temperatura di ognuno di noi e che in parte raccontano chi siamo. "
    p.website = "http://www.raiplayradio.it/programmi/pascal/"
    p.explicit = True
    p.image = "https://rss.draghetti.it/pascal_image.jpg"
    p.feed_url = "https://rss.draghetti.it/pascal.xml"
    p.copyright = "Rai Radio 2"
    p.language = "it-IT"

    for puntata in puntateList:
        episode = Episode()
        episode.title = puntata[0].encode("ascii", "ignore")
        episode.link = puntata[1]
        # The file size is approximate.
        episode.media = Media(puntata[3], puntata[4])
        if puntata[2]:
            # Date arrives as DD/MM/YYYY; split once instead of three times.
            parts = puntata[2].split("/")
            episode.publication_date = datetime.datetime(
                int(parts[2]), int(parts[1]), int(parts[0]),
                20, 00, tzinfo=pytz.utc)
        else:
            # No date on the page: fall back to "now" in UTC.
            episode.publication_date = pytz.utc.localize(datetime.datetime.utcnow())
        p.episodes.append(episode)

    p.rss_file(rssfile, minimize=False)
def genero_feed(episodesList):
    """Build the 'NECST Tech Time' podcast feed and write it to ``rssfile``.

    Each entry of ``episodesList`` is indexed as: [1] title, [2] page link,
    [3] media URL, [4] publication date. Does nothing when the list is empty.
    """
    if not episodesList:
        return

    # Static, show-level metadata for the feed.
    feed = Podcast()
    feed.name = "NECST Tech Time"
    feed.description = "The NECSTLab (Novel, Emerging Computing System Technologies Laboratory) is a laboratory inside DEIB department of Politecnico di Milano, where there are a number of different research lines on advanced topics in computing systems: from architectural characteristics, to hardware-software codesign methodologies, to security and dependability issues of complex system architectures (scaling from mobile devices to large virtualized datacenters)."
    feed.website = "http://www.poliradio.it/podcast/programmi/34/necst-tech-time"
    feed.explicit = True
    feed.image = "https://rss.draghetti.it/necst_image.jpg"
    feed.feed_url = "https://rss.draghetti.it/necstpodcast.xml"
    feed.copyright = "Poli Radio"
    feed.language = "it-IT"

    for item in episodesList:
        ep = Episode()
        ep.title = item[1].encode("ascii", "ignore")
        ep.link = item[2].encode("ascii", "ignore")
        # The size (30 MB) is a statistical estimate based on past episodes.
        ep.media = Media(item[3], 30000000, type="audio/x-m4a", duration=None)
        ep.publication_date = item[4]
        feed.episodes.append(ep)

    # Serialize the feed to the module-level output path.
    feed.rss_file(rssfile, minimize=False)
def scrape_by_program(program, web_session=None, params=params):
    """Scrape an NPR program's page and return its podcast RSS feed as a string.

    :param program: program slug; one of 'morning-edition',
        'all-things-considered', 'weekend-edition-saturday',
        'weekend-edition-sunday'.
    :param web_session: optional ``requests_html.HTMLSession``. The previous
        default created a single session at import time and silently shared it
        across every call (def-time mutable default); ``None`` now means
        "create a fresh session for this call". Callers passing their own
        session are unaffected.
    :param params: configuration mapping (module-level default, captured at
        definition time as before).
    :return: the rendered RSS feed string.
    :raises WebFormatException: if ``program`` is not a known slug.
    """
    if web_session is None:
        web_session = requests_html.HTMLSession()

    podcast = Podcast()
    podcast.explicit = False
    podcast.website = params[PARAMS_BASEURL].format(program=program)

    # Per-program static metadata (name, description, artwork).
    if program == 'morning-edition':
        podcast.name = "NPR Morning Edition"
        podcast.description = \
            """Every weekday for over three decades, Morning Edition has taken listeners around the country and the world with two hours of multi-faceted stories and commentaries that inform, challenge and occasionally amuse. Morning Edition is the most listened-to news radio program in the country."""
        podcast.image = 'https://media.npr.org/assets/img/2018/08/06/npr_me_podcasttile_sq-4036eb96471eeed96c37dfba404bb48ea798e78c-s200-c85.jpg'
    elif program == 'all-things-considered':
        podcast.name = "NPR All Things Considered"
        podcast.description = \
            """NPR's afternoon news show"""
        podcast.image = 'https://media.npr.org/assets/img/2018/08/06/npr_atc_podcasttile_sq-bcc33a301405d37aa6bdcc090f43d29264915f4a-s200-c85.jpg'
    elif program == 'weekend-edition-saturday':
        podcast.name = "NPR Weekend Edition Saturday"
        podcast.description = \
            """NPR morning news on Saturday"""
        podcast.image = 'https://media.npr.org/assets/img/2019/02/26/we_otherentitiestemplatesat_sq-cbde87a2fa31b01047441e6f34d2769b0287bcd4-s200-c85.png'
    elif program == 'weekend-edition-sunday':
        podcast.name = "NPR Weekend Edition Sunday"
        podcast.description = \
            """NPR morning news show on Sunday"""
        podcast.image = 'https://media.npr.org/assets/img/2019/02/26/we_otherentitiestemplatesun_sq-4a03b35e7e5adfa446aec374523a578d54dc9bf5-s200-c85.png'
    else:
        raise WebFormatException(f"program { program } not found")

    # Populate episodes from the scraped page, then render the feed.
    scrape(web_session, params, program, podcast)
    rssfeed = podcast.rss_str(minimize=False)
    #log.debug(f"\n\nfeed { rssfeed }")
    return rssfeed
def create_podcast(name, desc, website):
    """Return a Podcast preconfigured for the Zara Hat Kay feed.

    :param name: feed title.
    :param desc: feed description.
    :param website: feed website URL.
    :return: a fully initialized :class:`Podcast`.
    """
    podcast = Podcast()
    podcast.name = name
    podcast.description = desc
    podcast.authors = [Person("Dawn News", "*****@*****.**")]
    podcast.website = website
    # Fixed hosting details for artwork and the feed endpoint.
    podcast.image = "http://3.15.38.214/zarahatkay/cover_art.png"
    podcast.language = "en-US"
    podcast.feed_url = "http://3.15.38.214/zarahatkay"
    podcast.category = Category("News & Politics")
    podcast.explicit = False
    return podcast
def generate_rss_from_articles(feed_settings, articles): """ Creates a FeedGenerator feed from a set of feed_entries. :param feed_settings: a feed_settings object containing :param articles: :return: """ # Initialize the feed podcast = Podcast() podcast.name = feed_settings.title author = Person(feed_settings.author['name'], feed_settings.author['email']) podcast.authors.append(author) podcast.website = feed_settings.source_page_url podcast.copyright = feed_settings.copyright podcast.description = feed_settings.subtitle podcast.summary = feed_settings.subtitle podcast.subtitle = feed_settings.subtitle podcast.language = 'vi' podcast.feed_url = feed_settings.output_url podcast.image = feed_settings.img_url podcast.category = Category('Music', 'Music Commentary') podcast.explicit = False # p.complete = False # p.new_feed_url = 'http://example.com/new-feed.rss' podcast.owner = author # p.xslt = "http://example.com/stylesheet.xsl" vt_tz = pytz.timezone('Asia/Ho_Chi_Minh') pastdate = datetime.datetime(2000, 1, 1, 0, 0).astimezone(vt_tz) # podcast.last_updated = datetime.datetime.now(vt_tz) for article in articles: episode = podcast.add_episode() episode.id = article.link episode.title = article.title episode.summary = article.description episode.link = article.link # episode.authors = [Person('Lars Kiesow', '*****@*****.**')] episode.publication_date = article.pub_date pastdate = max(pastdate, article.pub_date) # episode.media = Media.create_from_server_response(article.media, size=None, duration=None) episode.media = Media(article.media, size=None, duration=None, type=article.type) podcast.last_updated = pastdate podcast.publication_date = pastdate return podcast
def genero_feed(puntateList):
    """Build the 'Pascal' (Rai Radio 2) podcast feed and write it to ``rssfile``.

    Each entry of ``puntateList`` is indexed as: [0] title, [1] page link,
    [2] date string "DD/MM/YYYY" (may be empty), [3] media URL, [4] media size.
    Does nothing when the list is empty.
    """
    if not puntateList:
        return

    # Static, show-level metadata for the feed.
    feed = Podcast()
    feed.name = "Pascal Rai Radio 2"
    feed.description = "Pascal un programma di Matteo Caccia in onda su Radio2 che racconta storie di vita. Episodi grandi o piccoli, stravolgenti o minuti, momenti che hanno modificato per sempre la nostra vita o che, anche se di poco, l'hanno indirizzata. Storie che sono il termometro della temperatura di ognuno di noi e che in parte raccontano chi siamo. "
    feed.website = "http://www.raiplayradio.it/programmi/pascal/"
    feed.explicit = True
    feed.image = "https://rss.draghetti.it/pascal_image.jpg"
    feed.feed_url = "https://rss.draghetti.it/pascal.xml"
    feed.copyright = "Rai Radio 2"
    feed.language = "it-IT"

    for entry in puntateList:
        ep = Episode()
        ep.title = entry[0].encode("ascii", "ignore")
        ep.link = entry[1]
        # The file size is approximate.
        ep.media = Media(entry[3], entry[4])
        if entry[2]:
            # Date string is DD/MM/YYYY.
            date_parts = entry[2].split("/")
            ep.publication_date = datetime.datetime(
                int(date_parts[2]), int(date_parts[1]), int(date_parts[0]),
                20, 00, tzinfo=pytz.utc)
        else:
            ep.publication_date = pytz.utc.localize(
                datetime.datetime.utcnow())
        feed.episodes.append(ep)

    feed.rss_file(rssfile, minimize=False)
from podgen import Podcast, Episode, Media # Create the Podcast p = Podcast( name="365 days of plops", description="Every shit I take " "you get to hear it", website="http://example.org/animals-alphabetically", explicit=False, ) p.image = "https://github.com/ssk8/365days_of_plops/raw/main/pooping.jpg" p.episodes += [ Episode( title="just a turd", media=Media( "https://github.com/ssk8/365days_of_plops/raw/main/poop01.mp3", 11932295), summary="With an English name adapted directly from Afrikaans " '-- literally meaning "earth pig" -- this fascinating ' "animal has both circular teeth and a knack for " "digging.", ), Episode( title="ya, das ist die heiße scheiße", media=Media( "https://github.com/ssk8/365days_of_plops/raw/main/poop02.mp3", 15363464), summary="Colon evacuation " "hurt my anus ", ),
def setUp(self):
    """Build a fully populated Podcast and record every value assigned to it.

    Each value is also stored on the test instance so individual tests can
    compare the generated feed output against the expected inputs.
    """
    # Pin the locale to "C" so date formatting in the feed is deterministic.
    # The previous locale is saved (note: it is not restored here).
    self.existing_locale = locale.setlocale(locale.LC_ALL, None)
    locale.setlocale(locale.LC_ALL, 'C')

    fg = Podcast()

    # XML namespaces used when asserting on the generated feed document.
    self.nsContent = "http://purl.org/rss/1.0/modules/content/"
    self.nsDc = "http://purl.org/dc/elements/1.1/"
    self.nsItunes = "http://www.itunes.com/dtds/podcast-1.0.dtd"
    self.feed_url = "http://example.com/feeds/myfeed.rss"

    self.name = 'Some Testfeed'
    # Use character not in ASCII to catch encoding errors
    self.author = Person('Jon Døll', '*****@*****.**')
    self.website = 'http://example.com'
    self.description = 'This is a cool feed!'
    self.subtitle = 'Coolest of all'
    self.language = 'en'

    # RSS <cloud> element parameters.
    self.cloudDomain = 'example.com'
    self.cloudPort = '4711'
    self.cloudPath = '/ws/example'
    self.cloudRegisterProcedure = 'registerProcedure'
    self.cloudProtocol = 'SOAP 1.1'

    self.pubsubhubbub = "http://pubsubhubbub.example.com/"

    self.contributor = {
        'name': "Contributor Name",
        'email': 'Contributor email'
    }
    self.copyright = "The copyright notice"
    self.docs = 'http://www.rssboard.org/rss-specification'
    self.skip_days = set(['Tuesday'])
    self.skip_hours = set([23])

    self.explicit = False

    self.programname = podgen.version.name

    self.web_master = Person(email='*****@*****.**')
    self.image = "http://example.com/static/podcast.png"
    self.owner = self.author
    self.complete = True
    self.new_feed_url = "https://example.com/feeds/myfeed2.rss"
    self.xslt = "http://example.com/feed/stylesheet.xsl"

    # Mirror every recorded value onto the Podcast under test.
    fg.name = self.name
    fg.website = self.website
    fg.description = self.description
    fg.subtitle = self.subtitle
    fg.language = self.language
    fg.cloud = (self.cloudDomain, self.cloudPort, self.cloudPath,
                self.cloudRegisterProcedure, self.cloudProtocol)
    fg.pubsubhubbub = self.pubsubhubbub
    fg.copyright = self.copyright
    fg.authors.append(self.author)
    fg.skip_days = self.skip_days
    fg.skip_hours = self.skip_hours
    fg.web_master = self.web_master
    fg.feed_url = self.feed_url
    fg.explicit = self.explicit
    fg.image = self.image
    fg.owner = self.owner
    fg.complete = self.complete
    fg.new_feed_url = self.new_feed_url
    fg.xslt = self.xslt

    self.fg = fg

    # Always trigger warnings, but suppress their console output during tests.
    warnings.simplefilter("always")

    def noop(*args, **kwargs):
        pass

    warnings.showwarning = noop
# NOTE(review): this fragment depends on surrounding code (outside this view)
# that defines `updated_session_items`, `outfile` and `rss_filename`.
json.dump(updated_session_items, outfile, indent=2)
print('>>> wrote fresh sessions.json file')

# write the new rss file
p = Podcast()
p.name = "The Objectivism Seminar"
p.category = Category("Society & Culture", "Philosophy")
p.language = "en-US"
p.explicit = True
p.description = (
    "A weekly online conference call to systematically study " +
    "the philosophy of Objectivism via the works of prominent Rand scholars.")
p.website = "https://www.ObjectivismSeminar.com"
p.image = "https://www.ObjectivismSeminar.com/assets/images/atlas-square.jpg"
p.feed_url = "https://www.ObjectivismSeminar.com/archives/rss"
p.authors = [Person("Greg Perkins, Host", "*****@*****.**")]
p.owner = Person("Greg Perkins", "*****@*****.**")
# One Episode per recorded session, built from the refreshed JSON items.
p.episodes += [
    Episode(title=x['title'],
            media=Media(x['link'], type="audio/mpeg", size=x['length']),
            id=x['GUID'],
            publication_date=x['pubDate'],
            summary=x['description'])
    for x in updated_session_items
]
p.rss_file(rss_filename)
print('>>> wrote fresh rss.xml file')
def lambda_handler(event, context):
    """AWS Lambda entry point: rebuild the podcast RSS feed from DynamoDB.

    Item 0 of the table carries the feed-level metadata; every other item
    becomes one episode. When an episode record lacks media info (file name,
    size, duration), the MP3 is downloaded from S3, analyzed, and the info is
    written back to the table. The finished feed is uploaded to S3.

    :param event: Lambda event payload (unused).
    :param context: Lambda context object (unused).
    """
    print('Starting cccRssBuilder Lambda function')

    # Get episodes from DynamoDB
    episodes = query_episodes()
    episodes.sort(key=lambda x: x['episode-num'])

    # Create the podcast feed
    # Main podcast info comes from "episode 0"
    episodeInfo = episodes[0]
    separator = ', '
    p = Podcast()
    p.name = episodeInfo['name']
    p.description = episodeInfo['description']
    p.website = episodeInfo['website']
    p.explicit = episodeInfo['explicit']
    p.image = episodeInfo['image']
    p.feed_url = episodeInfo['feed-url']
    p.language = episodeInfo['language']
    p.category = Category(episodeInfo['category'], episodeInfo['subcategory'])
    p.owner = Person(episodeInfo['owner-name'], episodeInfo['owner-email'])
    p.authors = [Person(episodeInfo['owner-name'], episodeInfo['owner-email'])]

    # Process each episode
    for episode in episodes:
        # Skip "Episode 0"
        if episode['episode-num'] == 0:
            continue

        # Check if episode contains media file info (name, duration, size). If not, add it to db and episode object.
        if 'media-file' not in episode:
            episodeNum = episode['episode-num']
            print('Analyzing media file for episode', episodeNum)
            # Media files follow the fixed naming scheme ccc-NNN-<pub-date>.mp3.
            mediaFile = 'ccc-{:03d}-{}.mp3'.format(int(episodeNum), episode['pub-date'])
            print('Media file:', mediaFile)
            localMediaFile = '/tmp/' + mediaFile
            s3 = boto3.client('s3')
            s3.download_file('kwksolutions.com', 'ccc/media/' + mediaFile, localMediaFile)
            # Try to analyze the mp3 file - looking for duration and file size
            try:
                audio = MP3(localMediaFile)
            except:
                # NOTE(review): a single unreadable file aborts the entire
                # rebuild (implicit None return) — confirm this is intended.
                print('Not an MP3 file!')
                return
            duration = round(audio.info.length)
            hours = int(duration / 3600)
            minutes = int((duration % 3600) / 60)
            seconds = duration % 60
            # Hours are omitted from the duration string when zero.
            if hours == 0:
                durationStr = '{:02d}:{:02d}'.format(minutes, seconds)
            else:
                durationStr = '{:02d}:{:02d}:{:02d}'.format(
                    hours, minutes, seconds)
            size = str(os.path.getsize(localMediaFile))
            # Persist the computed info, then patch the in-memory record too.
            update_episode(episodeNum, mediaFile, size, durationStr)
            episode['media-file'] = mediaFile
            episode['size'] = size
            episode['duration'] = durationStr

        # Figure out all the info needed for the episode object
        mediaURL = 'https://www.kwksolutions.com/ccc/media/' + episode[
            'media-file']
        durationList = episode['duration'].split(':')
        secs = int(durationList[-1])
        mins = int(durationList[-2])
        try:
            h = int(durationList[-3])
        except:
            # Durations under one hour have no hours component.
            h = 0
        pubdateList = episode['pub-date'].split('-')
        year = int(pubdateList[0])
        month = int(pubdateList[1])
        day = int(pubdateList[2])

        # Build the episode object
        e = p.add_episode()
        e.id = mediaURL
        e.title = 'Episode ' + str(episode['episode-num'])
        e.summary = episode['description']
        e.link = 'http://christcommunitycarmel.org/get-involved/podcasts'
        # NOTE(review): pytz.timezone('EST') is a fixed UTC-5 zone (no DST);
        # assumed intentional — confirm.
        e.publication_date = datetime.datetime(year, month, day, 12, 00, 00,
                                               tzinfo=pytz.timezone('EST'))
        e.media = Media(mediaURL,
                        episode['size'],
                        duration=datetime.timedelta(hours=h,
                                                    minutes=mins,
                                                    seconds=secs))

    # Write the rss file
    print('Writing RSS file to S3')
    rssLocalFile = '/tmp/podcast.rss'
    rssS3File = 'ccc/podcast.rss'
    p.rss_file(rssLocalFile)
    s3 = boto3.client('s3')
    s3.upload_file(rssLocalFile, 'kwksolutions.com', rssS3File,
                   ExtraArgs={'ContentType': 'text/xml'})
    return
def setUp(self):
    """Build a fully populated Podcast and record every value assigned to it.

    Each value is also stored on the test instance so individual tests can
    compare the generated feed output against the expected inputs.
    """
    fg = Podcast()

    # XML namespaces used when asserting on the generated feed document.
    self.nsContent = "http://purl.org/rss/1.0/modules/content/"
    self.nsDc = "http://purl.org/dc/elements/1.1/"
    self.nsItunes = "http://www.itunes.com/dtds/podcast-1.0.dtd"
    self.feed_url = "http://example.com/feeds/myfeed.rss"

    self.name = 'Some Testfeed'
    self.author = Person('John Doe', '*****@*****.**')
    self.website = 'http://example.com'
    self.description = 'This is a cool feed!'
    self.subtitle = 'Coolest of all'
    self.language = 'en'

    # RSS <cloud> element parameters.
    self.cloudDomain = 'example.com'
    self.cloudPort = '4711'
    self.cloudPath = '/ws/example'
    self.cloudRegisterProcedure = 'registerProcedure'
    self.cloudProtocol = 'SOAP 1.1'

    self.pubsubhubbub = "http://pubsubhubbub.example.com/"

    self.contributor = {'name': "Contributor Name",
                        'email': 'Contributor email'}
    self.copyright = "The copyright notice"
    self.docs = 'http://www.rssboard.org/rss-specification'
    self.skip_days = set(['Tuesday'])
    self.skip_hours = set([23])

    self.explicit = False

    self.programname = podgen.version.name

    self.web_master = Person(email='*****@*****.**')
    self.image = "http://example.com/static/podcast.png"
    self.owner = self.author
    self.complete = True
    self.new_feed_url = "https://example.com/feeds/myfeed2.rss"
    self.xslt = "http://example.com/feed/stylesheet.xsl"

    # Mirror every recorded value onto the Podcast under test.
    fg.name = self.name
    fg.website = self.website
    fg.description = self.description
    fg.subtitle = self.subtitle
    fg.language = self.language
    fg.cloud = (self.cloudDomain, self.cloudPort, self.cloudPath,
                self.cloudRegisterProcedure, self.cloudProtocol)
    fg.pubsubhubbub = self.pubsubhubbub
    fg.copyright = self.copyright
    fg.authors.append(self.author)
    fg.skip_days = self.skip_days
    fg.skip_hours = self.skip_hours
    fg.web_master = self.web_master
    fg.feed_url = self.feed_url
    fg.explicit = self.explicit
    fg.image = self.image
    fg.owner = self.owner
    fg.complete = self.complete
    fg.new_feed_url = self.new_feed_url
    fg.xslt = self.xslt

    self.fg = fg

    # Always trigger warnings, but suppress their console output during tests.
    warnings.simplefilter("always")

    def noop(*args, **kwargs):
        pass

    warnings.showwarning = noop
def main(argv):
    """Walk FLAGS.dir and (re)generate a podcast for every folder holding audio.

    For each such folder this writes a feed.rss and a README.txt (with the
    Dropbox share links), rewriting them only when their content actually
    changed so Dropbox's "recent files" list is not spammed on every run.
    Note: this module is Python 2 (print statements, ``unicode``).
    """
    FLAGS(argv)
    # Load the global config that lives at the root of the scanned tree.
    global GLOBAL_CONFIG
    GLOBAL_CONFIG = json.loads(
        open(os.path.join(FLAGS.dir, GLOBAL_CONFIG_FILE_NAME)).read())
    print GLOBAL_CONFIG
    global SHARED_LINK_FETCHER
    SHARED_LINK_FETCHER = DropboxSharedLinkFetcher(
        GLOBAL_CONFIG['dropbox_link'])
    for dir_, dirs, file_names in os.walk(FLAGS.dir):
        current_folder_name = os.path.basename(os.path.normpath(dir_))
        # We only process files if they contain at least one audio file
        has_audio = False
        for fname in file_names:
            for audio_ext in AUDIO_FILES:
                if fname.endswith(audio_ext):
                    has_audio = True
        # Process if it has an audio file
        if has_audio:
            relative_path = os.path.relpath(dir_, FLAGS.dir)
            print "---------------------------------------------------"
            print "Creating Podcast for path: ", relative_path
            # Read params from config file if it exists
            title = current_folder_name
            description = None
            config_file_path = os.path.join(dir_, CONFIG_FILE)
            if os.path.isfile(config_file_path):
                config = json.loads(open(config_file_path).read())
                if 'title' in config:
                    title = config['title']
                if 'description' in config:
                    description = config['description']
            if not description:
                description = 'No description provided.'
            # Create a podcast here
            p = Podcast(name=title,
                        description=description,
                        explicit=False,
                        website='http://google.com',
                        withhold_from_itunes=True)
            audio_files = []
            # Get all files needed for the podcast
            for file_name in file_names:
                filename_without_extension = os.path.splitext(file_name)[0]
                relative_file_path = os.path.join(relative_path, file_name)
                full_path = os.path.join(dir_, file_name)
                full_path_without_extension, file_extension = os.path.splitext(
                    full_path)
                if file_extension in AUDIO_FILES:
                    print "Adding episode: ", relative_file_path
                    # add it
                    # print mutagen.File(full_path)
                    # Check to see if there's a corresponding image file
                    image_url = None
                    relative_image_path = None
                    png_file_name = filename_without_extension + '.png'
                    jpg_file_name = filename_without_extension + '.jpg'
                    png_path = os.path.join(dir_, png_file_name)
                    jpg_path = os.path.join(dir_, jpg_file_name)
                    if os.path.isfile(png_path):
                        relative_image_path = os.path.join(
                            relative_path, png_file_name)
                    if os.path.isfile(jpg_path):
                        relative_image_path = os.path.join(
                            relative_path, jpg_file_name)
                    if relative_image_path:
                        image_url = get_image_download_url(relative_image_path)
                    audio_files.append(
                        Track(relative_file_path,
                              taglib.File(full_path),
                              image_url=image_url))
                elif file_extension in IMAGE_FILES and filename_without_extension == 'podcast':
                    # image for the podcast should be named podcast.jpg
                    # this mean your audio files should not be named "podcast.mp3"
                    # if you have per-episode images
                    p.image = get_image_download_url(relative_file_path)
            # sort the tracks we found by their track number
            audio_files.sort(key=lambda x: x.track_number)
            # ensure that we don't have duplicate track numbers
            track_numbers = [x.track_number for x in audio_files]
            assert len(set(track_numbers)) == len(track_numbers)
            # Commented out check below because sometimes we have only a few tracks available
            #for i in range(0, len(audio_files)):
            #    assert audio_files[i].track_number == i + 1, "Error with podcast: " + relative_path
            # add episodes
            current_date_time = datetime(2016, 12, 31, 12, 0, tzinfo=pytz.utc)
            position = 1
            for audio_file in audio_files:
                episode = audio_file.get_episode()
                episode.position = position
                position += 1
                if episode.publication_date is None:
                    episode.publication_date = current_date_time
                    # We do this so that episodes appear in order
                    # NOTE(review): indentation was lost in this copy; assuming
                    # the decrement applies only to synthesized dates — confirm.
                    current_date_time -= timedelta(days=1)
                p.episodes.append(episode)
            # We only write the file if there are any changes to not spam the Dropbox recent
            # files section
            # write the file if there are any changes
            rss_path = os.path.join(dir_, 'feed.rss')
            old_rss_file_contents_date_removed = None
            if os.path.exists(rss_path):
                old_rss_file_contents = open(rss_path, 'r').read()
                # Strip the volatile build timestamp before comparing.
                old_rss_file_contents_date_removed = re.sub(
                    r'<lastBuildDate>.*</lastBuildDate>', '',
                    old_rss_file_contents)
            new_rss_file_contents = unicode(p).encode('utf8')
            new_rss_file_contents_date_removed = re.sub(
                r'<lastBuildDate>.*</lastBuildDate>', '',
                new_rss_file_contents)
            if old_rss_file_contents_date_removed != new_rss_file_contents_date_removed:
                with open(rss_path, 'wb') as w:
                    w.write(new_rss_file_contents)
            feed_relative_path = os.path.join(relative_path, 'feed.rss')
            # Write Readme file if there are any changes
            readme_path = os.path.join(dir_, 'README.txt')
            old_readme_file_contents = None
            if os.path.exists(readme_path):
                old_readme_file_contents = open(readme_path, 'r').read()
            new_readme_file_contents = 'You can find the feed for your podcast here:\n\n%s\n' % get_download_url(
                feed_relative_path)
            new_readme_file_contents += '\n\nOtherwise, try this link:\n\n%s' % get_direct_download_url(
                feed_relative_path)
            if old_readme_file_contents != new_readme_file_contents:
                with open(readme_path, 'wb') as w:
                    # We use a try/except here because there's a race condition here the first time you
                    # run this because the .rss file doesn't exist in Dropbox at this point. So on the
                    # first run, we won't write the feed.rss file (or any number of runs until the
                    # .rss files are synced)
                    try:
                        w.write(new_readme_file_contents)
                    except:
                        pass
file_list = [ f for f in os.listdir(os.path.join(base_dir, pod_dir)) if f.endswith('.mp3') ] return sorted(file_list) p = Podcast( name="Chapo Cheaters Club", description="Just taking" "what isn't mine", website=host_address, explicit=False, ) p.image = host_address + "logo.jpg" for pod in find_pods(): audio = MP3(os.path.join(base_dir, pod_dir, pod), ID3=EasyID3) if audio: title = str(*audio["title"]) else: title = pod[:~3] size = os.path.getsize(os.path.join(base_dir, pod_dir, pod)) duration = timedelta(seconds=audio.info.length) print(f'{pod}, {title}, {size}, {duration}') p.episodes += [ Episode(title=title, media=Media(f"{host_address}{pod}", size=size, duration=duration),
def create_rss(type, download):
    """Create an example podcast and print it or save it to a file.

    :param type: 'print' to dump the RSS to stdout, or 'feed.xml' to write
        the feed to a file with that name. NOTE(review): this parameter
        shadows the builtin ``type``; name kept for caller compatibility.
    :param download: object carrying the first episode's metadata (title,
        subtitle, media_url, media_size, media_duration, image_url, summary).
    """
    # Create the Podcast & initialize the feed
    default_channel = Channel.defaultChannel()

    p = Podcast()
    p.name = default_channel.name
    p.description = default_channel.description
    p.website = default_channel.website
    p.explicit = default_channel.explicit
    p.image = default_channel.image
    p.copyright = default_channel.copyright
    p.language = default_channel.language
    p.feed_url = default_channel.feed_url
    p.category = Category(default_channel.category)
    # p.category = Category('Technology', 'Podcasting')
    # p.xslt = "https://example.com/feed/stylesheet.xsl"  # URL of XSLT stylesheet
    p.authors = [Person(default_channel.authors, default_channel.authors_email)]
    p.owner = Person(default_channel.owner, default_channel.owner_email)

    # Other Attributes
    p.generator = " "

    # Others for iTunes
    # p.complete = False
    # p.new_feed_url = 'http://example.com/new-feed.rss'

    # e1 = p.add_episode()
    # e1.id = 'http://lernfunk.de/_MEDIAID_123#1'
    # e1.title = 'First Element'
    # e1.summary = htmlencode('''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Tamen
    # aberramus a proposito, et, ne longius, prorsus, inquam, Piso, si ista
    # mala sunt, placet. Aut etiam, ut vestitum, sic sententiam habeas aliam
    # domesticam, aliam forensem, ut in fronte ostentatio sit, intus veritas
    # occultetur? Cum id fugiunt, re eadem defendunt, quae Peripatetici,
    # verba <3.''')
    # e1.link = 'http://example.com'
    # e1.authors = [Person('Lars Kiesow', '*****@*****.**')]
    # e1.publication_date = datetime.datetime(2014, 5, 17, 13, 37, 10, tzinfo=pytz.utc)
    # # e1.media = Media("http://example.com/episodes/loremipsum.mp3", 454599964,
    # #                  duration=
    # #                  datetime.timedelta(hours=1, minutes=32, seconds=19))
    # e1.media = Media("http://example.com/episodes/loremipsum.mp3", 454599964)

    # Add some episodes
    p.episodes += [
        Episode(title=download.title,
                subtitle=download.subtitle,
                # id=str(uuid.uuid4()),
                position=2,
                media=Media(download.media_url,
                            size=download.media_size,
                            duration=timedelta(seconds=download.media_duration)),
                image=download.image_url,
                publication_date=datetime(year=2021, month=1, day=8, hour=10,
                                          minute=0, tzinfo=pytz.utc),
                summary=download.summary),
        Episode(title="Episode 2 - The Crazy Ones",
                subtitle="this is a cool episode, this is for th crazy ones",
                position=1,
                image="https://github.com/oliverbarreto/PersonalPodcast/raw/main/site-logo-1400x1400.png",
                media=Media("https://github.com/oliverbarreto/PersonalPodcast/raw/main/downloaded_with_pytube_Apple%20Steve%20Jobs%20Heres%20To%20The%20Crazy%20Ones.mp4",
                            type="audio/mpeg",
                            size=989,
                            duration=timedelta(hours=0, minutes=1, seconds=1)),
                publication_date=datetime(year=2021, month=1, day=6, hour=10,
                                          minute=0, tzinfo=pytz.utc),
                summary=htmlencode("wow wow wow summary")),
        Episode(title="Episode 3 - The Super Crazy",
                subtitle="crazy ones revisited",
                position=0,
                image="https://github.com/oliverbarreto/PersonalPodcast/raw/main/site-logo-1400x1400.png",
                media=Media("https://drive.google.com/file/d/1X5Mwa8V0Su1IDqhcQL7LdzEY0VaMC1Nn",
                            type="audio/mpeg",
                            size=989,
                            duration=timedelta(hours=0, minutes=1, seconds=1)),
                publication_date=datetime(year=2021, month=1, day=10, hour=10,
                                          minute=0, tzinfo=pytz.utc),
                summary=download.summary)
    ]

    # Should we just print out, or write to file?
    if type == 'print':
        # Print
        print_enc(p.rss_str())
    elif type == 'feed.xml':
        # Write to file
        p.rss_file(type, minimize=False)
        print("\n")
        print("feed.xml created !!!")