def test_warningWhenSettingSizeToZero(self):
    """A UserWarning is issued when size is missing, None or zero, but not for valid values."""
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        self.assertEqual(len(w), 0)
        # Set size to zero, triggering a warning
        m = Media(self.url, type=self.type)
        self.assertEqual(len(w), 1)
        assert issubclass(w[-1].category, UserWarning)
        # No warning when setting to an actual integer
        m.size = 253634535
        self.assertEqual(len(w), 1)
        # Nor when using a string
        m.size = "15kB"
        self.assertEqual(len(w), 1)
        # Warning when setting to None
        m.size = None
        self.assertEqual(len(w), 2)
        assert issubclass(w[-1].category, UserWarning)
        # Or zero
        m.size = 0
        self.assertEqual(len(w), 3)
        assert issubclass(w[-1].category, UserWarning)
def test_assigningUrl(self):
    """Reassigning url works; assigning None or "" raises and leaves the old value intact."""
    m = Media(self.url)
    another_url = "http://example.com/2016/5/17/The+awful+episode.mp3"
    m.url = another_url
    assert m.url == another_url
    # Test that setting url to None or empty string fails
    self.assertRaises((ValueError, TypeError), setattr, m, "url", None)
    assert m.url == another_url
    self.assertRaises((ValueError, TypeError), setattr, m, "url", "")
    assert m.url == another_url
def test_assigningType(self):
    """Reassigning type works; assigning None or "" raises and leaves the old value intact."""
    m = Media(self.url, self.size, self.type)
    another_type = "audio/x-mpeg-3"
    m.type = another_type
    assert m.type == another_type
    # Test that setting type to None or empty string fails
    self.assertRaises((ValueError, TypeError), setattr, m, "type", None)
    assert m.type == another_type
    self.assertRaises((ValueError, TypeError), setattr, m, "type", "")
    assert m.type == another_type
def test_calculateDuration(self, mock_tinytag):
    """populate_duration_from() reads the duration via TinyTag.get(filename)."""
    # Return the correct number of seconds from TinyTag
    seconds = 14.0 * 60.0
    mock_tinytag.get.return_value.duration = seconds
    filename = "my_little_file.mp3"
    m = Media(self.url, self.size, self.type)
    m.populate_duration_from(filename)
    self.assertAlmostEqual(m.duration.total_seconds(), seconds, places=0)
    # Check that the underlying library is used correctly
    mock_tinytag.get.assert_called_once_with(filename)
def genero_feed(episodesList):
    """Build the 'NECST Tech Time' podcast feed from episode tuples and write it to `rssfile`.

    Each element of episodesList is indexed as: [1]=title, [2]=link,
    [3]=media URL, [4]=publication date.
    NOTE(review): `rssfile` is a module-level name not defined in this
    function — confirm it is set before calling.
    """
    if episodesList:
        # Creo un nuovo podcast (create a new podcast)
        p = Podcast()
        p.name = "NECST Tech Time"
        p.description = "Feed Podcast non ufficiale di NECST Tech Time - Powered By Andrea Draghetti"
        p.website = "http://www.poliradio.it/podcast/programmi/34/necst-tech-time"
        p.explicit = True
        p.image = "https://rss.draghetti.it/necst_image.jpg"
        p.feed_url = "https://rss.draghetti.it/necstpodcast.xml"
        p.copyright = "Poli Radio"
        p.language = "it-IT"
        for episodedetails in episodesList:
            episode = Episode()
            # NOTE(review): .encode() returns bytes on Python 3, so title/link
            # would be bytes objects — confirm this runs on Python 2 or fix.
            episode.title = episodedetails[1].encode("ascii", "ignore")
            episode.link = episodedetails[2].encode("ascii", "ignore")
            # La dimensione e statistica in base alle puntante analizzate
            # (the size is a statistical estimate based on analysed episodes)
            episode.media = Media(episodedetails[3], 30000000, type="audio/x-m4a", duration=None)
            episode.publication_date = episodedetails[4]
            p.episodes.append(episode)
        # Print to stdout, just as an example
        p.rss_file(rssfile, minimize=False)
def episode(self, show, episode_dict):
    """Convert one raw episode dict into a podgen Episode.

    episode_dict keys used: 'dato' (YYYYMMDD date), 'time' (seconds past
    midnight), 'url', 'filesize', 'duration' (seconds), 'id',
    'deprecated_url', 'title', 'comment', 'author'.
    """
    # Find the publication date
    publication_datetime_str = str(episode_dict['dato']) + " 00:00:00"
    publication_datetime_format = "%Y%m%d %H:%M:%S"
    # Start out with midnight
    publication_date = datetime.datetime.strptime(
        publication_datetime_str, publication_datetime_format)
    # Then go to the specified time
    publication_datetime_naive = \
        publication_date + datetime.timedelta(seconds=episode_dict['time'])
    # And associate a timezone with that datetime
    timezone = pytz.timezone("Europe/Oslo")
    publication_datetime_aware = \
        timezone.localize(publication_datetime_naive)
    # Create our episode object
    return Episode(
        show=show,
        media=Media(episode_dict['url'],
                    episode_dict['filesize'],
                    None,
                    datetime.timedelta(seconds=episode_dict['duration'])),
        id="radiorevolt.no/podkast/episode/" + str(episode_dict['id']),
        deprecated_url=episode_dict['deprecated_url'],
        title=episode_dict['title'],
        long_summary=linkify(htmlencode(episode_dict['comment'])).replace(
            "\n", "<br/>\n"),
        publication_date=publication_datetime_aware,
        authors=[Person(name=episode_dict['author'])]
        if episode_dict['author'] else [],
    )
def get_episode(self, episode_id):
    """Fetch one Ximalaya episode's JSON detail and append it to self.podcast.

    Returns (True, play_url + '\\n') on success, or
    (False, error_message) after repeated failures.
    """
    trycount = 0
    findepisode = False
    while not findepisode:
        if trycount > 0:
            # "Reconnecting ..." (message text left untranslated — runtime string)
            print("再接続中" + str(trycount) + "......")
        if trycount > 1:
            # Give up after the second retry
            print("error url: " + self.episode_detail_url.format(episode_id) + "\n")
            return False, "error url: " + self.episode_detail_url.format(episode_id) + "\n"
        webpage = tools.get_url(self.episode_detail_url.format(episode_id), self.headers)
        detail = json.loads(webpage.decode('utf-8'))
        episode = self.podcast.add_episode()
        episode.id = str('ximalaya_' + str(episode_id))
        episode.title = detail['title']
        # print(self.podcast.name + '=====' + episode.title)
        if 'intro' in detail:
            # Escape CR/LF so the summary stays on one line
            episode.summary = detail['intro'].replace('\r', '\\r').replace('\n', '\\n')
        episode.publication_date = tools.publication_time(detail['createdAt'])
        episode.media = Media(detail['playUrl32'],
                              duration=timedelta(milliseconds=detail['duration']))
        # episode.media = Media.create_from_server_response(detail['playUrl32'],
        #     duration=timedelta(seconds=detail['duration']))
        episode.position = 1
        findepisode = True
        # NOTE(review): findepisode was just set True above, so this retry
        # branch is unreachable unless an exception handler (not visible
        # here) resets it — confirm the intended retry/backoff behaviour.
        if not findepisode:
            trycount += 1
            # "Reconnecting in 30 seconds..." (runtime string, untranslated)
            print("30秒後に再接続する.......")
            sleep(30)
    return True, detail['playUrl32'] + '\n'
def test_createFromServerResponse(self):
    """create_from_server_response() issues a HEAD request and fills size/type from headers."""
    # Mock our own requests object
    url = self.url
    type = self.type
    size = self.size

    class MyLittleRequests(object):
        @staticmethod
        def head(*args, **kwargs):
            # The code under test must follow redirects and set a timeout
            assert args[0] == url
            assert kwargs['allow_redirects'] == True
            assert 'timeout' in kwargs

            class MyLittleResponse(object):
                headers = {
                    'Content-Type': type,
                    'Content-Length': size,
                }

                @staticmethod
                def raise_for_status():
                    pass

            return MyLittleResponse

    m = Media.create_from_server_response(url, duration=self.duration,
                                          requests_=MyLittleRequests)
    self.assertEqual(m.url, url)
    self.assertEqual(m.size, size)
    self.assertEqual(m.type, type)
    self.assertEqual(m.duration, self.duration)
def genero_feed(puntateList):
    """Build the 'Il Ruggito del Coniglio' podcast feed and write it to `rssfile`.

    Each element of puntateList is indexed as: [0]=title, [1]=link,
    [2]=date string "DD/MM/YYYY" (may be falsy), [3]=media URL, [4]=size.
    NOTE(review): `rssfile` is not defined in this function — confirm it
    exists at module level.
    """
    if puntateList:
        # Creo un nuovo podcast (create a new podcast)
        p = Podcast()
        p.name = "Il Ruggito del Coniglio"
        p.description = "Il Ruggito del Coniglio, il programma cult di Radio 2 condotto da Marco Presta e Antonello Dose, racconta l'attualita con folgorante ironia."
        p.website = "http://www.raiplayradio.it/programmi/ilruggitodelconiglio/"
        p.explicit = True
        p.image = "https://rss.draghetti.it/ruggitodelconiglio_image.jpg"
        p.feed_url = "https://rss.draghetti.it/ruggitodelconiglio.xml"
        p.copyright = "Rai Radio 2"
        p.language = "it-IT"
        for puntata in puntateList:
            episode = Episode()
            # NOTE(review): .encode() returns bytes on Python 3 — confirm
            # the target interpreter version.
            episode.title = puntata[0].encode("ascii", "ignore")
            episode.link = puntata[1]
            # La dimensione del file e approssimativa (file size is approximate)
            episode.media = Media(puntata[3], puntata[4])
            if puntata[2]:
                # Date arrives as DD/MM/YYYY; publish at 10:00 UTC
                episode.publication_date = datetime.datetime(int(puntata[2].split("/")[2]),
                                                             int(puntata[2].split("/")[1]),
                                                             int(puntata[2].split("/")[0]),
                                                             10, 00, tzinfo=pytz.utc)
            else:
                episode.publication_date = pytz.utc.localize(datetime.datetime.utcnow())
            p.episodes.append(episode)
        # Print to stdout, just as an example
        p.rss_file(rssfile, minimize=False)
def test_pickling(self, mock_requests):
    """A Media object survives a pickle round-trip with all fields intact."""
    original = Media(self.url, self.size, self.type, self.duration)
    restored = pickle.loads(pickle.dumps(original))
    for attribute in ("url", "size", "type", "duration"):
        self.assertEqual(getattr(original, attribute),
                         getattr(restored, attribute))
def main():
    """Build a podcast feed from thebugle.json and print the RSS to stdout.

    Each JSON entry must provide 'id', 'title', 'file' and 'date'
    (formatted YYYY-MM-DD). Media size/type are fetched from the server,
    and duration is probed by downloading the file.
    """
    with open('thebugle.json') as f:
        episodes = json.load(f)
    p = Podcast(
        name="TimesOnLine Bugle Archive",
        description="Old Bugle episodes, podcast feed",
        website="https://www.thebuglepodcast.com/",
        explicit=False,
    )
    for episode in episodes:
        ep = p.add_episode(
            Episode(title=f"{episode['id']}: {episode['title']}"))
        # HEAD request fills in size and type
        ep.media = Media.create_from_server_response(
            f"{MEDIA_BASE_URL}/{episode['file']}")
        # Downloads the file to determine its duration — slow for many episodes
        ep.media.fetch_duration()
        date = episode['date'].split('-')
        ep.publication_date = datetime(int(date[0]), int(date[1]), int(date[2]),
                                       0, 0, 0, tzinfo=pytz.utc)
    print(p.rss_str())
def genero_feed(episodesList):
    """Build the 'All You Can Dance' podcast feed and write it to `rssfile`.

    Each element of episodesList is indexed as: [1]=title, [2]=link,
    [3]=media URL, [4]=publication date.
    NOTE(review): `rssfile` is not defined here — confirm it exists at
    module level.
    """
    if episodesList:
        # Creo un nuovo podcast (create a new podcast)
        p = Podcast()
        p.name = "All You Can Dance by Dino Brawn"
        p.description = "Feed Podcast non ufficiale di All You Can Dance by Dino Brown - Powered By Andrea Draghetti"
        p.website = "https://onedance.fm/"
        p.explicit = True
        p.image = "https://rss.draghetti.it/allyoucandance_image.jpg"
        p.feed_url = "https://rss.draghetti.it/allyoucandance.xml"
        p.copyright = "One Dance"
        p.language = "it-IT"
        for episodedetails in episodesList:
            episode = Episode()
            # NOTE(review): .encode() returns bytes on Python 3 — confirm
            # the target interpreter version.
            episode.title = episodedetails[1].encode("ascii", "ignore")
            episode.link = episodedetails[2].encode("ascii", "ignore")
            # La dimensione e statistica in base alle puntante analizzate
            # (the size is a statistical estimate based on analysed episodes)
            episode.media = Media(episodedetails[3], 30000000, type="audio/x-m4a", duration=None)
            episode.publication_date = episodedetails[4]
            p.episodes.append(episode)
        # Print to stdout, just as an example
        p.rss_file(rssfile, minimize=False)
def album(self):
    """Scrape a Qingting.fm album via its JSON APIs and write an RSS file.

    Reads self.album_info_api / self.album_list_api, builds self.podcast,
    and saves it to qingting/<album_id>.rss.
    """
    album_info_content = requests.get(self.album_info_api).content
    album_info_data = json.loads(album_info_content)
    album_list_content = requests.get(self.album_list_api).content
    album_list_data = json.loads(album_list_content)
    self.podcast = Podcast()
    self.podcast.name = album_info_data['data']['title']
    self.podcast.authors.append(Person("Powered by maijver", '*****@*****.**'))
    self.podcast.website = self.url
    self.podcast.copyright = 'cc-by'
    self.podcast.description = album_info_data['data']['description']
    # NOTE(review): 'cn' is not a valid RFC 5646 language tag — 'zh-cn'
    # is presumably intended; confirm before changing.
    self.podcast.language = 'cn'
    # Strip the '!200' thumbnail-size suffix to get the full-size image
    self.podcast.image = album_info_data['data']['thumbs']['small_thumb'].replace('!200', '')
    self.podcast.feed_url = 'http://podcast.forecho.com/qingting/%s.rss' % self.album_id
    self.podcast.category = Category('Technology', 'Podcasting')
    self.podcast.explicit = False
    self.podcast.complete = False
    self.podcast.owner = Person("maijver", '*****@*****.**')
    for each in album_list_data['data']:
        episode = self.podcast.add_episode()
        episode.id = str(each['id'])
        episode.title = each['title']
        print(self.podcast.name + '=====' + each['title'])
        episode.image = album_info_data['data']['thumbs']['small_thumb'].replace('!200', '')
        episode.summary = each['title']
        episode.link = 'http://www.qingting.fm/channels/{}/programs/{}'.format(self.album_id, each['id'])
        episode.authors = [Person("forecho", '*****@*****.**')]
        episode.publication_date = self.reduction_time(each['update_time'])
        # NOTE(review): Media's second positional argument is the file size
        # in bytes, but each['duration'] is passed — the duration ends up in
        # the enclosure length attribute. Confirm and fix
        # (e.g. Media(url, duration=timedelta(seconds=each['duration']))).
        episode.media = Media("http://od.qingting.fm/{}".format(each['mediainfo']['bitrates_url'][0]['file_path']),
                              each['duration'])
    self.podcast.rss_file('qingting/{}.rss'.format(self.album_id), minimize=True)
def genero_feed(episodesList):
    """Build the 'NECST Tech Time' podcast feed (long description variant) and write it to `rssfile`.

    Each element of episodesList is indexed as: [1]=title, [2]=link,
    [3]=media URL, [4]=publication date.
    NOTE(review): `rssfile` is not defined here — confirm it exists at
    module level.
    """
    if episodesList:
        # Creo un nuovo podcast (create a new podcast)
        p = Podcast()
        p.name = "NECST Tech Time"
        p.description = "The NECSTLab (Novel, Emerging Computing System Technologies Laboratory) is a laboratory inside DEIB department of Politecnico di Milano, where there are a number of different research lines on advanced topics in computing systems: from architectural characteristics, to hardware-software codesign methodologies, to security and dependability issues of complex system architectures (scaling from mobile devices to large virtualized datacenters)."
        p.website = "http://www.poliradio.it/podcast/programmi/34/necst-tech-time"
        p.explicit = True
        p.image = "https://rss.draghetti.it/necst_image.jpg"
        p.feed_url = "https://rss.draghetti.it/necstpodcast.xml"
        p.copyright = "Poli Radio"
        p.language = "it-IT"
        for episodedetails in episodesList:
            episode = Episode()
            # NOTE(review): .encode() returns bytes on Python 3 — confirm
            # the target interpreter version.
            episode.title = episodedetails[1].encode("ascii", "ignore")
            episode.link = episodedetails[2].encode("ascii", "ignore")
            # La dimensione e statistica in base alle puntante analizzate
            # (the size is a statistical estimate based on analysed episodes)
            episode.media = Media(episodedetails[3], 30000000, type="audio/x-m4a", duration=None)
            episode.publication_date = episodedetails[4]
            p.episodes.append(episode)
        # Print to stdout, just as an example
        p.rss_file(rssfile, minimize=False)
def generate_podcast_xml(base, books):
    """Generate an audiobook RSS feed and write it to skeleton/rss.xml.

    :param base: base URL prefixed to each book's media path.
    :param books: iterable of file names; the last four characters
        (the extension) are stripped to form the episode title.
    """
    from podgen import Podcast, Episode
    from podgen import Media

    p = Podcast()
    p.name = "AeonNeo's Audiobooks"
    p.description = "Description"
    p.website = "www.yangvincent.com"
    p.explicit = False

    # Create one episode per book file
    for book_name in books:
        ep = Episode()
        ep.title = book_name[:-4]
        full_path = base + '/files/' + book_name
        dev_path = 'files/' + book_name
        try:
            book_size = os.path.getsize(dev_path)
        except OSError as e:
            # Missing/unreadable file: report it and fall back to size 0
            print(e)
            book_size = 0
        # Fix: 'audio/mp4a' is not a registered MIME type; the IANA type
        # for MPEG-4 audio (.m4a/.m4b) is 'audio/mp4'.
        ep.media = Media(full_path, type='audio/mp4', size=book_size)
        p.episodes.append(ep)

    # Generate rss
    p.rss_file('skeleton/rss.xml', minimize=True)
def main():
    """Create an example podcast and print it or save it to a file."""
    # There must be exactly one argument, and it is must end with rss
    if len(sys.argv) != 2 or not (
            sys.argv[1].endswith('rss')):
        # Invalid usage, print help message
        # print_enc is just a custom function which functions like print,
        # except it deals with byte arrays properly.
        print_enc('Usage: %s ( <file>.rss | rss )' % \
                  'python -m podgen')
        print_enc('')
        print_enc('  rss -- Generate RSS test output and print it to stdout.')
        print_enc('  <file>.rss -- Generate RSS test teed and write it to file.rss.')
        print_enc('')
        exit()
    # Remember what type of feed the user wants
    arg = sys.argv[1]

    from podgen import Podcast, Person, Media, Category, htmlencode
    # Initialize the feed
    p = Podcast()
    p.name = 'Testfeed'
    p.authors.append(Person("Lars Kiesow", "*****@*****.**"))
    p.website = 'http://example.com'
    p.copyright = 'cc-by'
    p.description = 'This is a cool feed!'
    p.language = 'de'
    p.feed_url = 'http://example.com/feeds/myfeed.rss'
    p.category = Category('Technology', 'Podcasting')
    p.explicit = False
    p.complete = False
    p.new_feed_url = 'http://example.com/new-feed.rss'
    p.owner = Person('John Doe', '*****@*****.**')
    p.xslt = "http://example.com/stylesheet.xsl"

    # One example episode with all the common fields filled in
    e1 = p.add_episode()
    e1.id = 'http://lernfunk.de/_MEDIAID_123#1'
    e1.title = 'First Element'
    e1.summary = htmlencode('''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Tamen aberramus a proposito, et, ne longius, prorsus, inquam, Piso, si ista mala sunt, placet. Aut etiam, ut vestitum, sic sententiam habeas aliam domesticam, aliam forensem, ut in fronte ostentatio sit, intus veritas occultetur? 
Cum id fugiunt, re eadem defendunt, quae Peripatetici, verba <3.''')
    e1.link = 'http://example.com'
    e1.authors = [Person('Lars Kiesow', '*****@*****.**')]
    e1.publication_date = datetime.datetime(2014, 5, 17, 13, 37, 10,
                                            tzinfo=pytz.utc)
    e1.media = Media("http://example.com/episodes/loremipsum.mp3",
                     454599964,
                     duration=datetime.timedelta(hours=1, minutes=32, seconds=19))

    # Should we just print out, or write to file?
    if arg == 'rss':
        # Print
        print_enc(p.rss_str())
    elif arg.endswith('rss'):
        # Write to file
        p.rss_file(arg, minimize=True)
def test_idNotSetButEnclosureIsUsed(self):
    """When no explicit id is assigned, the media URL becomes the RSS guid."""
    media_url = "http://example.com/podcast/episode1.mp3"
    ep = Episode()
    ep.title = "My first episode"
    ep.media = Media(media_url, 97423487, "audio/mpeg")
    rss_item = ep.rss_entry()
    self.assertEqual(rss_item.find("guid").text, media_url)
def test_constructor(self):
    """Every keyword accepted by Episode.__init__ ends up on the matching attribute."""
    title = "A constructed episode"
    subtitle = "We're using the constructor!"
    summary = "In this week's episode, we try using the constructor to " \
              "create a new Episode object."
    long_summary = "In this week's episode, we try to use the constructor " \
                   "to create a new Episode object. Additionally, we'll " \
                   "check whether it actually worked or not. Hold your " \
                   "fingers crossed!"
    media = Media("http://example.com/episodes/1.mp3", 1425345346,
                  "audio/mpeg",
                  datetime.timedelta(hours=1, minutes=2, seconds=22))
    publication_date = datetime.datetime(2016, 6, 7, 13, 37, 0,
                                         tzinfo=pytz.utc)
    link = "http://example.com/blog/?i=1"
    authors = [Person("John Doe", "*****@*****.**")]
    image = "http://example.com/static/1.png"
    explicit = True
    is_closed_captioned = False
    position = 3
    withhold_from_itunes = True

    ep = Episode(
        title=title,
        subtitle=subtitle,
        summary=summary,
        long_summary=long_summary,
        media=media,
        publication_date=publication_date,
        link=link,
        authors=authors,
        image=image,
        explicit=explicit,
        is_closed_captioned=is_closed_captioned,
        position=position,
        withhold_from_itunes=withhold_from_itunes,
    )

    # Time to check if this works
    self.assertEqual(ep.title, title)
    self.assertEqual(ep.subtitle, subtitle)
    self.assertEqual(ep.summary, summary)
    self.assertEqual(ep.long_summary, long_summary)
    self.assertEqual(ep.media, media)
    self.assertEqual(ep.publication_date, publication_date)
    self.assertEqual(ep.link, link)
    self.assertEqual(ep.authors, authors)
    self.assertEqual(ep.image, image)
    self.assertEqual(ep.explicit, explicit)
    self.assertEqual(ep.is_closed_captioned, is_closed_captioned)
    self.assertEqual(ep.position, position)
    self.assertEqual(ep.withhold_from_itunes, withhold_from_itunes)
def test_idSetToFalseSoEnclosureNotUsed(self):
    """Explicitly assigning id = False suppresses the guid element entirely."""
    ep = Episode()
    ep.title = "My first episode"
    ep.media = Media("http://example.com/podcast/episode1.mp3",
                     34328731, "audio/mpeg")
    ep.id = False
    self.assertIsNone(ep.rss_entry().find("guid"))
def album(self):
    """Scrape a Ximalaya album page plus its JSON track API and write an RSS file.

    Builds self.podcast from the album page soup, then one episode per
    track, and saves to ximalaya/<album_id>.rss. Per-track failures are
    caught and logged so one bad track does not abort the album.
    """
    page = requests.get(self.album_url, headers=self.header)
    soup = BeautifulSoup(page.content, "lxml")
    # Initialisation (original comment: 初始化)
    self.podcast = Podcast()
    self.podcast.name = soup.find('h1', 'title').get_text()
    self.podcast.authors.append(Person("Powered by forecho", '*****@*****.**'))
    self.podcast.website = self.album_url
    self.podcast.copyright = 'cc-by'
    # Fall back to the podcast name when the album has no intro text
    if soup.find('div', 'album-intro') and soup.find('div', 'album-intro').get_text():
        self.podcast.description = soup.find('div', 'album-intro').get_text()
    else:
        self.podcast.description = self.podcast.name
    # NOTE(review): 'cn' is not a valid RFC 5646 tag — presumably 'zh-cn'
    # was intended; confirm before changing.
    self.podcast.language = 'cn'
    # Strip the '!...' suffix to get the full-size cover image
    self.podcast.image = soup.find('div', 'album-info').find('img').get('src').split('!')[0]
    self.podcast.feed_url = 'http://podcast.forecho.com/ximalaya/%s.rss' % self.album_id
    self.podcast.category = Category('Technology', 'Podcasting')
    self.podcast.explicit = False
    self.podcast.complete = False
    self.podcast.owner = Person("forecho", '*****@*****.**')
    album_list_content = requests.get(self.album_list_api, headers=self.header).content
    album_list_data = json.loads(album_list_content.decode('utf-8'))
    count = len(album_list_data['data']['tracksAudioPlay'])
    for each in album_list_data['data']['tracksAudioPlay']:
        try:
            detail_url = 'http://www.ximalaya.com/tracks/%s.json' % each['trackId']
            response = requests.get(detail_url, headers=self.header)
            item = json.loads(response.content)
            episode = self.podcast.add_episode()
            episode.id = str(each['index'])
            episode.title = each['trackName']
            print(self.podcast.name + '=====' + each['trackName'])
            image = each['trackCoverPath'].split('!')[0]
            # GIF/BMP episode images are not accepted; fall back to the album image
            if (image[-4:] == '.gif' or image[-4:] == '.bmp'):
                episode.image = self.podcast.image
            else:
                episode.image = image
            if item['intro']:
                episode.summary = item['intro'].replace('\r\n', '')
            else:
                episode.summary = each['trackName']
            episode.link = 'http://www.ximalaya.com%s' % each['albumUrl']
            episode.authors = [Person("forecho", '*****@*****.**')]
            episode.publication_date = self.reduction_time(item['time_until_now'],
                                                           item['formatted_created_at'])
            # NOTE(review): Media's second positional argument is the file
            # size in bytes, but each['duration'] is passed — confirm and
            # fix (e.g. duration=timedelta(seconds=each['duration'])).
            episode.media = Media(each['src'], each['duration'])
            # Newest track first
            episode.position = count - each['index'] + 1
        except Exception as e:
            # Original messages: "exception" / "exception URL"
            print('异常:', e)
            print('异常 URL:', 'http://www.ximalaya.com%s' % each['trackUrl'])
            traceback.print_exc()
    # Write the feed to disk (original comment: 生成文件)
    # print self.podcast.rss_str()
    self.podcast.rss_file('ximalaya/%s.rss' % self.album_id, minimize=True)
def get_episode(self):
    """Build a podgen Episode for this local media file.

    Uses self.relative_path / self.full_path for the download URL and
    on-disk size, and self.duration_seconds for the duration. Raises
    OSError if the file at self.full_path does not exist.
    """
    media = Media(get_download_url(self.relative_path),
                  os.path.getsize(self.full_path),
                  duration=timedelta(seconds=self.duration_seconds))
    return Episode(title=self.title,
                   media=media,
                   summary=self.summary,
                   explicit=False,
                   image=self.image_url,
                   publication_date=self.publication_date,
                   subtitle=self.subtitle)
def episode_from_filename(filename):
    """Create an Episode for a media file, deriving the URL from its slug.

    Falls back to the current time when the file info has no 'created'
    entry. The media MIME type is left as None for podgen to infer.
    """
    info = get_file_info(filename)
    media_url = to_slug(filename)
    return Episode(
        title=filename,
        publication_date=info.get('created', datetime.now()),
        media=Media(
            os.path.join(config.SERVER_BASEURL, 'episodes', media_url),
            info.get('size'),
            None,  # type
            info.get('duration')))
def rssfeed(request, programid):
    """
    Builds the rss feed for a program identified by it's id. (int)

    1. Fetches all episodes of the program from the digas db.
    2. gets the programinfo from the app db
    3. Uses podgen to do the actual XML-generation.

    Returns an HttpResponse with content type application/xml.
    Raises ProgramInfo.DoesNotExist if no program matches programid.
    """
    podcasts = DigasPodcast.objects.using('digas').filter(
        softdel=0, program=int(programid)).only(
            'program', 'title', 'remark', 'author', 'createdate',
            'broadcastdate', 'filename', 'filesize', 'duration',
            'softdel').order_by('-createdate')
    programinfo = ProgramInfo.objects.get(programid=int(programid))
    # loading globalsettings here, and not at the module_level
    # This way django won't explode because of missing
    # constance_config table when we start on scratch
    # or set up in a new environment.
    from .models import globalsettings
    p = Podcast(
        name=programinfo.name,
        subtitle=programinfo.subtitle,
        description=programinfo.description,
        website=feed_url(programid),  # programinfo.website,
        explicit=programinfo.explicit,
        category=Category(programinfo.category),
        authors=[globalsettings.owner],
        language=programinfo.language,
        owner=globalsettings.owner,
        feed_url=feed_url(programid),
        new_feed_url=feed_url(programid),
        image=programinfo.image_url,
    )
    for episode in podcasts:
        # Get pubdate from createdate or broadcastdate
        pubdate = digas2pubdate(episode.createdate, episode.broadcastdate)
        # Add the episode to the list
        p.episodes.append(
            Episode(
                title=episode.title,
                media=Media(mp3url(episode.filename), episode.filesize),
                link=mp3url(episode.filename),  # multifeedreader uses this.
                id=guid(episode.filename),
                summary=episode.remark,
                publication_date=pubdate))
    # send it as unicode
    rss = u'%s' % p
    return HttpResponse(rss, content_type='application/xml')
def test_getDuration(self, mock_tinytag, mock_open, mock_rm):
    """fetch_duration() downloads the media (streamed or not), probes it with TinyTag and removes the temp file."""
    # Create our fake requests module
    mock_requests = mock.Mock()
    # Prepare the response which the code will get from requests.get()
    mock_requests_response = mock.Mock()
    # The content (supposed to be binary mp3 file)
    mock_requests_response.content = "binary data here"
    # The content, as returned by an iterator (supposed to be chunks of
    # mp3-file)
    mock_requests_response.iter_content.return_value = range(5)
    # Make sure our fake response is returned by requests.get()
    mock_requests.get.return_value = mock_requests_response
    # Return the correct number of seconds from TinyTag
    seconds = 14 * 60
    mock_tinytag.get.return_value.duration = seconds
    # Now do the actual testing
    m = Media(self.url, self.size, self.type)
    m.requests_session = mock_requests
    m.fetch_duration()
    self.assertAlmostEqual(m.duration.total_seconds(), seconds, places=0)
    # Check that the underlying libraries were used correctly
    self.assertEqual(mock_requests.get.call_args[0][0], self.url)
    # Accept either download strategy (streamed chunks or one-shot body)
    if 'stream' in mock_requests.get.call_args[1] and \
            mock_requests.get.call_args[1]['stream']:
        # The request is streamed, so iter_content was used
        self.assertEqual(mock_requests_response.iter_content.call_count, 1)
        fd = mock_open.return_value.__enter__.return_value
        expected = [((i,),) for i in range(5)]
        self.assertEqual(fd.write.call_args_list, expected)
    else:
        # The entire file was downloaded in one go
        mock_open.return_value.__enter__.return_value.\
            write.assert_called_once_with("binary data here")
    # The temporary file must be cleaned up afterwards
    mock_rm.assert_called_once_with(mock_open.return_value.
                                    __enter__.return_value.name)
def test_getDuration(self, mock_tinytag, mock_open, mock_rm):
    """fetch_duration() downloads the media, probes it with TinyTag and removes the temp file.

    NOTE(review): this appears to duplicate another test_getDuration in
    this source — confirm whether one copy can be removed.
    """
    # Create our fake requests module
    mock_requests = mock.Mock()
    # Prepare the response which the code will get from requests.get()
    mock_requests_response = mock.Mock()
    # The content (supposed to be binary mp3 file)
    mock_requests_response.content = "binary data here"
    # The content, as returned by an iterator (supposed to be chunks of
    # mp3-file)
    mock_requests_response.iter_content.return_value = range(5)
    # Make sure our fake response is returned by requests.get()
    mock_requests.get.return_value = mock_requests_response
    # Return the correct number of seconds from TinyTag
    seconds = 14 * 60
    mock_tinytag.get.return_value.duration = seconds
    # Now do the actual testing
    m = Media(self.url, self.size, self.type)
    m.requests_session = mock_requests
    m.fetch_duration()
    self.assertAlmostEqual(m.duration.total_seconds(), seconds, places=0)
    # Check that the underlying libraries were used correctly
    self.assertEqual(mock_requests.get.call_args[0][0], self.url)
    # Accept either download strategy (streamed chunks or one-shot body)
    if 'stream' in mock_requests.get.call_args[1] and \
            mock_requests.get.call_args[1]['stream']:
        # The request is streamed, so iter_content was used
        self.assertEqual(mock_requests_response.iter_content.call_count, 1)
        fd = mock_open.return_value.__enter__.return_value
        expected = [((i, ), ) for i in range(5)]
        self.assertEqual(fd.write.call_args_list, expected)
    else:
        # The entire file was downloaded in one go
        mock_open.return_value.__enter__.return_value.\
            write.assert_called_once_with("binary data here")
    # The temporary file must be cleaned up afterwards
    mock_rm.assert_called_once_with(
        mock_open.return_value.__enter__.return_value.name)
def test_media(self):
    """The media object maps onto the RSS enclosure, and non-Media values are rejected."""
    media = Media("http://example.org/episodes/1.mp3",
                  14536453,
                  "audio/mpeg")
    self.fe.media = media
    enclosure = self.fe.rss_entry().find("enclosure")
    # url/length/type attributes must mirror the Media fields
    for attr, want in (("url", media.url),
                       ("length", str(media.size)),
                       ("type", media.type)):
        self.assertEqual(enclosure.get(attr), want)
    # Ensure duck-typing is checked at assignment time
    self.assertRaises(TypeError, setattr, self.fe, "media", media.url)
    self.assertRaises(TypeError, setattr, self.fe, "media",
                      (media.url, media.size, media.type))
def generate_rss_from_articles(feed_settings, articles): """ Creates a FeedGenerator feed from a set of feed_entries. :param feed_settings: a feed_settings object containing :param articles: :return: """ # Initialize the feed podcast = Podcast() podcast.name = feed_settings.title author = Person(feed_settings.author['name'], feed_settings.author['email']) podcast.authors.append(author) podcast.website = feed_settings.source_page_url podcast.copyright = feed_settings.copyright podcast.description = feed_settings.subtitle podcast.summary = feed_settings.subtitle podcast.subtitle = feed_settings.subtitle podcast.language = 'vi' podcast.feed_url = feed_settings.output_url podcast.image = feed_settings.img_url podcast.category = Category('Music', 'Music Commentary') podcast.explicit = False # p.complete = False # p.new_feed_url = 'http://example.com/new-feed.rss' podcast.owner = author # p.xslt = "http://example.com/stylesheet.xsl" vt_tz = pytz.timezone('Asia/Ho_Chi_Minh') pastdate = datetime.datetime(2000, 1, 1, 0, 0).astimezone(vt_tz) # podcast.last_updated = datetime.datetime.now(vt_tz) for article in articles: episode = podcast.add_episode() episode.id = article.link episode.title = article.title episode.summary = article.description episode.link = article.link # episode.authors = [Person('Lars Kiesow', '*****@*****.**')] episode.publication_date = article.pub_date pastdate = max(pastdate, article.pub_date) # episode.media = Media.create_from_server_response(article.media, size=None, duration=None) episode.media = Media(article.media, size=None, duration=None, type=article.type) podcast.last_updated = pastdate podcast.publication_date = pastdate return podcast
def main(event, context):
    """AWS Lambda handler: scan the DynamoDB podcast table and return the RSS feed.

    :param event: Lambda event (unused).
    :param context: Lambda context (unused).
    :return: API-Gateway-style response dict with the RSS XML body.
    """
    dynamodb = boto3.resource('dynamodb', region_name='sa-east-1')
    table = dynamodb.Table('semservidor-dev')
    podcasts = table.scan()

    author = Person("Evandro Pires da Silva", "*****@*****.**")
    p = Podcast(
        name="Sem Servidor",
        description="Podcast dedicado a arquitetura serverless, com conteúdo de qualidade em português.",
        website="https://semservidor.com.br",
        explicit=False,
        copyright="2020 Evandro Pires da Silva",
        # Fix: "pr-BR" is not a valid language tag; Brazilian Portuguese
        # is "pt-BR" (RFC 5646 / BCP 47).
        language="pt-BR",
        authors=[author],
        feed_url="https://3tz8r90j0d.execute-api.sa-east-1.amazonaws.com/dev/podcasts/rss",
        category=Category("Music", "Music History"),
        owner=author,
        image="http://d30gvsirhz3ono.cloudfront.net/logo_semservidor_teste.jpg",
        web_master=Person(None, "*****@*****.**"))

    items = podcasts['Items']
    for item in items:
        base_url = "http://d30gvsirhz3ono.cloudfront.net/"
        file_path = base_url + item['info']['arquivo']['nome']
        p.episodes += [
            Episode(title=item['info']['episodio'],
                    media=Media(file_path,
                                int(item['info']['arquivo']['tamanho'])),
                    summary=item['info']['descricao'],
                    position=int(item['id']))
        ]
    # Sort the feed according to each episode's explicit position
    p.apply_episode_order()

    rss = p.rss_str()
    response = {
        "statusCode": 200,
        "headers": {
            "content-type": "application/xml"
        },
        "body": rss
    }
    return response
def detail(self, sound_id, date):
    """Fetch one Ximalaya track's JSON detail and append it to self.podcast.

    :param sound_id: numeric track id used to build the detail URL.
    :param date: relative date string passed to self.reduction_time.
    """
    detail_url = 'http://www.ximalaya.com/tracks/%s.json' % sound_id
    response = requests.get(detail_url, headers=self.header)
    item = json.loads(response.content)
    episode = self.podcast.add_episode()
    episode.id = str(item['id'])
    episode.title = item['title']
    # Drop the query string to get the bare cover image URL
    episode.image = item['cover_url_142'].split('?')[0]
    episode.summary = (item['intro'].replace('\n', '')
                       if item['intro'] else '')
    episode.link = 'http://www.ximalaya.com/sound/%d' % item['id']
    episode.authors = [Person("forecho", '*****@*****.**')]
    episode.publication_date = self.reduction_time(
        date, item['formatted_created_at'])
    # NOTE(review): 454599964 is a hard-coded placeholder size (bytes) —
    # the real file size is not available from this endpoint.
    episode.media = Media(item['play_path_64'], 454599964)
    # Fix: was a Python-2-only `print` statement; the function form works
    # on both Python 2 and 3.
    print(self.podcast.name + '=====' + item['title'])
def test_strToSize(self):
    """_str_to_bytes understands decimal (kB/MB/GB/TB) and binary (KiB/MiB/...) units,
    optional spaces, fractions and grouped digits."""
    cases = {
        "12 kB": 12000,
        "12 kib": 12288,
        "15MB": 15000000,
        "15MiB": 15728640,
        "0.32GB": 320000000,
        "0.32GiB": 343597384,
        "462GB": 462000000000,
        "4TB": 4000000000000,
        "4 TiB": 4398046511104,
        "1 000 KB": 1000000,
        "145B": 145,
    }
    for text, want in iteritems(cases):
        self.assertEqual(want, Media._str_to_bytes(text))
def rss(url_token):
    """Flask view: build an RSS feed from the Dropbox files behind url_token.

    Looks up the Dropbox access token and feed metadata for the given
    token, turns each file's temporary link into an episode, and returns
    the feed as text/xml.
    """
    dropbox_access_token, title, description = get_the_latest_token_info(
        url_token)
    urls = get_temporary_link(dropbox_access_token)
    p = Podcast()
    p.name = title
    p.description = description
    p.website = "https://www.google.com"
    p.explicit = True
    for i, (size, url, uid, name) in enumerate(urls):
        my_episode = Episode()
        # Episode title is the file name without its extension
        my_episode.title = os.path.splitext(name)[0]
        my_episode.id = uid
        my_episode.media = Media(url, size=size, type="audio/mpeg")
        p.episodes.append(my_episode)
    return Response(str(p), mimetype='text/xml')
def test_durationToStr(self):
    """duration_str renders HH:MM:SS (hours may exceed 24) or MM:SS when under an hour."""
    m = Media(self.url, self.size, self.type, timedelta(hours=1))
    self.assertEqual(m.duration_str, "01:00:00")
    # (timedelta kwargs, expected rendering) pairs
    cases = [
        (dict(days=1), "24:00:00"),
        (dict(minutes=1), "01:00"),
        (dict(seconds=1), "00:01"),
        (dict(days=1, hours=2), "26:00:00"),
        (dict(hours=1, minutes=32, seconds=13), "01:32:13"),
        (dict(hours=1, minutes=9, seconds=3), "01:09:03"),
    ]
    for kwargs, expected in cases:
        m.duration = timedelta(**kwargs)
        self.assertEqual(m.duration_str, expected)
def test_downloadMedia(self):
    """download() writes the media content to a given file object or file name,
    whether the fake response is streamed or not."""
    class MyLittleRequests(object):
        @staticmethod
        def get(*args, **kwargs):
            self.assertEqual(args[0], self.url)
            is_streaming = kwargs.get("stream")

            class MyLittleResponse(object):
                # NOTE(review): `content` is only defined on the streaming
                # path here, which looks inverted (one would expect it for
                # the non-streaming path) — confirm which attribute
                # Media.download() reads in each mode.
                if is_streaming:
                    content = "binary content".encode("UTF-8")

                @staticmethod
                def iter_content(chunk_size):
                    assert chunk_size is None or chunk_size >= 1024
                    for char in "binary content":
                        yield char.encode("UTF-8")

                @staticmethod
                def raise_for_status():
                    pass

            return MyLittleResponse

    # Test that the given file object is used
    m = Media(self.url, self.size, self.type)
    m.requests_session = MyLittleRequests
    fd = io.BytesIO()
    m.download(fd)
    self.assertEqual(fd.getvalue().decode("UTF-8"), "binary content")
    fd.close()
    # Test that the given filename is used
    with tempfile.NamedTemporaryFile(delete=False) as fd:
        filename = fd.name
    try:
        m.download(filename)
        with open(filename, "rb") as fd:
            self.assertEqual(fd.read().decode("UTF-8"), "binary content")
    finally:
        os.remove(filename)
def test_assigningSize(self):
    """The size attribute can be reassigned to a new integer after construction."""
    m = Media(self.url, self.size)
    new_size = 1234567
    m.size = new_size
    self.assertEqual(m.size, new_size)
def test_assigningDuration(self):
    """The duration attribute can be reassigned to a new timedelta after construction."""
    m = Media(self.url, self.size, self.type, self.duration)
    new_duration = timedelta(minutes=32, seconds=23)
    m.duration = new_duration
    self.assertEqual(m.duration, new_duration)