def feed():
    """Flask view: render the SNH48 Live YouTube channel as an Atom feed.

    Builds a :class:`feedgen.feed.FeedGenerator` with the channel metadata,
    adds one entry per video returned by ``api.list_videos(youtube)``, and
    returns the serialized feed as an ``application/xml`` response.

    Fix vs. original: ``video_id`` was assigned but never used — the URL
    re-evaluated ``video.resourceId.videoId`` a second time.  The URL now
    reuses ``video_id`` (same value, no behavior change).
    """
    feed_url = flask.request.url_root
    fg = feedgen.feed.FeedGenerator()
    fg.id(feed_url)
    fg.title('SNH48 Live')
    fg.subtitle('SNH48公演录播')
    fg.author({'name': 'SNH48Live', 'email': '*****@*****.**'})
    fg.link(href=feed_url, rel='self', type='application/atom+xml')
    fg.link(href='https://www.youtube.com/SNH48Live', rel='alternate',
            type='text/html')
    fg.logo('https://snh48live.org/static/logo.png')
    fg.language('zh-cmn-Hans-CN')
    # Entry body: thumbnail image followed by one <p> per non-empty
    # description line; `urlize` turns bare URLs into links.  `{%- -%}`
    # trims surrounding whitespace so the HTML stays compact.
    content_template = jinja2.Template('''\
<p><img src="{{ thumbnail_url }}" alt="{{ video_url }}"></p>
{%- for line in description.split('\n') -%}
{%- if line -%}<p>{{ line|urlize }}</p>{%- endif -%}
{%- endfor -%}''')
    for video in api.list_videos(youtube):
        # Re-bind to the snippet sub-object; AttrDict gives attribute access
        # to the raw API dict.
        video = attrdict.AttrDict(video).snippet
        video_id = video.resourceId.videoId
        video_url = 'https://youtu.be/%s' % video_id
        fe = fg.add_entry()
        fe.id(video_url)
        fe.link(href=video_url, rel='alternate', type='text/html')
        fe.title(video.title)
        # No separate update timestamp in the snippet; publish time is used
        # for both fields.
        fe.published(video.publishedAt)
        fe.updated(video.publishedAt)
        fe.content(content_template.render(
            thumbnail_url=video.thumbnails.maxres.url,
            video_url=video_url,
            description=video.description,
        ), type='html')
    return flask.Response(fg.atom_str(pretty=True), mimetype='application/xml')
# NOTE(review): legacy Python 2 fragment (`urllib2`, `print >> sys.stderr`
# statement syntax) that has been collapsed onto one physical line, and is
# truncated mid-`try` — the matching `except` clause is not visible in this
# chunk.  Preserved byte-for-byte rather than reformatted, since completing
# the `try` would require guessing the missing handler.
# Intended flow (as far as visible): re-read the CSV named by sys.argv[1]
# (first row must equal `header_row`), remember already-seen YouTube ids,
# rewrite the file with the existing rows, then for each new video from
# list_videos() derive the low-bitrate "-low.m3u8" URL and fetch it looking
# for a total-duration marker; a missing marker is reported to stderr.
if os.path.exists(sys.argv[1]): f = open(sys.argv[1], "r") for row_num, row in enumerate(csv.reader(f)): if row_num == 0: assert row == header_row, row else: existing_youtube_ids.add(row[0]) existing_rows.append(row) f.close() writer = csv.writer(open(sys.argv[1], "w")) writer.writerow(header_row) writer.writerows(existing_rows) for v in list_videos(): if v["youtube_id"] in existing_youtube_ids: continue if "download_urls" not in v: continue if v["download_urls"] is None: continue url = v["download_urls"].get("m3u8", None) if url is None: continue # Munge the URL to get the low-kbps stream url = re_url.sub("-low.m3u8", url) try: doc = urllib2.urlopen(url).read() duration_match = re_total_dur.search(doc) if duration_match is None: print >> sys.stderr, "No duration match for {0}".format(url)
def videos(**kwargs):
    """URL-decode the 'resource' keyword and list its videos.

    Passes the decoded resource together with the caller-supplied
    ``reliveOnly`` flag straight through to ``api.list_videos``; the
    result is not used here.
    """
    decoded_resource = urllib.unquote_plus(kwargs['resource'])
    relive_flag = kwargs['reliveOnly']
    api.list_videos(decoded_resource, relive_flag)
def videos(**kwargs):
    """URL-decode the 'resource' keyword and list its videos.

    Thin pass-through to ``api.list_videos``; the return value is
    discarded.
    """
    decoded_resource = urllib.unquote_plus(kwargs['resource'])
    api.list_videos(decoded_resource)
# NOTE(review): legacy Python 2 fragment (`urllib2`, `print >>sys.stderr`
# statement syntax) collapsed onto one physical line, and truncated
# mid-`try` — the matching `except` clause is not visible in this chunk.
# Preserved byte-for-byte rather than reformatted, since completing the
# `try` would require guessing the missing handler.
# Intended flow (as far as visible): re-read the CSV named by sys.argv[1]
# (first row must equal `header_row`), remember already-seen YouTube ids,
# rewrite the file with the existing rows, then for each new video from
# list_videos() derive the low-bitrate "-low.m3u8" URL and fetch it looking
# for a total-duration marker; a missing marker is reported to stderr.
if os.path.exists(sys.argv[1]): f = open(sys.argv[1], "r") for row_num, row in enumerate(csv.reader(f)): if row_num == 0: assert row == header_row, row else: existing_youtube_ids.add(row[0]) existing_rows.append(row) f.close() writer = csv.writer(open(sys.argv[1], "w")) writer.writerow(header_row) writer.writerows(existing_rows) for v in list_videos(): if v["youtube_id"] in existing_youtube_ids: continue if "download_urls" not in v: continue if v["download_urls"] is None: continue url = v["download_urls"].get("m3u8", None) if url is None: continue # Munge the URL to get the low-kbps stream url = re_url.sub("-low.m3u8", url) try: doc = urllib2.urlopen(url).read() duration_match = re_total_dur.search(doc) if duration_match is None: print >>sys.stderr, "No duration match for {0}".format(url)
def videos(**kwargs):
    """Decode the 'url' keyword and list its videos.

    Uses ``urllib.unquote`` (not ``unquote_plus``) on the raw value and
    hands it to ``api.list_videos``; the result is discarded.
    """
    raw_url = kwargs['url']
    api.list_videos(urllib.unquote(raw_url))