def channel_playlists_html(polymer_json):
    microformat = polymer_json[1]['response']['microformat']['microformatDataRenderer']
    channel_url = microformat['urlCanonical'].rstrip('/')
    channel_id = channel_url[channel_url.rfind('/')+1:]

    # A continuation (page 2+) puts the grid directly under continuationContents;
    # the first page nests it inside the Playlists tab's section list.
    try:
        items = polymer_json[1]['response']['continuationContents']['gridContinuation']['items']
    except KeyError:
        response = polymer_json[1]['response']
        try:
            contents = response['contents']
        except KeyError:
            items = []
        else:
            item_section = tab_with_content(contents['twoColumnBrowseResultsRenderer']['tabs'])['sectionListRenderer']['contents'][0]['itemSectionRenderer']['contents'][0]
            try:
                items = item_section['gridRenderer']['items']
            except KeyError:
                # A messageRenderer here means the channel has no playlists.
                if "messageRenderer" in item_section:
                    items = []
                else:
                    raise

    items_html = grid_items_html(items, {'author': microformat['title']})

    return yt_channel_items_template.substitute(
        header = common.get_header(),
        channel_title = microformat['title'],
        channel_tabs = channel_tabs_html(channel_id, 'Playlists'),
        avatar = '/' + microformat['thumbnail']['thumbnails'][0]['url'],
        page_title = microformat['title'] + ' - Channel',
        items = items_html,
        page_buttons = '',
        number_of_results = '',
    )

def get_comments_page(query_string):
    parameters = urllib.parse.parse_qs(query_string)
    ctoken = default_multi_get(parameters, 'ctoken', 0, default='')
    replies = False
    if not ctoken:
        # No continuation token given, so this is a request for the replies to a
        # specific comment; build a replies ctoken from the video and parent ids.
        video_id = parameters['video_id'][0]
        parent_id = parameters['parent_id'][0]

        ctoken = comment_replies_ctoken(video_id, parent_id)
        replies = True

    result = parse_comments(request_comments(ctoken, replies), replies)
    comments_html, ctoken = get_comments_html(result)

    # An empty ctoken from the parsed result means there are no further pages.
    if ctoken == '':
        more_comments_button = ''
    else:
        more_comments_button = more_comments_template.substitute(
            url=URL_ORIGIN + '/comments?ctoken=' + ctoken)

    return yt_comments_template.substitute(
        header=common.get_header(),
        comments=comments_html,
        page_title='Comments',
        more_comments_button=more_comments_button,
    )

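# Usage sketch (illustration only; the identifiers below are placeholders, not
# real YouTube ids, and calling these would perform network requests):
#
#     get_comments_page('ctoken=SOME_CONTINUATION_TOKEN')              # next page of top-level comments
#     get_comments_page('video_id=dQw4w9WgXcQ&parent_id=UgzEXAMPLEID')  # replies to one comment
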
def channel_about_page(polymer_json):
    avatar = '/' + polymer_json[1]['response']['microformat']['microformatDataRenderer']['thumbnail']['thumbnails'][0]['url']
    # my goodness...
    channel_metadata = tab_with_content(polymer_json[1]['response']['contents']['twoColumnBrowseResultsRenderer']['tabs'])['sectionListRenderer']['contents'][0]['itemSectionRenderer']['contents'][0]['channelAboutFullMetadataRenderer']

    channel_links = ''
    for link_json in channel_metadata.get('primaryLinks', ()):
        channel_links += channel_link_template.substitute(
            url = html.escape(link_json['navigationEndpoint']['urlEndpoint']['url']),
            text = common.get_plain_text(link_json['title']),
        )

    stats = ''
    for stat_name in ('subscriberCountText', 'joinedDateText', 'viewCountText', 'country'):
        try:
            stat_value = common.get_plain_text(channel_metadata[stat_name])
        except KeyError:
            continue
        else:
            stats += stat_template.substitute(stat_value=stat_value)

    try:
        description = common.format_text_runs(common.get_formatted_text(channel_metadata['description']))
    except KeyError:
        description = ''

    return yt_channel_about_template.substitute(
        header = common.get_header(),
        page_title = common.get_plain_text(channel_metadata['title']) + ' - About',
        channel_title = common.get_plain_text(channel_metadata['title']),
        avatar = html.escape(avatar),
        description = description,
        links = channel_links,
        stats = stats,
        channel_tabs = channel_tabs_html(channel_metadata['channelId'], 'About'),
    )

def channel_videos_html(polymer_json, current_page=1, number_of_videos=1000, current_query_string=''):
    microformat = polymer_json[1]['response']['microformat']['microformatDataRenderer']
    channel_url = microformat['urlCanonical'].rstrip('/')
    channel_id = channel_url[channel_url.rfind('/')+1:]
    try:
        items = polymer_json[1]['response']['continuationContents']['gridContinuation']['items']
    except KeyError:
        response = polymer_json[1]['response']
        try:
            contents = response['contents']
        except KeyError:
            items = []
        else:
            items = tab_with_content(contents['twoColumnBrowseResultsRenderer']['tabs'])['sectionListRenderer']['contents'][0]['itemSectionRenderer']['contents'][0]['gridRenderer']['items']

    items_html = grid_items_html(items, {'author': microformat['title']})

    return yt_channel_items_template.substitute(
        header = common.get_header(),
        channel_title = microformat['title'],
        channel_tabs = channel_tabs_html(channel_id, 'Videos'),
        avatar = '/' + microformat['thumbnail']['thumbnails'][0]['url'],
        page_title = microformat['title'] + ' - Channel',
        items = items_html,
        page_buttons = common.page_buttons_html(current_page, math.ceil(number_of_videos/30), URL_ORIGIN + "/channel/" + channel_id + "/videos", current_query_string),
        number_of_results = '{:,}'.format(number_of_videos) + " videos",
    )

def get_playlists_list_page():
    page = '''<ul>\n'''
    list_item_template = Template('''    <li><a href="$url">$name</a></li>\n''')
    for name in get_playlist_names():
        page += list_item_template.substitute(
            url=html.escape(common.URL_ORIGIN + '/playlists/' + name),
            name=html.escape(name))
    page += '''</ul>\n'''

    return common.yt_basic_template.substitute(
        page_title="Local playlists",
        header=common.get_header(),
        style='',
        page=page,
    )

def get_playlist_page(query_string):
    parameters = urllib.parse.parse_qs(query_string)
    playlist_id = parameters['list'][0]
    page = parameters.get("page", "1")[0]
    if page == "1":
        first_page_json = playlist_first_page(playlist_id)
        this_page_json = first_page_json
    else:
        # Fetch the first page (needed for the playlist metadata) and the
        # requested page of videos concurrently.
        tasks = (
            gevent.spawn(playlist_first_page, playlist_id),
            gevent.spawn(get_videos_ajax, playlist_id, page)
        )
        gevent.joinall(tasks)
        first_page_json, this_page_json = tasks[0].value, tasks[1].value

    # The first page nests the videos inside the section list; continuation
    # pages put them under continuation_contents.
    try:
        video_list = this_page_json['content']['section_list']['contents'][0]['contents'][0]['contents']
    except KeyError:
        video_list = this_page_json['content']['continuation_contents']['contents']

    videos_html = ''
    for video_json in video_list:
        info = common.ajax_info(video_json)
        videos_html += common.video_item_html(info, common.small_video_item_template)

    metadata = common.ajax_info(first_page_json['content']['playlist_header'])
    video_count = int(metadata['size'].replace(',', ''))
    page_buttons = common.page_buttons_html(int(page), math.ceil(video_count/20), common.URL_ORIGIN + "/playlist", query_string)

    html_ready = common.get_html_ready(metadata)
    html_ready['page_title'] = html_ready['title'] + ' - Page ' + str(page)

    stats = ''
    stats += playlist_stat_template.substitute(stat=html_ready['size'] + ' videos')
    stats += playlist_stat_template.substitute(stat=html_ready['views'])

    return yt_playlist_template.substitute(
        header = common.get_header(),
        videos = videos_html,
        page_buttons = page_buttons,
        stats = stats,
        **html_ready
    )

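# Sketch (assumption, not part of the original module): the page-count
# arithmetic used above, shown standalone. The playlist ajax continuation
# returns 20 videos per page, so e.g. a 101-video playlist spans 6 pages.
def _playlist_page_count(video_count, videos_per_page=20):
    return math.ceil(video_count / videos_per_page)
# _playlist_page_count(101) == 6
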
def get_local_playlist_page(name):
    videos_html = ''
    with open(os.path.join(playlists_directory, name + ".txt"), 'r', encoding='utf-8') as file:
        videos = file.read()
    videos = videos.splitlines()
    for video in videos:
        try:
            info = json.loads(video)
            info['thumbnail'] = common.get_thumbnail_url(info['id'])
            videos_html += common.video_item_html(info, common.small_video_item_template)
        except json.decoder.JSONDecodeError:
            pass

    return local_playlist_template.substitute(
        page_title=name + ' - Local playlist',
        header=common.get_header(),
        videos=videos_html,
        title=name,
        page_buttons='',
    )

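# Format sketch (field names other than 'id' are illustrative assumptions):
# each line of a local playlist file is one JSON object, which is why
# malformed lines are skipped above with the JSONDecodeError handler.
#
#     {"id": "dQw4w9WgXcQ", "title": "Example video", "duration": "3:33"}
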
def channel_search_page(polymer_json, query, current_page=1, number_of_videos=1000, current_query_string=''):
    microformat = polymer_json[1]['response']['microformat']['microformatDataRenderer']
    channel_url = microformat['urlCanonical'].rstrip('/')
    channel_id = channel_url[channel_url.rfind('/')+1:]

    response = polymer_json[1]['response']
    try:
        items = tab_with_content(response['contents']['twoColumnBrowseResultsRenderer']['tabs'])['sectionListRenderer']['contents']
    except KeyError:
        items = response['continuationContents']['sectionListContinuation']['contents']

    items_html = list_items_html(items)

    return yt_channel_items_template.substitute(
        header = common.get_header(),
        channel_title = html.escape(query + ' - Channel search'),
        channel_tabs = channel_tabs_html(channel_id, '', query),
        avatar = '/' + microformat['thumbnail']['thumbnails'][0]['url'],
        page_title = microformat['title'] + ' - Channel',
        items = items_html,
        page_buttons = common.page_buttons_html(current_page, math.ceil(number_of_videos/29), URL_ORIGIN + "/channel/" + channel_id + "/search", current_query_string),
        number_of_results = '',
    )

def get_search_page(query_string, parameters=()):
    qs_query = urllib.parse.parse_qs(query_string)
    if len(qs_query) == 0:
        # No query given: render an empty search page.
        return common.yt_basic_template.substitute(
            page_title="Search",
            header=common.get_header(),
            style='',
            page='',
        )
    query = qs_query["query"][0]
    page = qs_query.get("page", "1")[0]
    autocorrect = int(qs_query.get("autocorrect", "1")[0])
    sort = int(qs_query.get("sort", "0")[0])

    info = get_search_json(query, page, autocorrect, sort)

    estimated_results = int(info[1]['response']['estimatedResults'])
    estimated_pages = ceil(estimated_results / 20)
    results = info[1]['response']['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'][0]['itemSectionRenderer']['contents']

    corrections = ''
    result_list_html = ""
    # Shelves are skipped; the two correction renderers produce links that
    # rerun the search; everything else is rendered as a result item.
    for renderer in results:
        type = list(renderer.keys())[0]
        if type == 'shelfRenderer':
            continue
        if type == 'didYouMeanRenderer':
            renderer = renderer[type]
            corrected_query_string = urllib.parse.parse_qs(query_string)
            corrected_query_string['query'] = [renderer['correctedQueryEndpoint']['searchEndpoint']['query']]
            corrected_query_url = URL_ORIGIN + '/search?' + common.make_query_string(corrected_query_string)

            corrections = did_you_mean.substitute(
                corrected_query_url=corrected_query_url,
                corrected_query=common.format_text_runs(renderer['correctedQuery']['runs']),
            )
            continue
        if type == 'showingResultsForRenderer':
            renderer = renderer[type]
            no_autocorrect_query_string = urllib.parse.parse_qs(query_string)
            no_autocorrect_query_string['autocorrect'] = ['0']
            no_autocorrect_query_url = URL_ORIGIN + '/search?' + common.make_query_string(no_autocorrect_query_string)

            corrections = showing_results_for.substitute(
                corrected_query=common.format_text_runs(renderer['correctedQuery']['runs']),
                original_query_url=no_autocorrect_query_url,
                original_query=html.escape(renderer['originalQuery']['simpleText']),
            )
            continue
        result_list_html += common.renderer_html(renderer, current_query_string=query_string)

    page = int(page)
    # Show a window of up to nine page links centred on the current page.
    if page <= 5:
        page_start = 1
        page_end = min(9, estimated_pages)
    else:
        page_start = page - 4
        page_end = min(page + 4, estimated_pages)

    result = Template(yt_search_results_template).substitute(
        header=common.get_header(query),
        results=result_list_html,
        page_title=query + " - Search",
        search_box_value=html.escape(query),
        number_of_results='{:,}'.format(estimated_results),
        number_of_pages='{:,}'.format(estimated_pages),
        page_buttons=page_buttons_html(page_start, page_end, page, query),
        corrections=corrections,
    )
    return result

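# Sketch (assumption, not existing code): the pagination-window rule applied
# above, extracted for clarity. It centres a window of up to nine page links
# on the current page, clamping the start at 1 and the end at estimated_pages.
def _search_page_window(page, estimated_pages):
    if page <= 5:
        return 1, min(9, estimated_pages)
    return page - 4, min(page + 4, estimated_pages)
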