from youtube_api import YoutubeDataApi

# BASE_URL is expected to be defined elsewhere in the module as a watch-URL
# template with a "{}" placeholder for the video id.


class YTQuerry:
    def __init__(self, token):
        self.token = token
        self.api = YoutubeDataApi(token)

    def get_url(self, video_id):
        # Build a watchable URL from a video id.
        return BASE_URL.format(video_id)

    def search_videos(self, q, results=5):
        # Return up to `results` search hits for the query string.
        return self.api.search(q=q, max_results=results)
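# A minimal usage sketch for the class above. BASE_URL and "YOUR_API_KEY" are
# assumptions: the standard watch-URL template and a placeholder for a real
# YouTube Data API key.
BASE_URL = "https://www.youtube.com/watch?v={}"

if __name__ == "__main__":
    client = YTQuerry("YOUR_API_KEY")  # placeholder key
    for hit in client.search_videos("lo-fi beats", results=3):
        print(hit["video_title"], client.get_url(hit["video_id"]))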
from youtube_api import YoutubeDataApi

# FREE_API_KEY and makeStreamable() are defined elsewhere in the original project.


def getResults(query):
    streamableLinks = []
    yt = YoutubeDataApi(FREE_API_KEY)
    searches = yt.search(q=query, max_results=10)
    for i in searches:
        # Turn each result's video id into a streamable link.
        streamableLinks.append(makeStreamable(i["video_id"]))
    return streamableLinks
from django.shortcuts import redirect
from youtube_api import YoutubeDataApi

# youtube_APIKEY is defined elsewhere (e.g. in the project settings).


def watchtrailer(request):
    # Django view: search YouTube for "<title> <year> trailer" and redirect
    # the user to the first hit.
    movie_title = request.GET['movie_title']
    movie_year = request.GET['movie_year']
    #print(movie_year)
    #print(movie_title)
    yt = YoutubeDataApi(youtube_APIKEY)
    movie_trailer_search = yt.search(movie_title + ' ' + str(movie_year) + ' trailer',
                                     max_results=1)
    movie_trailer_search = movie_trailer_search[0]
    movie_trailer_id = movie_trailer_search['video_id']
    return redirect('https://www.youtube.com/watch?v=' + movie_trailer_id)
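# For context, a hypothetical URLconf entry wiring the view above to a route;
# the module and route names are assumptions. The view expects movie_title and
# movie_year as query parameters, e.g. /watchtrailer/?movie_title=Inception&movie_year=2010
from django.urls import path
from . import views

urlpatterns = [
    path('watchtrailer/', views.watchtrailer, name='watchtrailer'),
]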
import discord
from datetime import datetime
from youtube_api import YoutubeDataApi

# ytid is the YouTube Data API key, defined elsewhere in the bot's code; this
# coroutine is a discord.py command handler.


async def yt(ctx, *, query):
    yt = YoutubeDataApi(ytid)
    searches = yt.search(str(query), max_results=3)
    result = yt.get_video_metadata(
        searches[0]["video_id"],
        part=['id', 'snippet', 'contentDetails', 'statistics'])
    searches.clear()
    del yt

    url = 'https://www.youtube.com/watch?v=' + result["video_id"]
    desc = result["video_description"].split('\n')[0]
    if len(desc) > 300:
        desc = desc[:300] + "..."

    embed = discord.Embed(colour=0xff0000, title=result["video_title"],
                          description=desc, url=url)
    embed.set_author(name=result["channel_title"])
    embed.set_thumbnail(url=result["video_thumbnail"])
    embed.add_field(name="Views", value=str(result["video_view_count"]), inline=True)
    embed.add_field(name="Comments", value=str(result["video_comment_count"]), inline=True)
    # Crude ISO 8601 duration formatting: drop the "PT" prefix and trailing "S",
    # then swap the hour/minute markers for ":".
    embed.add_field(name="Duration",
                    value=str(result["duration"])[2:-1].replace("M", ":").replace("H", ":"),
                    inline=True)
    embed.add_field(name="Likes", value=str(result["video_like_count"]), inline=True)
    embed.add_field(name="Dislikes", value=str(result["video_dislike_count"]), inline=True)
    embed.set_footer(text=datetime.utcfromtimestamp(
        int(result["video_publish_date"])).strftime('%Y-%m-%d %H:%M:%S'))
    await ctx.send(embed=embed)
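# The handler above was presumably decorated as a bot command in its original
# project; a hypothetical registration sketch using discord.py's commands
# extension (the prefix, intents, and token placeholder are assumptions).
from discord.ext import commands

bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())
bot.command(name="yt")(yt)  # register the coroutine defined above as !yt

# bot.run("YOUR_BOT_TOKEN")  # placeholder token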
import json
import os
import urllib.request

from tqdm import tqdm
from youtube_api import YoutubeDataApi


def download_thumbnails(data_dir, api_key):
    """
    Download YouTube thumbnails for the top 50 results of each search term.

    Args:
        data_dir: (str) Directory to save images to.
        api_key: (str) Path to a text file containing your personal YouTube API
            key. Follow the steps to get one here:
            https://developers.google.com/youtube/v3/getting-started.

    Returns:
        True if completed successfully.
    """
    with open(api_key, "r") as f:
        api_key = f.read()

    os.makedirs(data_dir, exist_ok=True)
    yt = YoutubeDataApi(api_key)

    search_number = 50
    search_terms = [
        "makeup", "grwm", "GRWM", "makeup haul", "plt", "pretty little thing",
        "first impressions", "beauty", "kbeauty", "instabaddie",
        "makeup tutorial", "everyday makeup", "best makeup of 2019",
        "the power of makeup", "glam makeup", "full face of",
        "eyeshadow palette", "beauty products", "makeup routine",
        "get ready with me", "missguided", "iluvsarahii", "jeffree star makeup",
        "nikkietutorials", "instagram baddie", "0-100 makeup transformation",
        "glow up", "best makeup of 2018", "best makeup of 2017",
        "best makeup transformations 2019", "best makeup transformations 2018",
        "best makeup transformations 2017", "full face of", "makeup i hate",
        "nye grwm", "nye glam", "smoky eye tutorial",
        "drugstore makeup tutorial", "drugstore makeup 2019",
        "drugstore makeup 2018", "mmmmitchell", "catfish makeup",
        "no makeup makeup", "boyfriend choose my makeup", "kkw x mario",
        "roxxsaurus makeup revolution", "nikita dragun makeup",
        "holiday makeup", "makeup hacks", "2020 grwm", "24hr glow up",
        "full face drugstore makeup", "makeup for school",
        "everyday makeup routine 2018", "hd brows foundation", "grunge makeup",
        "natural soft makeup", "autumn makeup", "jamie genevieve",
    ]

    if len(search_terms) == 0:
        print("Exiting...")
        return None

    print(
        f"Searching for the top {search_number} results ("
        f"{search_number * len(search_terms)} videos)"
    )

    for search_for in tqdm(search_terms):
        # parser=None returns the raw API response items.
        response = yt.search(q=search_for, max_results=search_number, parser=None)
        if len(response) < 1:
            continue
        for i, item in enumerate(response):
            # Prefer the medium-resolution thumbnail, fall back to default.
            if "medium" in item["snippet"]["thumbnails"].keys():
                thumbnail = item["snippet"]["thumbnails"]["medium"]["url"]
            elif "default" in item["snippet"]["thumbnails"].keys():
                thumbnail = item["snippet"]["thumbnails"]["default"]["url"]
            else:
                continue
            fpath = os.path.join(
                data_dir,
                f"{str(i)}_{search_for}_" + os.path.basename(thumbnail),
            )
            urllib.request.urlretrieve(thumbnail, fpath)
            # Save the full API record alongside the image.
            with open(fpath.replace("jpg", "json"), "w") as fname:
                json.dump(item, fname)
    return True
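# A hypothetical invocation of the function above; both paths are placeholders,
# and the key file is assumed to contain only the raw API key string.
download_thumbnails(data_dir="thumbnails", api_key="youtube_api_key.txt")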
import pandas as pd

from youtube_api import YoutubeDataApi
from utils import search_dict
from utils import API_KEY

query = 'Juul'

youtube = YoutubeDataApi(key=API_KEY)
searches = youtube.search(q=query)
print(len(searches))

searches = [search_dict(search) for search in searches]
searches_df = pd.DataFrame(searches)
searches_df.to_csv('{}_searches.csv'.format(query), index=False)
import json

import requests
import urllib3
from youtube_api import YoutubeDataApi

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

api_key = "AIzaSyDCKVodrvehRJjeAETCNn0OjQRU3SAk5jo"  # personal API key
yt = YoutubeDataApi(api_key)

searchingFor = input("Enter what you would like to search for: ")
resultNum = int(input("Enter how many results you would like to have: "))

searches = yt.search(q=searchingFor, max_results=resultNum)

# Iterate over whatever the search actually returned (which may be fewer than
# resultNum) and fetch each video's duration via the raw Data API.
for searchDict in searches:
    video_id = searchDict["video_id"]
    searchUrl = ("https://www.googleapis.com/youtube/v3/videos?id=" + video_id +
                 "&key=" + api_key + "&part=contentDetails")
    response = requests.get(searchUrl, verify=False)  # SSL verification disabled as in the original
    data = json.loads(response.text)
    all_data = data['items']
    contentDetails = all_data[0]['contentDetails']
    duration = contentDetails['duration']
    print(searchDict["video_title"] + " " + duration)