def get_vids(keyword, token=None):
    # vid_dict and vid_count are assumed to be defined at module level
    global vid_count
    res = youtube_search(keyword, token=token)
    token = res[0]
    vids = res[1]
    for vid in vids:
        vid_dict['pub_date'].append(vid['snippet']['publishedAt'])
        vid_count += 1
    return token
Example 2
def grab_videos(keyword, token=None):
    res = youtube_search(keyword, token=token)
    token = res[0]
    videos = res[1]
    for vid in videos:
        video_dict['youID'].append(vid['id']['videoId'])
        video_dict['title'].append(vid['snippet']['title'])
        video_dict['pub_date'].append(vid['snippet']['publishedAt'])
    print("added " + str(len(videos)) + " videos to a total of " + str(len(video_dict['youID'])))
    return token
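Since youtube_search returns a (next_page_token, items) pair, the token this variant returns can be fed straight back in to page through results. A minimal sketch (assuming the search stops returning a token on the last page):

video_dict = {'youID': [], 'title': [], 'pub_date': []}

token = grab_videos("spinners")                    # first page
for _ in range(2):                                 # two more pages
    if token is None:
        break
    token = grab_videos("spinners", token=token)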
def grab_videos(keyword, token=None):
    res = youtube_search(keyword, token=token)
    token = res[0]
    videos = res[1]
    category = keyword.replace(" ", "")
    for vid in videos:
        video_dict['youID'].append(vid['id']['videoId'])
        video_dict['title'].append(vid['snippet']['title'])
        video_dict['description'].append(vid['snippet']['description'])
        video_dict['category'].append(category)
    return token
def grab_videos(keyword, token=None):
    res = youtube_search(keyword, token=token)
    token = res[0]
    videos = res[1]

    for vid in videos:
        video_dict['Video id'].append(vid['id']['videoId'])
        video_dict['Title'].append(vid['snippet']['title'])
        video_dict['Description'].append(vid['snippet']['description'])
        video_dict['Category'].append(keyword)
    print("added " + str(len(videos)) + " videos to a total of " +
          str(len(video_dict['Video id'])))

    return token
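The dict-of-lists layout these variants build drops straight into pandas. A small sketch (assuming video_dict has been filled as above; the filename is hypothetical):

import pandas as pd

# one row per video; the lists are appended in lockstep, so lengths match
df = pd.DataFrame(video_dict)
df.to_csv('videos.csv', index=False)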
Example 5
def update_output_div(input_value):
    searchterm = input_value

    result = youtube_search(searchterm)

    just_json = result[1]

    url1 = "https://www.googleapis.com/youtube/v3/search?part=snippet&relatedToVideoId="
    url2 = "&type=video&key="

    myvideoids = []
    myvideotitles = []
    myrange = 3
    for i in range(myrange):
        myvideotitles.append(just_json[i]["snippet"]["title"])
        myvideoids.append(just_json[i]["id"]["videoId"])

    source, target, videotitles = youtube_spider(myvideoids, url1, url2)
    source2, target2, videotitles2 = youtube_spider(target, url1, url2)
    newsource = convertToNumber(source)
    newsource2 = convertToNumber(source2)
    newtarget = convertToNumber(target)
    newtarget2 = convertToNumber(target2)

    nodes = []
    data = {}
    for i, b in zip(videotitles + videotitles2, newsource + newsource2):
        nodes.append(dict(id=b, label=i))
    # order-preserving dedupe; the node dicts are unhashable, so no set()
    nodes = [i for n, i in enumerate(nodes) if i not in nodes[n + 1:]]
    data["nodes"] = nodes

    targetids = []
    for a, b in zip(newsource + newsource2, newtarget + newtarget2):
        targetids.append(str(a) + "-" + str(b))

    targets = []
    for a, b, c in zip(targetids, newsource + newsource2,
                       newtarget + newtarget2):
        # 'from' is a Python keyword, so build the edge dict with literal keys
        targets.append({'id': a, 'from': b, 'to': c})
    targets = [i for n, i in enumerate(targets) if i not in targets[n + 1:]]
    data["edges"] = targets

    return visdcc.Network(id='net',
                          data=data,
                          options=dict(height='500px', width='90%'))
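The two enumerate comprehensions above deduplicate while preserving order, but each is an O(n^2) scan because the dicts are unhashable. A sketch of an equivalent order-preserving dedupe keyed on the 'id' field (assuming ids uniquely identify nodes and edges):

def dedupe_by_id(items, key='id'):
    # order-preserving dedupe on one field; avoids the O(n^2) membership scan
    seen = set()
    out = []
    for item in items:
        if item[key] not in seen:
            seen.add(item[key])
            out.append(item)
    return out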
Example 6
def search_videos(search_term, n_results):
    res = youtube_search(q=search_term, max_results=n_results)
    token = res[0]  # page token; unused in this variant
    videos = res[1]

    video_dict = {'youID': [], 'title': [], 'pub_date': [], 'channel': []}

    for video in videos:
        video_dict['youID'].append(video['id']['videoId'])
        video_dict['title'].append(video['snippet']['title'])
        video_dict['pub_date'].append(video['snippet']['publishedAt'])
        video_dict['channel'].append(video['snippet']['channelTitle'])

    print("added " + str(len(videos)) + " videos to a total of " +
          str(len(video_dict['youID'])))
    return video_dict
Example 7
def getYoutubeLink(search_term):
    search_results_raw = youtube_search(search_term)
    # round-trip through JSON to normalize the response to plain dicts/lists
    search_results_raw = json.loads(json.dumps(search_results_raw))
    search_results_raw = search_results_raw[1]

    for video in search_results_raw:
        video_id = video['id']['videoId']
        url = "http://www.youtube.com/watch?v=" + video_id
        pafy_video = pafy.new(url)  # renamed to avoid shadowing the loop variable
        youtube_length = int(pafy_video.length)
        spotify_length = int(track_list[search_term]) / 1000  # ms -> seconds
        difference = abs(youtube_length - spotify_length)
        if difference < 3:  # if the difference is within 3 seconds
            return url
    return ""
Example 8
def getReply(message):

    # Make the message lower case and without spaces on the end for easier handling
    message = message.lower().strip()
    # This is the variable where we will store our response
    answer = ""

    if "weather" in message:
        answer = "Get the weather using a weather API!"

        # is the keyword "wiki" in the message? Ex: "wiki donald trump"
    elif "wiki" in message:
        message = removeHead(message, "wiki")
      # Get the wikipedia summary for the request
        try:
        # Get the summary off Wikipedia
            answer = wikipedia.summary(message)
        except:
          # handle errors or non specificity errors (ex: there are many people named donald)
            answer = "Request was not found using the wiki library and Twilio. Try to be more specific?"

    # is the keyword "youtube" in the message? Ex: "youtube big bang flower road"
    elif "youtube" in message:
        message = removeHead(message, "youtube")
        try:
            answer = youtube_search(message)
        except Exception:
            answer = "Request was not found on YouTube. Try to be more specific or try again?"

    else:
        answer = "\n Welcome! These are the commands you may use: \nYOUTUBE: \"youtube request\" \nWIKI: \"wikipedia request\"\nWEATHER: \"place\n"

    # Twilio can not send messages over 1600 characters in one message. Wikipedia
    # summaries may have way more than this.
    # So shortening is required (1500 chars is a good bet):
    if len(answer) > 1500:
        answer = answer[0:1500] + "..."

    # return the formulated answer
    return answer
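A quick check of the routing (hypothetical inputs; removeHead is assumed to strip the leading keyword):

print(getReply("wiki Alan Turing"))              # Wikipedia summary
print(getReply("youtube big bang flower road"))  # YouTube search result
print(getReply("hello"))                         # falls through to the help text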
Example 9
def grab_videos(keyword, token):

    # call the "youtube_search" function from the "youtube_videos" file
    # call the Youtube API and list video information given keyword
    res = youtube_search(keyword, token)

    # save token and video information separately
    token = res[0]
    videos = res[1]

    for vid in videos:
        video_dict = {}
        # save each video's ID, type, and channel ID in the dictionary
        video_dict['youID'] = vid['id']['videoId']
        video_dict['type'] = keyword
        video_dict['channelID'] = vid['snippet']['channelId']
        video_ID.append(vid['id']['videoId'])
        # append each video's information to the list
        video_info.append(video_dict)

    # return page token, in order to retrieve the next page
    return token
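This variant assumes module-level video_ID and video_info accumulators. A minimal setup sketch (the keyword is hypothetical):

video_ID = []    # flat list of video IDs
video_info = []  # one dict per video

token = grab_videos("fidget spinners", None)   # first page
token = grab_videos("fidget spinners", token)  # next page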
sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials())

if os.getenv('SPOTIFY_PLAYLIST') is not None:
    playlist = os.getenv('SPOTIFY_PLAYLIST')
else:
    playlist = sys.argv[1]
results = sp.playlist(playlist, fields="tracks,next")
tracks = results['tracks']
show_tracks(tracks)
while tracks['next']:
    tracks = sp.next(tracks)
    show_tracks(tracks)
savedindex = 0
try:
    with open(last_saved_file, 'r') as savedspot:
        savedindex = int(savedspot.read())  # stored as text, compared as int
except (FileNotFoundError, ValueError):
    print("No save detected")
for i, song in enumerate(song_list):
    if savedindex > 0 and i < savedindex:
        continue
    video = youtube_search(song)[1][0]
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download([
            "https://www.youtube.com/watch?v={}".format(video['id']['videoId'])
        ])
    with open(last_saved_file, 'w') as filetowrite:
        filetowrite.write("{}".format(i))
os.remove(last_saved_file)
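The script above assumes show_tracks fills song_list and that ydl_opts and last_saved_file are defined earlier. A sketch of plausible values (the audio-extraction options are one common youtube_dl setup, not taken from the source):

last_saved_file = 'last_saved.txt'
song_list = []  # filled by show_tracks() with 'artist - title' strings

ydl_opts = {
    'format': 'bestaudio/best',
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
    }],
}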
from youtube_videos import youtube_search
import json
test = youtube_search("coronavirus|covid|wuhan")
print(test)
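In the search API's q parameter the pipe character acts as a boolean OR, so this single query matches videos for any of the three terms.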
def get_videos_youtube(input_file, output_file):
    # utf-8-sig strips the byte-order mark, so the first column reads as 'title'
    with open(input_file + '.csv', newline='', encoding='utf-8-sig') as infile:
        reader = csv.DictReader(infile)
        item_list = []
        for row in reader:
            '''
            Change the input column names and # here.
            If changing # of columns- another change should be made later in the code.
            '''
            item_list.append(
                [row['title'], row['type'], row['year'], row['wiki-en']])
        '''
        Change the output column names and # here.
        If changing # of columns- another change should be made later in the code.
        '''
        video_id_list = [('Name', 'Type', 'Year', 'Wiki', 'VideoID')]
        count = 1
        error_list = []
        for item in item_list:
            try:
                '''
                Change the search query here. 
                '''
                search = youtube_search("'" + item[0] + " " + item[1] + "'")
                videos = search[1]
                rand = random.sample(range(10), 8)
                try:
                    for i in range(8):
                        video_id_list.append(
                            (item[0], item[1], item[2], item[3],
                             videos[rand[int(i)]]['id']['videoId']))
                except IndexError:
                    print('IndexError occurred when adding item ', item[0], '.')

                token = ''
                videos_unrelated = []
                # page forward so the final result set comes from page 3
                # (to try to get unrelated videos)
                for i in range(3):
                    search_unrelated = youtube_search(
                        "'" + item[0].split(' ', 1)[0] + "'", token=token)
                    token = search_unrelated[0]
                    videos_unrelated = search_unrelated[1]
                video_id_list.append((item[0], item[1], item[2], item[3],
                                      videos_unrelated[0]['id']['videoId']))
                video_id_list.append((item[0], item[1], item[2], item[3],
                                      videos_unrelated[1]['id']['videoId']))
                print('Latest videos for: ', item[0])
                print('Count: ', count, '\n')
                count = count + 1
            except errors.HttpError:
                print('HttpError occurred with: ', item[0])
                error_list.append(item[0])

        print(video_id_list)
        print("error list: ", error_list)

        with open(output_file + '.csv', 'w', newline='',
                  encoding='utf-8') as output:
            writer = csv.writer(output)
            writer.writerows(video_id_list)
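A hypothetical call, given that the function appends '.csv' to both names itself:

get_videos_youtube('movies', 'movies_with_videos')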
Example 13
import sys
# sys.path.append('/home/spnichol/Dropbox/youtube_tutorial/')
from youtube_videos import youtube_search
import pandas as pd
import json


test = youtube_search("spinners")

video_dict = {'youID':[], 'title':[], 'pub_date':[]}

just_json = test[1]
print(len(just_json))


for video in just_json:
    print(video['snippet']['title'])

token = test[0]
youtube_search("spinners", token=token)



video_dict = {'youID':[], 'title':[], 'pub_date':[]}

def grab_videos(keyword, token=None):
    res = youtube_search(keyword, token=token)
    token = res[0]
    videos = res[1]
    for vid in videos:
        video_dict['youID'].append(vid['id']['videoId'])
        video_dict['title'].append(vid['snippet']['title'])
        video_dict['pub_date'].append(vid['snippet']['publishedAt'])
    return token
Example 14
import os
import flask
import google.oauth2.credentials
import google_auth_oauthlib.flow
import googleapiclient.discovery
import sys
sys.path.append("C:/Users/owner/Desktop/CS/hackbu2019/CollabFinder/youtube_tutorial/")
from youtube_videos import youtube_search

test = youtube_search("spinners")
print(test)
Example 15
from youtube_videos import youtube_search
from youtube_videos import geo_query
import httplib2

test = youtube_search("< movie name > trailer")
newlis = test[1]
newdic = dict(newlis[0])
dic2 = dict(newdic['id'])
id = str(dic2['videoId'])
print(id)

link = 'https://www.youtube.com/watch?v='
link = link + id
print(link)

dic3 = dict(geo_query(id))

list2 = [dic3['items']]

dictfinal = dict(list2[0][0]['statistics'])

views = dictfinal['viewCount']
likes = dictfinal['likeCount']
dislikes = dictfinal['dislikeCount']
comments = dictfinal['commentCount']

print('views:: ', views)
print('likes:: ', likes)
print('dislikes:: ', dislikes)
print('comments:: ', comments)
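Note that likeCount and dislikeCount can be missing when a channel hides ratings (and the current API no longer returns public dislike counts at all), so a defensive variant reads the same stats dict with .get:

views = stats.get('viewCount', 'n/a')
likes = stats.get('likeCount', 'n/a')
dislikes = stats.get('dislikeCount', 'n/a')
comments = stats.get('commentCount', 'n/a')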
Example 16
import sys
sys.path.append('/home/spnichol/Dropbox/youtube_tutorial/')
from youtube_videos import youtube_search, geo_query
import pandas as pd
import json

test = youtube_search("spinners",
                      location="40.730610, -73.935242",
                      location_radius="50km")

geo_test = geo_query('r2GYzQvfARo')

location_dict = {"youID": [], "lat": [], "lon": []}
for video in test[1]:
    location_dict['youID'].append((video['id']['videoId']))
    geo = geo_query(video['id']['videoId'])
    location_dict['lat'].append(
        geo['items'][0]['recordingDetails']['location']['latitude'])
    location_dict['lon'].append(
        geo['items'][0]['recordingDetails']['location']['longitude'])
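Most videos carry no recordingDetails, so the loop above raises KeyError for anything without coordinates. A defensive sketch under the same response shape:

for video in test[1]:
    vid_id = video['id']['videoId']
    geo = geo_query(vid_id)
    items = geo.get('items', [])
    # recordingDetails is absent for most videos; skip those
    location = items[0].get('recordingDetails', {}).get('location') if items else None
    if location:
        location_dict['youID'].append(vid_id)
        location_dict['lat'].append(location['latitude'])
        location_dict['lon'].append(location['longitude'])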
Example 17
from youtube_videos import youtube_search
import json

test = youtube_search('spinner')
# with open("./result0.json",'w',encoding='utf-8') as json_file:
#     json.dump(test[1][0],json_file,ensure_ascii=False, indent=4)