# --- Example 1 ---
class YoutubeAPI(AliceSkill):
    """
    Author: Psychokiller1888
    Description: Access and manage your youtube account
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: the original wrapped this plain assignment in a
        # try/except ValueError that could never trigger — nothing here can
        # raise. The API client (and key validation) is created in onStart().
        self._youtube: Optional[YouTubeDataAPI] = None

    def onStart(self):
        """Create the API client and fail the skill start on an invalid key."""
        super().onStart()
        self._youtube = YouTubeDataAPI(self.getConfig('youtubeApiKey'))
        if not self._youtube.verify_key():
            raise SkillStartingFailed('Youtube api key not valid')

    @IntentHandler('GetYoutubeChannelStats')
    def getChannelStats(self, session: DialogSession, **_kwargs):
        """Answer the user with the configured channel's statistics.

        NOTE(review): the channel id queried is still an empty string, which
        looks unfinished — TODO confirm which channel id should be used.
        """
        metadata = self._youtube.get_channel_metadata('')

        if not metadata:
            self.endDialog(sessionId=session.sessionId,
                           text=self.randomTalk(text='error'))
            return

        # BUG FIX: `sub` was an undefined name (NameError at runtime).
        # Presumably the subscriber count was intended — TODO confirm the
        # key against youtube-data-api's channel metadata parser.
        sub = metadata.get('subscription_count', 'unknown')
        self.endDialog(sessionId=session.sessionId,
                       text=self.randomTalk(text='myText', replace=[sub]))
# --- Example 2 ---
class YTSearch:
    """Search YouTube for video ids matching mood-related keywords."""

    def __init__(self):
        # NOTE(review): 'api_key' is a placeholder — a real key must be
        # supplied (ideally from config/env, never hard-coded).
        self.api_key = 'api_key'
        self.yt = YouTubeDataAPI(self.api_key)

    def get_videos(self, key_words):
        """Return a shuffled list of unique embeddable video ids.

        key_words: iterable of search phrases (e.g. calming vs energising
        music). The ~200-result budget is split evenly across them.
        Returns [] for empty input or an invalid API key.
        """
        # Guard: empty input would otherwise divide by zero below.
        if not key_words:
            return []
        # Hoisted out of the loop — key validity does not change per keyword.
        if not self.yt.verify_key():
            return []
        per_query = round(200 / len(key_words))
        video_ids = []
        for keyword in key_words:
            results = self.yt.search(parser=None,
                                     q=keyword,
                                     max_results=per_query,
                                     type="videos",
                                     videoEmbeddable="true")
            video_ids.extend(result['id']['videoId'] for result in results)
        # BUG FIX: np.unique() returns a new array and the original discarded
        # it, so duplicates were never removed. Deduplicate (order-preserving;
        # order is irrelevant anyway since we shuffle next).
        video_ids = list(dict.fromkeys(video_ids))
        random.shuffle(video_ids)
        return video_ids

    def update(self, key_word):
        """Map a pulse state to a fresh list of video ids.

        "high" (pulse too high) -> calming searches to slow the user down;
        "low" (pulse too low)   -> energising searches to liven the user up.
        Any other key_word returns [].
        """
        searches = {
            "high": ["calm music", "sad music"],
            "low": ["hard rock music", 'dance music 90s'],
        }
        if key_word not in searches:
            return []
        return self.get_videos(searches[key_word])
# --- Example 3 ---
import pickle
import time

# Topic information.
TOPIC = 'Coronavirus'
SEARCH_TERMS = ['coronavirus']
# Expanded search terms tried previously (kept for reference):
# ['Judy Mikovits', 'Bill Gates coronavirus', 'QAnon coronavirus',
#  'Coronavirus Vaccination', 'Wuhan lab', 'bioweapon coronavirus',
#  '5G coronavirus', 'coronavirus flu', 'dean koontz darkness']
VIDEOS_PER_QUERY = 10

# YouTube Data API client.
# NOTE(review): placeholder key — supply a real key before running.
API_KEY = 'your api key'

yt_api = YouTubeDataAPI(API_KEY)
# BUG FIX: the original called verify_key() and silently discarded its
# boolean result, so an invalid key was never reported. Fail fast instead.
if not yt_api.verify_key():
    raise ValueError('invalid YouTube API key')


def Metadata(vid_list):
    """Collect the videos' metadata for the given list of video ids.

    BUG FIX: was a lambda assignment (PEP 8 E731); converted to a def.
    The name is kept unchanged so callers (get_metadata below) still work.
    """
    return yt_api.get_video_metadata(vid_list)


def get_metadata(vid_list, VID_SEEN):
    if not vid_list: return [], [0]
    try:
        metadata_list = Metadata(vid_list)
    except Exception as exc:
        print('>>> cannot retrieve the metadata_list for [{}...] as {}'.format(
            vid_list[0], exc))
        return [], [0]
    VID_SEEN = set(list(VID_SEEN))
# --- Example 4 ---
class YTSearch:
    """Fetch YouTube playlist videos for mood-based music selection."""

    def __init__(self):
        # SECURITY(review): a real API key is hard-coded in source control —
        # it should be rotated and loaded from an env variable/config file.
        self.api_key = 'AIzaSyC1GsiEj0D0zcQAyayaaCNa8oXeh9vU2bw'
        self.yt = YouTubeDataAPI(self.api_key)

    def get_playlists(self, key_word):
        """Return (ids, links) of up to 5 playlists matching *key_word*.

        Returns two empty lists when the API key is invalid.
        """
        playlist_ids = []
        playlist_links = []
        if self.yt.verify_key():
            results = self.yt.search(parser=None,
                                     q=key_word,
                                     max_results=5,
                                     search_type="playlist")
            for result in results:
                playlist_id = result['id']['playlistId']
                playlist_ids.append(playlist_id)
                playlist_links.append(
                    "https://www.youtube.com/playlist?list=" + playlist_id)
        return playlist_ids, playlist_links

    def get_videos_from_playlist(self, playlists_ids):
        """Return (video ids, watch links) for every video in the playlists."""
        video_ids = []
        video_links = []
        # Deduplicate playlist ids so each playlist is fetched once.
        for playlist_id in set(playlists_ids):
            videos = self.yt.get_videos_from_playlist_id(playlist_id=playlist_id)
            for video in videos:
                video_ids.append(video['video_id'])
                video_links.append(
                    "https://www.youtube.com/watch?v=" + video['video_id'])
        return video_ids, video_links

    # TODO: add a counter so a user does not get repeating items.

    def _refresh(self, search_term, label):
        """Search for *search_term*, dump ids+links to Data/<label>.txt as JSON,
        and return the list of video links."""
        playlist_ids, _ = self.get_playlists(search_term)
        video_ids, video_links = self.get_videos_from_playlist(playlist_ids)
        data = {label: [{'video_id': vid, 'video_link': link}
                        for vid, link in zip(video_ids, video_links)]}
        with open('Data/{}.txt'.format(label), 'w') as outfile:
            json.dump(data, outfile)
        return video_links

    def update(self, key_word):
        """Refresh the cached data (intended once per 24 h) for a pulse state.

        "high" (pulse too high) -> calm music to slow the user down;
        "low"  (pulse too low)  -> hard rock to liven the user up.
        Returns the video links written, or [] for an unknown key_word.

        FIX: the two branches were ~18 duplicated lines differing only in the
        search term and output label — extracted into _refresh; stray debug
        print() calls removed.
        """
        if key_word == "high":
            return self._refresh("calm music", 'YT_data_high')
        if key_word == "low":
            return self._refresh("hard rock", 'YT_data_low')
        return []
import pandas as pd
from youtube_api import YouTubeDataAPI
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

# VADER sentiment analyser used to score each comment's text below.
analyser = SentimentIntensityAnalyzer()

# SECURITY NOTE(review): a real-looking API key is hard-coded in source —
# it should be rotated and loaded from an env variable or config file.
api_key = 'AIzaSyBR2kc8R5EzD1rnOjyXZfEL1FOGLKojsg4'
yt = YouTubeDataAPI(api_key)

# video parts: which sections of the video resource to request from the API.
video_parts = ['statistics', 'snippet', 'contentDetails', 'topicDetails']

#check api key is valid
if yt.verify_key():
    # Unparsed metadata for one hard-coded video id ('szby7ZHLnkA').
    sonic = yt.get_video_metadata('szby7ZHLnkA', parser=None, part=video_parts)

    # Up to 100 comments for the same video, as a DataFrame (one row each).
    sonic_comments = yt.get_video_comments('szby7ZHLnkA', max_results=100)
    df_comments = pd.DataFrame(sonic_comments)

    # Accumulator: one row per comment plus its VADER sentiment scores.
    df_graph_data = pd.DataFrame(columns=[
        'comment_id', 'commenter_channel_id', 'channel_country', 'text',
        'date', 'neg', 'neu', 'pos', 'compound'
    ])

    for index, row in df_comments.iterrows():
        # NOTE(review): iloc[0] always reads the FIRST comment's channel id
        # regardless of `row` — looks like a bug; presumably
        # row.commenter_channel_id was intended. Confirm before changing.
        channel_id = df_comments.iloc[0].commenter_channel_id
        channel_data = yt.get_channel_metadata(channel_id)

        # Sentiment scores (neg/neu/pos/compound) for this comment's text.
        score = analyser.polarity_scores(row['text'])
        graph_row = {
            'comment_id': row['comment_id'],