Example #1
def setup():
	query = raw_input("Enter your query: ")
	argparser.add_argument("--q", help="Search term", default=query)  # the default is the query entered above
	argparser.add_argument("--max-results", help="Max results", default=50)  # search.list accepts maxResults values from 0 to 50
	options = argparser.parse_args()

	page_tokens = {}
	responses = []
	search_response = youtube.search().list( # Call the search.list method to retrieve results matching the specified query term.
		 q=options.q,
		 type="video",
		 part="id,snippet",
		 maxResults=options.max_results
		).execute()	
	responses.append(search_response)

	for i in range(0, 4):
		# Follow the nextPageToken from the previous response to fetch the next page.
		page_tokens['p' + str(i)] = search_response['nextPageToken']
		search_response = youtube.search().list(
		 q=options.q,
		 type="video",
		 part="id,snippet",
		 maxResults=options.max_results,
		 pageToken=page_tokens['p' + str(i)]
		).execute()
		responses.append(search_response)

	print json.dumps(responses, indent=4, sort_keys=True)
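Example #1 leans on module-level pieces that the excerpt does not show: the shared argparser from oauth2client.tools, a json import, and a youtube client built with an API key. A minimal sketch of that assumed setup, plus a defensive variant of the paging loop that stops when nextPageToken disappears (DEVELOPER_KEY and fetch_pages are illustrative names, not part of the original):

import json

from apiclient.discovery import build
from oauth2client.tools import argparser  # shared parser used by setup() above

DEVELOPER_KEY = "REPLACE_ME"  # assumption: an API-key based client, no OAuth

youtube = build("youtube", "v3", developerKey=DEVELOPER_KEY)

def fetch_pages(options, pages=5):
    """Collect several result pages, stopping once nextPageToken runs out."""
    responses = []
    page_token = None
    for _ in range(pages):
        params = dict(q=options.q, type="video", part="id,snippet",
                      maxResults=options.max_results)
        if page_token:
            params["pageToken"] = page_token
        response = youtube.search().list(**params).execute()
        responses.append(response)
        page_token = response.get("nextPageToken")  # absent on the last page
        if not page_token:
            break
    return responses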
Example #2
    def ping_youtube(self, *args, **kwargs):
        '''pings youtube for number of subscribers on a given channel ID'''

        #Youtube API V3 authentication
        #External modules and dependencies are required
        try:
            argparser.add_argument("--message", required=False,
                        help="Text of message to post.")
            args = argparser.parse_args()
            youtube = get_authenticated_service(args)
            #headers = ['youtube_id', 'video_id']

        #Get statistics from channel id 
            for i,ID in enumerate(self.data_dict['youtube_channel_id']):
                subs = youtube.channels().list(part="statistics", 
                    id=ID).execute()
                if subs['items']:
                    #print 'No. of subscribers:',subs['items'][0]['statistics']['subscriberCount']
                    self.data_dict['subscribers '+self.stamp].append(subs['items'][0]['statistics']['subscriberCount'])
                else:
                    #print 'No youtube subscribers found'
                    self.data_dict['subscribers '+self.stamp].append('-')

        #Get video views for given id
            for i,ID in enumerate(self.data_dict['video_id']):
                video = youtube.videos().list(part="statistics", 
                    id=ID).execute()
                if video['items']:
                    #print 'No. of video views:',video['items'][0]['statistics']['viewCount']
                    self.data_dict['video views '+self.stamp].append(video['items'][0]['statistics']['viewCount'])
                else:
                    #print 'No youtube video was found' 
                    self.data_dict['video views '+self.stamp].append('-')
        except Exception, error:
            self.log_error('youtube')
Example #3
def prepare_parser(tipo):
    now = datetime.now()
    one_day_ago = (now - timedelta(days=1)).strftime("%Y-%m-%d")
    one_year_ago = (now - timedelta(days=tipo)).strftime("%Y-%m-%d")
    argparser.add_argument("--metrics", help="Report metrics",
        default="views,comments,likes,shares,subscribersGained,subscribersLost")
    argparser.add_argument("--start-date", default=one_year_ago,
        help="Start date, in YYYY-MM-DD format")
    argparser.add_argument("--end-date", default=one_day_ago,
        help="End date, in YYYY-MM-DD format")
    argparser.add_argument("--max-results", help="Max results", default=10)
    argparser.add_argument("--sort", help="Sort order", default="-views")
Example #4
    def delete_video(self, videoid):

        argparser.add_argument("--video-id")

        arguments = ["--video-id=" + videoid]

        args = argparser.parse_args(arguments)

        tempyou = self.get_authenticated_service(args)

        #try:
        self.pre_delete_video(tempyou, videoid)
        print "Done deleting... Success!"
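The excerpt delegates to self.pre_delete_video, which is not shown and whose name hints it may do more than just delete. If the goal is simply to remove the video, the underlying Data API call is videos.delete; a minimal sketch (delete_video_by_id is an illustrative name, not the original helper):

def delete_video_by_id(youtube, videoid):
    # videos.delete removes a video owned by the authorized channel;
    # it requires OAuth credentials, not just an API key.
    youtube.videos().delete(id=videoid).execute()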
Example #5
def get_top_ten():
    mysearch = "Drake"
    if request.method == "POST":
        rawsearch = request.form["searchstr"]
        mysearch = rawsearch + " music video"

        spotify_results = spotify.search(q="artist:" + rawsearch, type="artist")
        items = spotify_results["artists"]["items"]
    #        if len(items) > 0:
    #            artist = items[0]
    #            Artist_URL = artist['name'], artist['images'][0]['url']
    #            return Artist_URL
    #        top_track_list = spotify.artist_top_tracks(Artist_URL)
    #        x = ""
    #        for track in top_track_list['tracks'][:1]:
    #            track_name = track['name']
    #            x = x + "</br>" + track_name
    #            print(x)

    #            return track_name

    #        if len(mysearch) > 1:
    #            urn = mysearch[1]
    #        else:
    #            urn = 'spotify:artist:3TVXtAsR1Inumwj472S9r4'

    #        sp = spotipy.Spotify()
    #        response = sp.artist_top_tracks(urn)

    #        for track in response['tracks']:
    #            return(track['name'])

    #        mysearch = track_name + "music video"

    argparser = argparse.ArgumentParser()
    print(mysearch)
    argparser.add_argument("--q", help="Search term", default=mysearch)
    argparser.add_argument("--max-results", help="Max results", default=1)
    args = argparser.parse_args()

    name = "Youtify"
    mysearchreturn = False
    try:
        mysearchreturn = youtube_search(args)
    except HttpError as e:
        print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
    if mysearchreturn == False:
        return render_template("Youtify.html", name=name)

    return render_template("Return.html", name=name, myvidid=json.dumps(mysearchreturn), myheight="320")
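Several examples on this page, including Example #5, call a youtube_search(args) helper defined elsewhere in their source files. A sketch consistent with how the value is used here (embedded as a video id, with False meaning no result); DEVELOPER_KEY is a placeholder and the return contract is an assumption inferred from the caller:

from apiclient.discovery import build

DEVELOPER_KEY = "REPLACE_ME"

def youtube_search(options):
    youtube = build("youtube", "v3", developerKey=DEVELOPER_KEY)
    search_response = youtube.search().list(
        q=options.q,
        type="video",
        part="id,snippet",
        maxResults=options.max_results
    ).execute()
    items = search_response.get("items", [])
    if not items:
        return False
    # The Flask view embeds the returned value as a video id, so hand back
    # the first result's videoId.
    return items[0]["id"]["videoId"]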
Example #6
def handle(text, mic, profile):

   argparser.add_argument("--q", help="Search term", default="Google")
   argparser.add_argument("--max-results", help="Max results", default=25)
   args = argparser.parse_args()
   
   #asks the song name
   mic.say("Which song do I have to search on youtube?")
   quote = mic.activeListen()
   
   #quote = raw_input("Give the video name ")
   
   try:
      youtube_search(quote, 25)
   except HttpError, e:
      print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
Example #7
 def get_videos_from_channel_after_date(self, channel_id, date_after):
     argparser.add_argument("--q", help="Search term", default="Google")
     argparser.add_argument("--max-results", help="Max results", default=25)
     args = argparser.parse_args()
     options = args
     search_response = self.youtube_build.search().list(
     # channelId=channel_id,
     # part="id,snippet",
     # maxResults=options.max_results,
     # publishedAfter=date_after,
     # type="video"
         channelId=channel_id,
         part="id,snippet",
         maxResults=options.max_results,
         publishedAfter=date_after
     ).execute()
     print(search_response)
Example #8
 def __init__(self, **kwargs):
     self.baseuri = kwargs.get("baseuri")
     o = urlparse.urlparse(self.baseuri)
     self.scheme = o[0]
     host_port = o[1].split(":")
     self.host = host_port[0]
     self.path = o.path
     self.port = None
     if len(host_port) > 1:
         self.port = host_port[1]
     self.cookie = kwargs.get("cookie")
     self.client_secrets_file = kwargs.get("client_secrets_file")
     self.client_oauth2_file = kwargs.get("client_oauth2_file")
     self.data_scratch = kwargs.get("data_scratch")
     self.ffmpeg = kwargs.get("ffmpeg")
     self.ffprobe = kwargs.get("ffprobe")
     self.category = kwargs.get("category")
     self.keywords = kwargs.get("keywords")
     self.privacyStatus = kwargs.get("privacyStatus")
     self.text_position = kwargs.get("text_position")
     self.store = HatracStore(
         self.scheme, 
         self.host,
         {'cookie': self.cookie}
     )
     self.catalog = PollingErmrestCatalog(
         self.scheme, 
         self.host,
         self.path.split('/')[-1],
         {'cookie': self.cookie}
     )
     self.mail_server = kwargs.get("mail_server")
     self.mail_sender = kwargs.get("mail_sender")
     self.mail_receiver = kwargs.get("mail_receiver")
     self.logger = kwargs.get("logger")
     argparser.add_argument("--config", required=True, help="YouTube configuration file")
     self.args = argparser.parse_args()
     self.args.category = self.category
     self.args.keywords = self.keywords
     self.args.privacyStatus = self.privacyStatus
     self.args.noauth_local_webserver = True
     self.logger.debug('Upload YouTube Client initialized.')
Example #9
    def ping_youtube(self):
        '''pings youtube for number of subscribers on a given channel ID'''

        #Youtube API V3 authentication
        #External modules and dependencies are required
        argparser.add_argument("--message", required=False,
                    help="Text of message to post.")
        args = argparser.parse_args()
        youtube = get_authenticated_service(args)

        #Get statistics from channel id 
        for i,ID in enumerate(self.data_dict['youtube_id']):
            try:
                subs = youtube.channels().list(part="statistics", 
                    id=ID).execute()
                print 'No. of subscribers:',subs['items'][0]['statistics']['subscriberCount']
                if i>0:
                    self.data_dict['subscribers '+self.stamp].append(subs['items'][0]['statistics']['subscriberCount'])
                else:
                    self.data_dict['subscribers '+self.stamp] = [subs['items'][0]['statistics']['subscriberCount']]
            except Exception, error:
                self.log_error(error)
Example #10
def setup_args():
    argparser.add_argument("--videoid", help="Required; ID for video.")
    argparser.add_argument("--debug", help="Prints all video comments read.", action="store_true")
    argparser.add_argument("--showreason", help="Prints comment that classifies video as weird.", action="store_true")
    args = argparser.parse_args()
    if not args.videoid:
        exit("Please specify videoid using the --videoid= parameter.")
    return args
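The --debug and --showreason flags suggest that the rest of this script walks the video's comments; that part is not in the excerpt. A hedged sketch of such a follow-up step using commentThreads.list (fetch_comments is an illustrative name):

def fetch_comments(youtube, args):
    # Page through the video's top-level comments; with --debug each
    # comment is printed as it is read.
    comments = []
    request = youtube.commentThreads().list(
        part="snippet",
        videoId=args.videoid,
        textFormat="plainText",
        maxResults=100
    )
    while request is not None:
        response = request.execute()
        for item in response.get("items", []):
            text = item["snippet"]["topLevelComment"]["snippet"]["textDisplay"]
            comments.append(text)
            if args.debug:
                print(text)
        request = youtube.commentThreads().list_next(request, response)
    return comments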
Example #11
def helloWorld():

  mysearch = "bunnies in cups"
  if request.method == 'POST':
    rawsearch = request.form['searchstr']
    mysearch = rawsearch + " trailer"
    

  argparser = argparse.ArgumentParser()
  print(mysearch)
  argparser.add_argument("--q", help="Search term", default=mysearch)
  argparser.add_argument("--max-results", help="Max results", default=10)
  args = argparser.parse_args()
  

  title = "bunnies in cups"
  overview = ""

  if request.method == 'POST':
    s = tmdb.Search()
    r = s.movie(query=str(rawsearch))
  
    print(s.results)
    if len(s.results) > 0:
      title = s.results[0]['title']
      overview = s.results[0]['overview']
  
  name = "Connor and Zach's Mashup"
  mysearchreturn = False
  try:
    mysearchreturn = youtube_search(args)
  except HttpError as e:
    print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))

  if request.method == 'POST':
    if mysearchreturn == False or len(s.results) == 0:
      return render_template('basic.html', name=name, myvidid=json.dumps(mysearchreturn), myheight='320', title=title, overview=overview)

  return render_template('hello.html', name=name, myvidid=json.dumps(mysearchreturn), myheight='320', title=title, overview=overview)
Example #12
 def render(self, search_args):
     argparser.add_argument("--q", help="Search term", default=search_args)
     argparser.add_argument("--max-results", help="Max results", default=25)
     argparser.add_argument("--type", help="Type of result", default="video")
     args = argparser.parse_args()
     try:
         self.__youtube_search(args)
     except HttpError as e:
         print ("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
Example #13
def main():
    """Main entry point for the script."""
    # The "developerkey" option specifies the API key that uniquely
    # identifies your Google account for accessing YouTube's Data API.
    argparser.add_argument("--developerkey", help="Required key " +
                           "for accessing YouTube's Data API.")
    # The "videoid" option specifies the YouTube video ID that uniquely
    # identifies the video for which the comments will be fetched.
    argparser.add_argument("--videoid", help="Required ID for video " +
                           "for which the comments will be fetched.")
    args = argparser.parse_args()

    if not args.developerkey:
        exit("Please specify key using the --developerkey= parameter.")
    if not args.videoid:
        exit("Please specify videoid using the --videoid= parameter.")

    try:
        service = build('youtube', 'v3', developerKey=args.developerkey)
        result = scrape_all(service, args.videoid)
        comments, replies, unique_count = result[0], result[1], result[2]

        # Convert all comments and replies into something
        # comprehensible to a word counter
        fullwordlist = strip_non_alpha(comments+replies)
        wordlist = remove_stopwords(fullwordlist, stopwords)
        dictionary = word_list_to_freq_dict(wordlist)
        sorteddict = sort_freq_dict(dictionary)

        # Gives us the top 10 words from frequency:
        for word, freq in sorteddict[:10]:
            print word, freq

        print "comment count: %d" % unique_count

    except HttpError, e:
        print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
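The main() above relies on several helpers the excerpt omits (scrape_all, the word-frequency pair, and so on). The text-processing helpers are easy to reconstruct from how they are called; a sketch with assumed bodies (only the names come from the excerpt):

import re

def strip_non_alpha(texts):
    # Lower-case every comment/reply and keep alphabetic tokens only.
    words = []
    for text in texts:
        words.extend(re.findall(r"[a-z']+", text.lower()))
    return words

def remove_stopwords(wordlist, stopwords):
    return [word for word in wordlist if word not in stopwords]

def word_list_to_freq_dict(wordlist):
    return {word: wordlist.count(word) for word in set(wordlist)}

def sort_freq_dict(freqdict):
    # Highest-frequency words first, as (word, frequency) pairs.
    return sorted(freqdict.items(), key=lambda pair: pair[1], reverse=True)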
Example #14
def search_byterm(keyword):
    for word in keyword:
        key='\"'+str(word)+'\"'
        search_term=key+'"movie review" "vlog"'
        print search_term
        argparser.__init__()  # reset the shared parser so the flags can be re-added on each loop iteration
        argparser.add_argument("--q", help="Search term", default=search_term)
        argparser.add_argument("--max-results", help="Max results", default=50)
        argparser.add_argument("--order", help="Order", default='Date')
        args = argparser.parse_args()

        try:
            youtube_search(args)
        except HttpError, e:
            print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
Example #15
  print "%s marked as spam successfully" % (comment["id"])


# Call the API's comments.delete method to delete an existing comment.
def delete_comment(youtube, comment):
  youtube.comments().delete(
    id=comment["id"]
  ).execute()

  print "%s deleted successfully" % (comment["id"])


if __name__ == "__main__":
  # The "videoid" option specifies the YouTube video ID that uniquely
  # identifies the video for which the comment will be inserted.
  argparser.add_argument("--videoid",
    help="Required; ID for video for which the comment will be inserted.")
  argparser.add_argument("--dfile",
    help="Required; Destination file.")
  # The "text" option specifies the text that will be used as comment.
  # argparser.add_argument("--text", help="Required; text that will be used as comment.")
  args = argparser.parse_args()

  if not args.videoid:
    exit("Please specify videoid using the --videoid= parameter.")
  #if not args.text:
  #  exit("Please specify text using the --text= parameter.")

  youtube = get_authenticated_service(args)
  # All the available methods are used in sequence just for the sake of an example.
  try:
    video_comment_threads = get_comment_threads(youtube, args.videoid, args.dfile)
Example #16
		if search_result["id"]["kind"] == "youtube#video":
			videos.append("http://www.youtube.com/watch?v=" + search_result["id"]["videoId"])
	return search_response.get("nextPageToken")
	
def download_links(linkList, pathWrite):
	''' Download the links '''
	name_num = 0

	for l in linkList:
		y = YouTube(l)
		y.set_filename(str(name_num))
		try:
			video = y.get('mp4', '720p')
			video.download(pathWrite)
			name_num += 1
		except:
			print("Video " + l + " does not meet criteria (mp4,720p)")
				
if __name__ == "__main__":
	argparser.add_argument("--q", help="Search term", default=SEARCH_WORD)
	argparser.add_argument("--max-results", help="Max results", default=50)
	args = argparser.parse_args()
	nextPageToken = None  # assumed: youtube_search treats None as "first page"
	try:
		for x in range(0, 10):
			nextPageToken = youtube_search(args, nextPageToken)
	except HttpError as e:
		print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
	
	#Downloading list
	download_links(videos, DOWNLOADLOCATION)
	#print("Done")
Example #17
def list_playlist_localizations(youtube, playlist_id):
  results = youtube.playlists().list(
    part="snippet,localizations",
    id=playlist_id
  ).execute()

  localizations = results["items"][0]["localizations"]

  for language, localization in localizations.iteritems():
    print ("Playlist title is '%s' and description is '%s' in language '%s'"
           % (localization["title"], localization["description"], language))


if __name__ == "__main__":
  # The "action" option specifies the action to be processed.
  argparser.add_argument("--action", help="Action")
  # The "playlist_id" option specifies the ID of the selected YouTube playlist.
  argparser.add_argument("--playlist_id",
    help="ID for playlist for which the localization will be applied.")
  # The "default_language" option specifies the language of the playlist's default metadata.
  argparser.add_argument("--default_language", help="Default language of the playlist to update.",
    default="en")
  # The "language" option specifies the language of the localization that is being processed.
  argparser.add_argument("--language", help="Language of the localization.", default="de")
  # The "title" option specifies the localized title of the playlist to be set.
  argparser.add_argument("--title", help="Localized title of the playlist to be set.",
    default="Localized Title")
  # The "description" option specifies the localized description of the playlist to be set.
  argparser.add_argument("--description", help="Localized description of the playlist to be set.",
    default="Localized Description")
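Example #17 only shows the list action, yet its flags (--language, --title, --description, --default_language) imply a companion action that writes a localization. A sketch of what that likely looks like, modelled on the public playlist-localization sample (the exact body handling is assumed):

def set_playlist_localization(youtube, playlist_id, default_language,
                              language, title, description):
    # Read the playlist, set its default language, attach (or replace) the
    # localization for the requested language, then write it back.
    results = youtube.playlists().list(
        part="snippet,localizations",
        id=playlist_id
    ).execute()
    playlist = results["items"][0]

    playlist["snippet"]["defaultLanguage"] = default_language
    playlist.setdefault("localizations", {})[language] = {
        "title": title,
        "description": description
    }

    youtube.playlists().update(
        part="snippet,localizations",
        body=playlist
    ).execute()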
Example #18
  video_response = youtube.videos().list(
    id=video_ids,
    part='snippet, recordingDetails'
  ).execute()

  videos = []

  # Add each result to the list, and then display the list of matching videos.
  for video_result in video_response.get("items", []):
    videos.append("%s, (%s,%s)" % (video_result["snippet"]["title"],
                              video_result["recordingDetails"]["location"]["latitude"],
                              video_result["recordingDetails"]["location"]["longitude"]))

  print "Videos:\n", "\n".join(videos), "\n"


if __name__ == "__main__":
  #argparser.add_argument("--q", help="Search term", default="Singapore")
  argparser.add_argument("--location", help="Location", default="1.3,103.8")
  argparser.add_argument("--location-radius", help="Location radius", default="100km")
  argparser.add_argument("--max-results", help="Max results", default=25)
  #TODO: get the Freebase topic at https://www.freebase.com/
  argparser.add_argument("--topicId", help="Topic Id", default="/m/03mv61") #saf
  #argparser.add_argument("--channelId", help="Channel Id", default="UC4BCUrNe2X5UJkvzeRG12gQ")
  args = argparser.parse_args()

  try:
    youtube_search(args)
  except HttpError, e:
    print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
Example #19
    credentials = run_flow(flow, storage, args)

  return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
    http=credentials.authorize(httplib2.Http()))

def add_video_to_playlist(youtube, videoID, playlistID):
	add_video_request = youtube.playlistItems().insert(
		part="snippet",
		body={
			'snippet': {
				'playlistId': playlistID,
				'resourceId': {
					'kind': 'youtube#video',
					'videoId': videoID
				}
			}
		}
	).execute()


if __name__ == '__main__':
  argparser.add_argument("--videoId", help="Video file to upload")
  argparser.add_argument("--playlistId", help="playlist ID")
  args = argparser.parse_args()
  youtube = get_authenticated_service(args)
  playlist = {'cong-nghe': 'PLiYWpBi4dlS9FDavsIanMMCHbKEkv5D0k','san-pham': 'PLiYWpBi4dlS9FDavsIanMMCHbKEkv5D0k','bat-dong-san': 'PLiYWpBi4dlS8XYlNQdY8jT7o-F7Fv_VOH','suc-khoe': 'PLiYWpBi4dlS-199bV_GJIY3oECuBbnh5t','du-lich': 'PLiYWpBi4dlS-199bV_GJIY3oECuBbnh5t','the-gioi-do-day': 'PLiYWpBi4dlS-199bV_GJIY3oECuBbnh5t','phap-luat': 'PLiYWpBi4dlS86fQs8kN4llHAzLfds-tsl','tu-van': 'PLiYWpBi4dlS86fQs8kN4llHAzLfds-tsl','doi-song': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j','chinh-tri': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j','xa-hoi': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j','thi-truong': 'PLiYWpBi4dlS_pglumTBr81Ri61d3YiXgt','kinh-te': 'PLiYWpBi4dlS_pglumTBr81Ri61d3YiXgt','giao-duc': 'PLiYWpBi4dlS8QD8DGdJgSVYHXLMK1D8Pe','moi-ngay-mot-cuon-sach': 'PLiYWpBi4dlS8QD8DGdJgSVYHXLMK1D8Pe','tam-long-viet': 'PLiYWpBi4dlS8QD8DGdJgSVYHXLMK1D8Pe','cuoc-song-thuong-ngay': 'PLiYWpBi4dlS8QD8DGdJgSVYHXLMK1D8Pe','cac-mon-khac': 'PLiYWpBi4dlS9njCqjrSqVDz8uBi8SIa05','bong-da': 'PLiYWpBi4dlS9njCqjrSqVDz8uBi8SIa05','the-thao': 'PLiYWpBi4dlS9njCqjrSqVDz8uBi8SIa05','viet-nam-va-the-gioi': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j','van-de-hom-nay': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j','trong-nuoc': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j','tin-tuc': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j','the-gioi': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j','chuyen-dong-24h': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j','van-hoa-giai-tri':'PLiYWpBi4dlS8Fy8qA81aO7kQjP4L0IjiC', 'tai-chinh':'PLiYWpBi4dlS_pglumTBr81Ri61d3YiXgt', 'toan-canh-the-gioi': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j', 'gioi-tinh': 'PLiYWpBi4dlS86fQs8kN4llHAzLfds-tsl', 'ben-le': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j', 'dang-trong-cuoc-song-hom-nay': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j', 'su-kien-va-binh-luan': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j', 'dan-hoi-bo-truong-tra-loi': 'PLiYWpBi4dlS8QZgQFQBBQkZkcUU9wwm5j', 'truyen-hinh':'PLiYWpBi4dlS8Fy8qA81aO7kQjP4L0IjiC', 'goc-khan-gia':'PLiYWpBi4dlS8Fy8qA81aO7kQjP4L0IjiC'}
  try:
    add_video_to_playlist(youtube, args.videoId, "PLYiqBrpZGvGMeOmg6ZTeAuCm0PrbOrmBc")
  except HttpError, e:
    print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
Example #20
                data['title'] = search_result["snippet"]["title"]
                data['videoId'] = 'https://www.youtube.com/watch?v='+search_result["id"]["videoId"]

                locationDictionary = self.getGeolocationDataOfVideo(search_result["id"]["videoId"],youtube)
                data['location'] = str(locationDictionary["latitude"]) + ',' + str(locationDictionary["longitude"])

                #  Converting dictionary to json string
                json_data = json.dumps(data)

                #  Send data to the appropriate Elasticsearch index as well as the SQS queue.
                self.sendDataToAppropriatePlaces(json_data)

            #  Sleeping for a few seconds.
            time.sleep(3)

        #  Closing file handler
        f.close()


if __name__ == "__main__":
  argparser.add_argument("--q", help="Search term", default="airport")
  argparser.add_argument("--max-results", help="Max results", default=5)
  args = argparser.parse_args()

  try:
    #  Creating youtube search object.
    youtubeSearchObj = youtubeSearch()

    youtubeSearchObj.youtube_search(args)
  except HttpError, e:
    print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
Example #21
    part='snippet, recordingDetails'
  ).execute()

  videos = []

  # Add each result to the list, and then display the list of matching videos.
  for video_result in video_response.get("items", []):
    videos.append("%s, (%s,%s)" % (video_result["snippet"]["title"],
                              video_result["recordingDetails"]["location"]["latitude"],
                              video_result["recordingDetails"]["location"]["longitude"]))

  print "Videos:\n", "\n".join(videos), "\n"


if __name__ == "__main__":
  argparser.add_argument("--q", help="Search term", default="Google")
  argparser.add_argument("--location", help="Location", default="37.42307,-122.08427")
  argparser.add_argument("--location-radius", help="Location radius", default="5km")
  argparser.add_argument("--max-results", help="Max results", default=25)
  args = argparser.parse_args()

  try:
    youtube_search(args)
  except HttpError, e:
    print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)

"""import requests
import json

# Make it a bit prettier..
print ("-" * 30)
Example #22
  print "First line of caption track: %s" % (subtitle)

# Call the API's captions.delete method to delete an existing caption track.
def delete_caption(youtube, caption_id):
  youtube.captions().delete(
    id=caption_id
  ).execute()

  print "caption track '%s' deleted successfully" % (caption_id)


if __name__ == "__main__":
  # The "videoid" option specifies the YouTube video ID that uniquely
  # identifies the video for which the caption track will be uploaded.
  argparser.add_argument("--videoid",
    help="Required; ID for video for which the caption track will be uploaded.")
  # The "name" option specifies the name of the caption track to be used.
  argparser.add_argument("--name", help="Caption track name", default="YouTube for Developers")
  # The "file" option specifies the binary file to be uploaded as a caption track.
  argparser.add_argument("--file", help="Captions track file to upload")
  # The "language" option specifies the language of the caption track to be uploaded.
  argparser.add_argument("--language", help="Caption track language", default="en")
  # The "captionid" option specifies the ID of the caption track to be processed.
  argparser.add_argument("--captionid", help="Required; ID of the caption track to be processed")
  # The "action" option specifies the action to be processed.
  argparser.add_argument("--action", help="Action", default="all")


  args = argparser.parse_args()

  if (args.action in ('upload', 'list', 'all')):
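The dispatch at the end of Example #22 is cut off; judging by the flags, it routes to upload, list and delete helpers, of which only delete_caption appears above. An upload counterpart would look roughly like this sketch, based on the public captions sample (upload_caption is an illustrative name):

from apiclient.http import MediaFileUpload

def upload_caption(youtube, video_id, language, name, caption_file):
    # captions.insert attaches the track file to the video; the snippet
    # names the target video, the track language and the track title.
    insert_result = youtube.captions().insert(
        part="snippet",
        body=dict(
            snippet=dict(
                videoId=video_id,
                language=language,
                name=name,
                isDraft=False
            )
        ),
        media_body=MediaFileUpload(caption_file)
    ).execute()
    snippet = insert_result["snippet"]
    print("Uploaded caption track '%s' in language '%s'." %
          (snippet["name"], snippet["language"]))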
Example #23
        print("%-20s" % column_header["name"], end="")
    print()

    for row in analytics_query_response.get("rows", []):
        for value in row:
            print("%-20s" % value, end="")
        print()


if __name__ == "__main__":
    now = datetime.now()
    one_day_ago = (now - timedelta(days=1)).strftime("%Y-%m-%d")
    one_week_ago = (now - timedelta(days=7)).strftime("%Y-%m-%d")

    argparser.add_argument("--metrics",
                           help="Report metrics",
                           default="views,comments,likes,dislikes,shares")
    argparser.add_argument("--dimensions",
                           help="Report dimensions",
                           default="video")
    argparser.add_argument("--start-date",
                           default=one_week_ago,
                           help="Start date, in YYYY-MM-DD format")
    argparser.add_argument("--end-date",
                           default=one_day_ago,
                           help="End date, in YYYY-MM-DD format")
    argparser.add_argument("--max-results", help="Max results", default=10)
    argparser.add_argument("--sort", help="Sort order", default="-views")
    args = argparser.parse_args()

    (youtube, youtube_analytics) = get_authenticated_services(args)
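Example #23 stops right after authentication, so the query that produces analytics_query_response is missing. A sketch of that step, modelled on the old yt_analytics_report sample (the channel lookup and the underscore parameter names come from that sample, not from this excerpt):

def get_channel_id(youtube):
    # The report is scoped to the authorized user's own channel.
    channels_response = youtube.channels().list(mine=True, part="id").execute()
    return channels_response["items"][0]["id"]

def run_analytics_report(youtube_analytics, channel_id, options):
    # Produces the analytics_query_response whose columnHeaders and rows
    # the printing loops above walk.
    return youtube_analytics.reports().query(
        ids="channel==%s" % channel_id,
        metrics=options.metrics,
        dimensions=options.dimensions,
        start_date=options.start_date,
        end_date=options.end_date,
        max_results=options.max_results,
        sort=options.sort
    ).execute()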
Example #24
  videos = []
  channels = []
  playlists = []

  # Add each result to the appropriate list, and then display the lists of
  # matching videos, channels, and playlists.
  for search_result in search_response.get("items", []):
    if search_result["id"]["kind"] == "youtube#video":
      videos.append("%s (%s)" % (search_result["snippet"]["title"],
                                 search_result["id"]["videoId"]))
    elif search_result["id"]["kind"] == "youtube#channel":
      channels.append("%s (%s)" % (search_result["snippet"]["title"],
                                   search_result["id"]["channelId"]))
    elif search_result["id"]["kind"] == "youtube#playlist":
      playlists.append("%s (%s)" % (search_result["snippet"]["title"],
                                    search_result["id"]["playlistId"]))

  print "Videos:\n", "\n".join(videos), "\n"
  print "Channels:\n", "\n".join(channels), "\n"
  print "Playlists:\n", "\n".join(playlists), "\n"


if __name__ == "__main__":
  argparser.add_argument("--q", help="Search term", default="Amelia Earhart")
  argparser.add_argument("--max-results", help="Max results", default=25)
  args = argparser.parse_args()

  try:
    youtube_search(args)
  except HttpError, e:
    print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
Example #25
    except RETRIABLE_EXCEPTIONS, e:
      error = "A retriable error occurred: %s" % e

    if error is not None:
      print error
      retry += 1
      if retry > MAX_RETRIES:
        exit("No longer attempting to retry.")

      max_sleep = 2 ** retry
      sleep_seconds = random.random() * max_sleep
      print "Sleeping %f seconds and then retrying..." % sleep_seconds
      time.sleep(sleep_seconds)

if __name__ == '__main__':
  argparser.add_argument("--file", required=True, help="Video file to upload")
  argparser.add_argument("--title", help="Video title", default="Test Title")
  argparser.add_argument("--description", help="Video description",
    default="Test Description")
  argparser.add_argument("--category", default="22",
    help="Numeric video category. " +
      "See https://developers.google.com/youtube/v3/docs/videoCategories/list")
  argparser.add_argument("--keywords", help="Video keywords, comma separated",
    default="")
  argparser.add_argument("--privacyStatus", choices=VALID_PRIVACY_STATUSES,
    default=VALID_PRIVACY_STATUSES[2], help="Video privacy status.")
  args = argparser.parse_args()

  if not os.path.exists(args.file):
    exit("Please specify a valid file using the --file= parameter.")
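The retry loop at the top of Example #25 is the tail of the classic resumable-upload sample; the excerpt omits the code that builds the insert request and advances it with next_chunk() inside that loop. A reconstruction under those assumptions, not the file's exact code:

from apiclient.http import MediaFileUpload

def initialize_upload(youtube, options):
    tags = options.keywords.split(",") if options.keywords else None
    body = dict(
        snippet=dict(
            title=options.title,
            description=options.description,
            tags=tags,
            categoryId=options.category
        ),
        status=dict(privacyStatus=options.privacyStatus)
    )
    # chunksize=-1 sends the file in a single request; resumable=True is
    # what lets the retry loop above resume after transient failures.
    return youtube.videos().insert(
        part=",".join(body.keys()),
        body=body,
        media_body=MediaFileUpload(options.file, chunksize=-1, resumable=True)
    )

# Inside the retry loop, each attempt advances the upload roughly like:
#   status, response = insert_request.next_chunk()
#   if response is not None and "id" in response:
#       print("Video id '%s' was successfully uploaded." % response["id"])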
Example #26
  storage = Storage("%s-oauth2.json" % sys.argv[0])
  credentials = storage.get()

  if credentials is None or credentials.invalid:
    credentials = run_flow(flow, storage, args)

  return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
    http=credentials.authorize(httplib2.Http()))

# Add the video rating. This code sets the rating to "like," but you could
# also support an additional option that supports values of "like" and
# "dislike."
def like_video(youtube, video_id):
  youtube.videos().rate(
    id=video_id,
    rating="like"
  ).execute()

if __name__ == "__main__":
  argparser.add_argument("--videoid", default="L-oNKK1CrnU",
    help="ID of video to like.")
  args = argparser.parse_args()

  youtube = get_authenticated_service(args)
  try:
    like_video(youtube, args.videoid)
  except HttpError, e:
    print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
  else:
    print "%s has been liked." % args.videoid
Example #27
#!/usr/bin/python
#get the length of the candidate video via an xml page
from oauth2client.tools import argparser
import re, urllib, requests
import urllib2
import httplib2
import xml.etree.ElementTree as ET

if __name__ == "__main__":
  #example usage: python fet*py --q drphil
  #accept the args and strip off the two quotes passed to escape chars... better way to clean the vars?
  argparser.add_argument("--q", help="video ID", default=None)
  args = argparser.parse_args()
  
  #stripping off the quotes
  arglen = len(args.q)-1
  args.q=args.q[1:arglen]
  #print args.q

  #try:
  #youtube_search(args)
  #except HttpError, e:
  #  print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)

"""""
making http request for xml page:
http://stackoverflow.com/questions/17178483/how-do-you-send-an-http-get-web-request-in-python
http://code.google.com/p/httplib2/wiki/Examples
"""""
#print "testing: "+"https://gdata.youtube.com/feeds/api/videos/"+str(args.q)
h = httplib2.Http()
Example #28
    Creates a Google Drive API service object and outputs the names and IDs
    for up to 10 files.
    """
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)

    # account hang.cucku with folder name is films
    folder_id = '0B2ffjUvb0vY0dlZ5U3RCbVA1dDQ'
    folder_id = args.folderid
    file_id = args.fileid
    file_metadata = {
        #'name' : 'photo.jpg',
        'name' : args.name,
        'parents': [ folder_id ]
    }
    # print(file_id)
    # print(file_metadata)
    results = service.files().copy(fileId=file_id, body=file_metadata).execute()

    print('{0}'.format(results.get('id')))

if __name__ == '__main__':
    argparser.add_argument("--fileid", required=True, help="ID of the Drive file to copy")
    argparser.add_argument("--folderid", required=True, help="ID of the destination Drive folder")
    argparser.add_argument("--name", required=True, help="Name for the copied file")

    args = argparser.parse_args()
    main(args)
Example #29
    except RETRIABLE_EXCEPTIONS, e:
      error = "A retriable error occurred: %s" % e

    if error is not None:
      print error
      retry += 1
      if retry > MAX_RETRIES:
        exit("No longer attempting to retry.")

      max_sleep = 2 ** retry
      sleep_seconds = random.random() * max_sleep
      print "Sleeping %f seconds and then retrying..." % sleep_seconds
      time.sleep(sleep_seconds)

if __name__ == '__main__':
  argparser.add_argument("--file", required=False, help="Video file to upload", default=path + file)
  argparser.add_argument("--title", help="Video title", default=file)
  argparser.add_argument("--description", help="Video description",
    default="SUBSCRIBE: http://bit.ly/Oqg3iE\n\nThe Atlantic Coast Conference (ACC) is a collegiate athletic league in the United States. Founded in 1953 in Greensboro, North Carolina, the ACC sanctions competition in twenty-five sports in Division I of the NCAA for its twelve member universities: Boston College Eagles, Clemson Tigers, Duke Blue Devils, Florida State Seminoles, Georgia Tech Yellow Jackets, Miami Hurricanes, North Carolina Tar Heels, North Carolina State Wolfpack, Virginia Cavaliers, Virginia Tech Hokies, and Wake Forest Demon Deacons. In 2011, the conference announced it was adding Syracuse and Pittsburgh to expand to fourteen members beginning in the 2013 academic year. In 2012, the ACC announced it would add Notre Dame in all sports but football and hockey.\n\nConnect with the ACCDigitalNetwork Online:\nVisit the ACC WEBSITE: http://bit.ly/1468lpf\n\nFollow the ACCDN on Twitter: http://bit.ly/16UO9XN\nLike us on Facebook: http://bit.ly/1acUXDq\n\n[FULL VIDEO TITLE]\nhttp://www.youtube.com/user/ACCDigitalNetwork")
  argparser.add_argument("--category", default="17",
    help="Numeric video category. " +
      "See https://developers.google.com/youtube/v3/docs/videoCategories/list")
  argparser.add_argument("--keywords", help="Video keywords, comma separated",
    default="ACCDigitalNetwork, ACC Digital Network, ACCDN, ACC, College Sports, Division I, NCAA, Atlantic Coast Conference, athletics, competition, Ruby Tuesday")
  argparser.add_argument("--privacyStatus", choices=VALID_PRIVACY_STATUSES,
    default=VALID_PRIVACY_STATUSES[1], help="Video privacy status.")
  args = argparser.parse_args()

  #if not os.path.exists(args.file):
  #  exit("Please specify a valid file using the --file= parameter.")
Example #30
def post_bulletin(youtube, args):
    body = dict(snippet=dict(description=args.message))

    if args.video_id:
        body["contentDetails"] = dict(bulletin=dict(resourceId=dict(kind="youtube#video", videoId=args.video_id)))

    if args.playlist_id:
        body["contentDetails"] = dict(
            bulletin=dict(resourceId=dict(kind="youtube#playlist", playlistId=args.playlist_id))
        )

    youtube.activities().insert(part=",".join(body.keys()), body=body).execute()


if __name__ == "__main__":
    argparser.add_argument("--message", required=True, help="Text of message to post.")
    argparser.add_argument("--video-id", help="Optional ID of video to post.")
    argparser.add_argument("--playlist-id", help="Optional ID of playlist to post.")
    args = argparser.parse_args()

    # You can post a message with or without an accompanying video or playlist.
    # However, you can't post a video and a playlist at the same time.
    if args.video_id and args.playlist_id:
        exit("You cannot post a video and a playlist at the same time.")

    youtube = get_authenticated_service(args)
    try:
        post_bulletin(youtube, args)
    except HttpError, e:
        print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
    else:
Example #31
  channels = []
  playlists = []

  # Add each result to the appropriate list, and then display the lists of
  # matching videos, channels, and playlists.
  for search_result in search_response.get("items", []):
    if search_result["id"]["kind"] == "youtube#video":
      videos.append("%s (%s)" % (search_result["snippet"]["title"],
                                 search_result["id"]["videoId"]))
    elif search_result["id"]["kind"] == "youtube#channel":
      channels.append("%s (%s)" % (search_result["snippet"]["title"],
                                   search_result["id"]["channelId"]))
    elif search_result["id"]["kind"] == "youtube#playlist":
      playlists.append("%s (%s)" % (search_result["snippet"]["title"],
                                    search_result["id"]["playlistId"]))

  print "Videos:\n", "\n".join(videos), "\n"
  print "Channels:\n", "\n".join(channels), "\n"
  print "Playlists:\n", "\n".join(playlists), "\n"


if __name__ == "__main__":
  argparser.add_argument("--q", help="Search term", default="Google")
  argparser.add_argument("--max-results", help="Max results", default=25)
  args = argparser.parse_args()

  try:
    youtube_search(args)
  except HttpError, e:
    print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
Example #32
  # Call the search.list method to retrieve results matching the specified
  # query term.
  search_response = youtube.search().list(
    q=options.q,
    part="id,snippet",
    maxResults=options.max_results
  ).execute()

  videos = []
  video_id_list = []
  # Add each result to the appropriate list, and then display the lists of
  # matching videos, channels, and playlists.
  for search_result in search_response.get("items", []):
    if search_result["id"]["kind"] == "youtube#video":
      videos.append("%s (%s)" % (search_result["snippet"]["title"],
                                 search_result["id"]["videoId"]))
      video_id_list.append(search_result["id"]["videoId"])

  #print "Videos:\n", "\n".join(videos), "\n"
  print(video_id_list)

if __name__ == "__main__":
  argparser.add_argument("--q", help="Search term", default="kittens")
  argparser.add_argument("--max-results", help="Max results", default=10)
  args = argparser.parse_args()

  try:
    youtube_search(args)
  except HttpError, e:
    print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
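Example #32 only collects video ids. A natural follow-up, not part of the excerpt, is to resolve those ids into details with a single videos.list call; a brief sketch (fetch_video_statistics is an illustrative name):

def fetch_video_statistics(youtube, video_id_list):
    # videos.list accepts a comma-separated batch of up to 50 ids.
    response = youtube.videos().list(
        part="snippet,statistics",
        id=",".join(video_id_list)
    ).execute()
    for item in response.get("items", []):
        print("%s: %s views" %
              (item["snippet"]["title"], item["statistics"]["viewCount"]))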