def getPlayer(session, id):
    """Fetch and parse the ytplayer.config object from a YouTube watch page.

    session -- authenticated requests session
    id      -- YouTube video id (sent as the 'v' query parameter)
    Returns the player-config dict.
    """
    page = session.get('https://www.youtube.com/watch', params={'v': id}).text
    # Locate the "ytplayer.config = {...}" assignment and parse its braces.
    dummy, pos = util.substr('ytplayer.config', '= ', page)
    return json.loads(util.parseBrackets(page, pos, ['{', '}']))
def getSavedPlaylists(session):
    """Return the playlists saved on the logged-in user's channel.

    Scrapes the channel's /playlists page and parses the embedded
    gridRenderer JSON.  Each entry is a dict with keys: id, name, thumb,
    count, privacy, user.
    """
    result = []
    channel = google.getUserDetails(session)['channel']
    content = session.get(youtubeUrl + 'channel/' + channel + '/playlists' +
                          '?' + urllib.urlencode({
                              'sort': 'dd',
                              'view_as': 'subscriber',
                              'view': '52',
                              'shelf_id': '0'
                          })).text
    dummy, i = util.substr('[{"gridRenderer":{"items"', ':', content)
    data = json.loads(util.parseBrackets(content, i, ['[', ']']))
    for item in data:
        renderer = item['gridPlaylistRenderer']
        # Some playlists carry no video-count text; fall back to ''.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        try:
            count = renderer['videoCountShortText']['simpleText']
        except Exception:
            count = ''
        result.append({
            'id': renderer['playlistId'],
            'name': renderer['title']['simpleText'],
            'thumb': renderer['thumbnail']['thumbnails'][0]['url'],
            'count': count,
            'privacy': 'Public',
            'user': '******'
        })
    return result
def searchPlaylists(searchStr, session):
    """Search YouTube for public playlists matching searchStr.

    'sp': 'EgIQA1AU' is YouTube's filter token restricting results to
    playlists.  Returns a list of dicts: id, name, thumb, count, privacy,
    user.
    """
    result = []
    content = session.get(youtubeUrl + 'results' + '?' + urllib.urlencode({
        'search_query': searchStr,
        'sp': 'EgIQA1AU'
    })).text
    dummy, i = util.substr('"itemSectionRenderer":{"contents"', ':', content)
    data = json.loads(util.parseBrackets(content, i, ['[', ']']))
    for item in data:
        # The results list mixes renderer types; keep playlist entries only.
        if 'playlistRenderer' not in item:
            continue
        renderer = item['playlistRenderer']
        result.append({
            'id': renderer['playlistId'],
            'name': renderer['title']['simpleText'],
            'thumb': renderer['thumbnails'][0]['thumbnails'][0]['url'],
            'count': renderer['videoCount'],
            'privacy': 'Public',
            'user': '******'
        })
    return result
def searchVideos(searchStr, session):
    """Search YouTube for public videos matching searchStr.

    'sp': 'EgIQAVAU' is YouTube's filter token restricting results to
    videos.  Returns a list of dicts: id, name, thumb, duration, privacy,
    user.

    NOTE(review): a second searchVideos(searchStr, pathCookies) is defined
    later in this file and shadows this one at import time -- confirm which
    definition callers actually get.
    """
    result = []
    content = session.get(youtubeUrl + 'results' + '?' + urllib.urlencode({
        'search_query': searchStr,
        'sp': 'EgIQAVAU'
    })).text
    dummy, i = util.substr('"itemSectionRenderer":{"contents"', ':', content)
    data = json.loads(util.parseBrackets(content, i, ['[', ']']))
    for item in data:
        # The results list mixes renderer types; keep video entries only.
        if 'videoRenderer' not in item:
            continue
        renderer = item['videoRenderer']
        # Live streams have no lengthText; report duration as ''.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        try:
            duration = util.timeStrToSeconds(
                renderer['lengthText']['simpleText'])
        except Exception:
            duration = ''
        result.append({
            'id': renderer['videoId'],
            'name': renderer['title']['simpleText'],
            'thumb': renderer['thumbnail']['thumbnails'][0]['url'],
            'duration': duration,
            'privacy': 'Public',
            'user': '******'
        })
    return result
def getPlaylistVideos(id, session):
    """Return the videos contained in a YouTube playlist.

    Returns a list of dicts: id, name, thumb, duration, privacy, user.

    NOTE(review): getPlaylistVideos(id, pathCookies) is redefined later in
    this file and shadows this version at import time -- confirm intent.
    """
    result = []
    content = session.get(youtubeUrl + 'playlist' + '?' +
                          urllib.urlencode({'list': id})).text
    dummy, i = util.substr('playlistVideoListRenderer":{"contents"', ':', content)
    data = json.loads(util.parseBrackets(content, i, ['[', ']']))
    for item in data:
        # Deleted/private entries have no title; skip them entirely.
        # (Was a bare `except:`, narrowed to Exception.)
        try:
            name = item['playlistVideoRenderer']['title']['simpleText']
        except Exception:
            continue
        renderer = item['playlistVideoRenderer']
        # Missing duration (e.g. live streams) degrades to ''.
        try:
            duration = util.timeStrToSeconds(
                renderer['lengthText']['simpleText'])
        except Exception:
            duration = ''
        result.append({
            'id': renderer['videoId'],
            'name': name,
            'thumb': renderer['thumbnail']['thumbnails'][0]['url'],
            'duration': duration,
            'privacy': 'Public',
            'user': '******'
        })
    return result
def search(pathCookies, searchStr):
    """Search Google Photos for media and albums matching searchStr.

    Returns {'photos': [...], 'videos': [...], 'albums': [...]}.
    """
    result = {'photos': [], 'videos': [], 'albums': []}
    session = initSession(pathCookies)
    loginInfo = google.getLoginInfo(session)
    content = session.get('https://photos.google.com/search/' + searchStr).text
    dummy, i = util.substr("key: 'ds:0', isError: false , hash:", "return", content)
    data = json.loads(util.parseBrackets(content, i, ['[', ']']))
    # Best-effort parsing: the undocumented payload layout shifts between
    # page versions, so each section is attempted independently.
    # (The original `except: None` pairs were bare excepts with a pointless
    # `None` expression; narrowed to Exception + pass, behavior unchanged.)
    try:
        result.update(getContent(data[0]))
    except Exception:
        pass
    try:
        for row in data[3]:
            result['albums'].append({
                'id': row[0][2][0],
                'name': row[2],
                'thumb': row[1],
                'owner': loginInfo['name'],
                'ownerFlag': True,
                'photosCount': 5,  # NOTE(review): hard-coded count -- confirm
                'sharedKey': None
            })
    except Exception:
        pass
    return result
def getPlaylistVideos(id, pathCookies):
    """Return the videos of a YouTube playlist (cookie-file variant).

    Returns a list of dicts: id, name, thumb, duration, publishedTime,
    viewCount, owner, privacy.

    NOTE(review): this redefines getPlaylistVideos(id, session) declared
    earlier in the file; the earlier definition is shadowed at import time.
    """
    session = initSession(pathCookies)
    result = []
    content = session.get(
        youtubeUrl + 'playlist' + '?' + urllib.urlencode({'list': id})).text
    dummy, i = util.substr('playlistVideoListRenderer":{"contents"', ':', content)
    data = json.loads(util.parseBrackets(content, i, ['[', ']']))
    for item in data:
        if 'playlistVideoRenderer' not in item:
            continue
        renderer = item['playlistVideoRenderer']
        # Renamed from `content` to stop shadowing the page text above.
        video = {
            'id': renderer['videoId'],
            'name': '',
            'thumb': videoImage(renderer['videoId']),
            'duration': '',
            'publishedTime': '',
            'viewCount': '',
            'owner': '',
            'privacy': 'Public',
        }
        # Entries without a title (deleted/private videos) are skipped.
        try:
            video['name'] = renderer['title']['simpleText']
        except Exception:
            continue
        # Entries without a duration are skipped as well (as in original).
        try:
            video['duration'] = renderer['lengthText']['simpleText']
        except Exception:
            continue
        # Owner is optional; leave '' when absent.
        try:
            video['owner'] = renderer['shortBylineText']['runs'][0]['text']
        except Exception:
            pass
        result.append(video)
    return result
def getAlbumContent(pathCookies, albumId, sharedKey=None):
    """Fetch the media items of a Google Photos album.

    When sharedKey is given, the /share/ URL for albums shared by other
    accounts is used instead of the owner /album/ URL.
    """
    session = initSession(pathCookies)
    url = ("https://photos.google.com/album/" + albumId
           if sharedKey is None else
           'https://photos.google.com/share/' + albumId + '?key=' + sharedKey)
    page = session.get(url).text
    # The album payload sits in the ds:0 script block of the page.
    dummy, pos = util.substr("key: 'ds:0', isError: false , hash:", "return", page)
    payload = json.loads(util.parseBrackets(page, pos, ['[', ']']))
    return getContent(payload[1])
def getPeople(pathCookies):
    """Return the people (named face groups) of the Google Photos account.

    Each entry: {'id', 'name', 'thumb'}.  Unnamed face clusters are skipped.
    """
    result = []
    session = initSession(pathCookies)
    content = session.get('https://photos.google.com/people').text
    dummy, i = util.substr("key: 'ds:1'", "return", content)
    data = json.loads(util.parseBrackets(content, i, ['[', ']']))
    for row in data[0][0][0][0]:
        # Rows without a name are unlabeled face clusters; skip them.
        if len(row[1]) < 1:
            continue
        # (Removed unused local `id = row[4][0]`, which also shadowed the
        # builtin `id`.)
        result.append({'id': row[8], 'name': row[1], 'thumb': row[2]})
    return result
def getEpgYandex(id, days):
    """Scrape an EPG (program guide) for one channel from tv.yandex.ru.

    id   -- Yandex channel id (final path component of the channel URL)
    days -- number of days to fetch, starting today
    Returns a list of {'start', 'stop', 'title', 'description'} dicts with
    timezone-aware datetimes.
    """
    # Page times are local wall-clock values; attach this zone to them.
    # tz = datetime.datetime.now(timezone('Europe/Moscow')).tzinfo
    tz = datetime.datetime.now(timezone('Asia/Jerusalem')).tzinfo

    def parseStamp(value):
        # Timestamps look like 'YYYY-MM-DD?HH:MM...'; only the first 16
        # characters are used, so the separator and any trailing
        # seconds/offset are ignored.  (Extracted: this slicing was
        # duplicated verbatim for 'start' and 'finish'.)
        return datetime.datetime(year=int(value[0:4]),
                                 month=int(value[5:7]),
                                 day=int(value[8:10]),
                                 hour=int(value[11:13]),
                                 minute=int(value[14:16]),
                                 tzinfo=tz)

    today = datetime.date.today()
    programs = []
    s = requests.Session()
    url = 'https://tv.yandex.ru/213/channels/' + id
    for day in range(0, days):
        theday = today + datetime.timedelta(days=day)
        params = {'date': datetime.datetime.strftime(theday, '%Y-%m-%d')}
        content = s.get(url=url, params=params).text
        dummy, i = util.substr('"events"', ':', content)
        data = json.loads(util.parseBrackets(content, i, ['[', ']']))
        for pdata in data:
            program = {
                'start': parseStamp(pdata['start']),
                'stop': parseStamp(pdata['finish']),
                'title': pdata['program']['title'],
                'description': ''
            }
            # Append the episode title only when it adds information.
            if 'episode' in pdata:
                if 'title' in pdata['episode']:
                    if pdata['episode']['title'] != program['title']:
                        program['title'] = (program['title'] + ', ' +
                                            pdata['episode']['title'])
            if 'description' in pdata['program']:
                program['description'] = pdata['program'][
                    'description'].replace("\n", "")
            programs.append(program)
    return programs
def getMyAlbums(pathCookies):
    """List the albums owned by the logged-in Google Photos account.

    Each entry: id, sharedKey, name, thumb, tsStart, tsEnd, owner,
    ownerFlag, photosCount.
    """
    session = initSession(pathCookies)
    loginInfo = google.getLoginInfo(session)
    albums = []
    page = session.get("https://photos.google.com/albums").text
    dummy, pos = util.substr("key: 'ds:2', isError: false , hash:", "return", page)
    payload = json.loads(util.parseBrackets(page, pos, ['[', ']']))
    for entry in payload[0]:
        metadata = entry[12]['72930366']
        sharedKey = metadata[5]
        # Shared entries whose metadata is not exactly 9 fields long are
        # not listable albums here; skip them.
        if sharedKey is not None and len(metadata) != 9:
            continue
        albums.append({
            'id': entry[0],
            'sharedKey': sharedKey,
            'name': metadata[1],
            'thumb': entry[1][0],
            # Timestamps arrive in milliseconds since the epoch.
            'tsStart': datetime.datetime.fromtimestamp(metadata[2][0] / 1000),
            'tsEnd': datetime.datetime.fromtimestamp(metadata[2][1] / 1000),
            'owner': loginInfo['name'],
            'ownerFlag': True,
            'photosCount': metadata[3],
        })
    return albums
def getOtherAlbums(pathCookies):
    """List albums shared with (but not owned by) the logged-in account.

    Each entry: id, sharedKey, name, thumb, tsStart, tsEnd, photosCount,
    tsCreated, owner, ownerFlag.
    """
    session = initSession(pathCookies)
    loginInfo = google.getLoginInfo(session)
    result = []
    content = session.get("https://photos.google.com/sharing").text
    dummy, i = util.substr("key: 'ds:1'", "return", content)
    data = json.loads(util.parseBrackets(content, i, ['[', ']']))
    # Removed a stray debug dump (util.objToFile -> data.txt) that was left
    # enabled and wrote a file next to the cookie store on every call.
    for row in data[0]:
        owner = row[10][0][11][0]
        # Albums we own ourselves are listed by getMyAlbums(); skip here.
        if owner == loginInfo['name']:
            continue
        result.append({
            'id': row[6],
            'sharedKey': row[7],
            'name': row[1],
            'thumb': row[2][0],
            'tsStart': None,
            'tsEnd': None,
            'photosCount': row[3],
            # Creation time arrives in milliseconds since the epoch.
            'tsCreated': datetime.datetime.fromtimestamp(row[4] / 1000),
            'owner': owner,
            'ownerFlag': False
        })
    return result
def searchVideos(searchStr, pathCookies):
    """Search YouTube for public videos (cookie-file variant).

    Returns a list of dicts: id, name, thumb, duration, publishedTime,
    viewCount, owner, privacy.

    NOTE(review): this redefines searchVideos(searchStr, session) declared
    earlier in the file; the earlier definition is shadowed at import time.
    """
    session = initSession(pathCookies)
    result = []
    content = session.get(
        youtubeUrl + 'results' + '?' +
        urllib.urlencode({'search_query': searchStr, 'sp': 'EgIQAVAU'})).text
    dummy, i = util.substr('"itemSectionRenderer":{"contents"', ':', content)
    data = json.loads(util.parseBrackets(content, i, ['[', ']']))
    for item in data:
        if 'videoRenderer' not in item:
            continue
        renderer = item['videoRenderer']
        # Renamed from `content` to stop shadowing the page text above.
        video = {
            'id': renderer['videoId'],
            'name': renderer['title']['simpleText'],
            'thumb': videoImage(renderer['videoId']),
            'duration': '',
            'publishedTime': '',
            'viewCount': '',
            'owner': '',
            'privacy': 'Public',
        }
        # Entries without a duration are skipped (as in original).
        try:
            video['duration'] = renderer['lengthText']['simpleText']
        except Exception:
            continue
        # The remaining fields are optional; leave '' when absent.
        try:
            video['publishedTime'] = renderer['publishedTimeText']['simpleText']
        except Exception:
            pass
        try:
            video['viewCount'] = renderer['viewCountText'][
                'simpleText'].replace(' views', '')
        except Exception:
            pass
        try:
            video['owner'] = renderer['ownerText']['runs'][0]['text']
        except Exception:
            pass
        result.append(video)
    return result
def getMyVideos(session):
    """Return the logged-in user's uploaded videos from /my_videos.

    Parses the VIDEO_LIST_DISPLAY_OBJECT JSON and scrapes each item's HTML
    snippet for title, duration and the privacy badges.
    Returns a list of dicts: id, name, thumb, duration, privacy, user.
    """
    result = []
    content = session.get(youtubeUrl + 'my_videos' + '?' +
                          urllib.urlencode({'o': 'U'})).text
    dummy, i = util.substr('"VIDEO_LIST_DISPLAY_OBJECT"', ':', content)
    data = json.loads(util.parseBrackets(content, i, ['[', ']']))
    for item in data:
        soup = BeautifulSoup(
            util.unescape(item['html'].decode('unicode_escape')),
            "html.parser")
        ptag = soup.find(class_="vm-video-indicators")
        # The unlisted/private badges carry aria-hidden when they do NOT
        # apply; a badge without it means the video is not public.
        privacy = 'Public'
        if not ptag.find(class_='vm-unlisted').parent.has_attr('aria-hidden'):
            privacy = 'Private'
        if not ptag.find(class_='vm-private').parent.has_attr('aria-hidden'):
            privacy = 'Private'
        # Duration badge may be absent; degrade to ''.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        try:
            duration = util.timeStrToSeconds(
                soup.find(class_="video-time").get_text())
        except Exception:
            duration = ''
        result.append({
            'id': item['id'],
            'name': soup.find(class_="vm-video-title-content").get_text(),
            'thumb': videoImage(item['id']),
            'duration': duration,
            'privacy': privacy,
            'user': '******'
        })
    return result
def login(pathCookies, email, password):
    """Log in to a Google account by replaying the GlifWebSignIn web flow.

    pathCookies -- path where the session cookie jar is persisted
    email, password -- account credentials

    Raises Exception('Unable to login: Invalid credentials') when no SID
    cookie appears after the password challenge.  On success, stores a
    synthetic 'MyLoginInfo' cookie (email & channel & display name &
    timestamp) and saves all cookies to pathCookies.
    """
    session = initSession()
    ######## Step 1 #########
    # Fetch the login page and extract two tokens needed by later POSTs:
    # p1 (the 'azt' anti-XSRF token from WIZ_global_data) and p2 (a value
    # from the view_container's data-initial-setup-data blob, index 13).
    content = session.get('https://accounts.google.com/Login').text
    dummy, i = util.substr('window.WIZ_global_data', '= ', content)
    data = json.loads(util.parseBrackets(content, i, ['{', '}']))
    data = data['OewCAd']
    # NOTE(review): chained `p1 = data = ...` -- only p1 is used afterwards.
    p1 = data = data.split(',')[3].replace('"', '').replace(']', '')
    soap = BeautifulSoup(content, "html.parser")
    # '%.@.' is Google's array-literal prefix; swap it for '[' so the blob
    # parses as JSON.
    data = soap.find("div", {"id": "view_container"
                             })['data-initial-setup-data'].replace('%.@.', '[')
    data = json.loads(data)
    data = data[13]
    p2 = json.dumps(data).replace('"', '')
    ######## Step 2 #########
    # Account lookup: POST the email plus p1/p2; the response yields p3,
    # the session token for the password challenge.
    session.headers.update({
        'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
        'Google-Accounts-XSRF': '1',
        'Referer': 'https://accounts.google.com/',
        'X-Same-Domain': '1'
    })
    data = {
        # 'continue': 'https://myaccount.google.com/',
        'continue': 'https://accounts.google.com/ManageAccount',
        'f.req': '["' + email + '","' + p2 + '",[],null,"IL",null,null,2,false,true,[null,null,[2,1,null,1,"https://accounts.google.com/ServiceLogin?requestPath=%2FLogin&Page=PasswordSeparationSignIn",null,[],4],1,[null,null,[]],null,null,null,true],"' + email + '"]',
        'azt': p1,
        'deviceinfo': '[null,null,null,[],null,"IL",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
        'gmscoreversion': 'undefined',
        'checkConnection': 'youtube:841:0',
        'checkedDomains': 'youtube',
        'pstMsg': '1'
    }
    content = session.post(
        'https://accounts.google.com/_/signin/sl/lookup?hl=en&_reqid=73079&rt=j',
        data=data).text
    # Responses are prefixed with the anti-JSON-hijacking guard ")]}'".
    data = json.loads(content.replace(")]}'", "").replace("\n", ""))
    data = data[0][0][2]
    p3 = json.dumps(data).replace('"', '')
    ######## Step 3 #########
    # Password challenge: POST p3 + the password; success sets the SID
    # session cookie.
    data = {
        'continue': 'https://accounts.google.com/ManageAccount',
        # 'continue': 'https://myaccount.google.com/',
        'f.req': '["' + p3 + '",null,1,null,[1,null,null,null,["' + password +
        '",null,true]],[null,null,[2,1,null,1,"https://accounts.google.com/ServiceLogin?requestPath=%2FLogin&Page=PasswordSeparationSignIn",null,[],4],1,[null,null,[]],null,null,null,true]]',
        'azt': p1,
        'deviceinfo': '[null,null,null,[],null,"IL",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
        'gmscoreversion': 'undefined',
        'checkConnection': 'youtube:841:0',
        'checkedDomains': 'youtube',
        'pstMsg': '1'
    }
    content = session.post(
        'https://accounts.google.com/_/signin/sl/challenge?hl=en&_reqid=173079&rt=j',
        data=data).text
    if 'SID' not in str(session.cookies):
        raise Exception('Unable to login: Invalid credentials')
    ######## Step 4 #########
    # Scrape the YouTube my_videos page for the external channel id and
    # the account display name.
    util.resetHeaders(session)
    content = session.get('https://www.youtube.com/my_videos?o=U').text
    dummy, i = util.substr("yt.setConfig('GOOGLE_HELP_PRODUCT_DATA'", ', ',
                           content)
    data = json.loads(util.parseBrackets(content, i, ['{', '}']))
    channel = data['channel_external_id']
    soap = BeautifulSoup(content, "html.parser")
    name = soap.find("div", class_='yt-masthead-picker-name').get_text()
    ### Save login data ###
    loginInfo = email + '&' + channel + '&' + name + '&' + str(time.time())
    util.setCookie(session, 'www.google.com', 'MyLoginInfo', loginInfo)
    util.saveCookies(session, pathCookies)
def getRecent(pathCookies):
    """Return the most recent media items from the Google Photos home page."""
    session = initSession(pathCookies)
    page = session.get('https://photos.google.com/?pli=1').text
    # The recent-items payload sits in the ds:2 script block of the page.
    dummy, pos = util.substr("key: 'ds:2'", "return", page)
    payload = json.loads(util.parseBrackets(page, pos, ['[', ']']))
    return getContent(payload[0])