def reqData(url, headers={}, params=None):
    import requests
    ses = requests.Session()
    if params:
        # POST when a params payload is supplied.
        try:
            print 'headers', headers
            res = ses.post(url, headers=headers, params=params, verify=False, timeout=5)
            print 'res.status_code', res.status_code
            if res.status_code == 200:
                return res.content
            e = res.raise_for_status()
            print 'error', e
            printD('Download error', e)
            return ''
        except requests.exceptions.RequestException as e:
            printD('Download error', str(e))
            printE()
            return ''
    else:
        # Plain GET otherwise.
        try:
            res = ses.get(url, headers=headers, verify=False, timeout=5)
            if res.status_code == 200:
                return res.content
            e = res.raise_for_status()
            print 'error', e
            printD('Download error', e)
            return ''
        except requests.exceptions.RequestException as e:
            printD('Download error', str(e))
            printE()
            return ''
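# Usage sketch for reqData() (illustrative only; the URL, API key and header values below
# are placeholders, not values shipped with the plugin). reqData() returns the raw response
# body on HTTP 200 and an empty string on any error, so callers can simply test for ''.
def _demo_reqData():
    sample_url = 'https://www.googleapis.com/youtube/v3/search?part=snippet&q=test&key=YOUR_API_KEY'
    body = reqData(sample_url, headers={'User-Agent': 'Mozilla/5.0'})
    if body == '':
        print 'request failed or returned no data'
    else:
        print 'received %d bytes' % len(body)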
def getuserlist(self):
    files = []
    fullpath = []
    user_iptv_file = iptv_folder + 'iptvlist'
    print "user_iptv_file", user_iptv_file
    if os.path.exists(user_iptv_file):
        f = open(user_iptv_file, 'r')
        lines = f.readlines()
        f.close()
        print "linesxx", lines
        for line in lines:
            line = line.strip()
            # Entries are semicolon-separated: type;name;url
            if line.startswith('m3u'):
                parts = line.split(';')
                itemtype = parts[0]
                itemname = parts[1]
                itempath = parts[2]
                try:
                    success = downloadfile(itempath, iptv_folder + itemname + '.m3u')
                    printD("success", success)
                    if not os.path.exists(folder + itemname + '.lnk'):
                        cfile = open(folder + itemname + '.lnk', "w")
                        cfile.write(itempath)
                        cfile.close()
                except:
                    printE()
            if line.startswith('iptv'):
                parts = line.split(';')
                itemtype = parts[0]
                itemname = parts[1]
                itempath = parts[2]
                self.addDir(itemname, itempath, 6, 'img/iptv.png', '', 1)
    if os.path.exists(folder):
        emptyfolder = True
        for item in os.listdir(folder):
            itempath = folder + item
            if os.path.isfile(itempath) and item.endswith('.m3u'):
                itemname = item.replace('.m3u', '')
                icon = self.geticon(itemname)
                self.addDir(itemname, itempath, 1, icon, '', 1)
                emptyfolder = False
        if emptyfolder:
            self.addDir('No m3u file in ' + iptv_folder, "http://", 1, '', '', 1)
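# Sketch of the 'iptvlist' line format getuserlist() parses: semicolon-separated
# type;name;url entries. The sample lines below are placeholders, not real playlists.
def _demo_iptvlist_format():
    sample_lines = [
        'm3u;MyPlaylist;http://example.com/list.m3u',
        'iptv;MyPortal;http://example.com/portal',
    ]
    for line in sample_lines:
        itemtype, itemname, itempath = line.strip().split(';')[:3]
        print itemtype, itemname, itempath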
def getSearchResult(self, pattern, searchType, page, nextPageCategory, sortBy=''):
    printDBG('YouTubeParser.getSearchResult pattern[%s], searchType[%s], page[%s]' % (pattern, searchType, page))
    currList = []
    try:
        url = 'http://www.youtube.com/results?search_query=%s&filters=%s&search_sort=%s&page=%s' % (pattern, searchType, sortBy, page)
        printD(url)
        sts, data = self.cm.getPage(url, {'host': self.HOST})
        if sts:
            nextPage = self.cm.ph.getDataBeetwenMarkers(data, 'page-box', '</div>', False)[1]
            if nextPage.find('>%d<' % (int(page) + 1)) > -1:
                nextPage = True
            else:
                nextPage = False
            sp = '<li><div class="yt-lockup'
            if searchType == 'playlist':
                m2 = '<div class="branded-page-box'
            else:
                m2 = '</ol>'
            data = CParsingHelper.getDataBeetwenMarkers(data, sp, m2, False)[1]
            data = data.split(sp)
            currList = self.parseListBase(data, searchType)
            print "nextPage", nextPage
            if len(currList) and nextPage:
                item = {
                    'name': 'history',
                    'type': 'category',
                    'category': nextPageCategory,
                    'pattern': pattern,
                    'search_type': searchType,
                    'title': _("Next page"),
                    'page': str(int(page) + 1)
                }
                currList.append(item)
    except Exception as error:
        printE()
        printD(str(error))
        return []
    return currList
def downloadfile(self, url=None, localfile=''):
    import requests
    if url and url.startswith('http'):
        try:
            r = requests.get(url, timeout=10, verify=False)
            if r.status_code == 200:
                with open(localfile, 'wb') as f:
                    f.write(r.content)
                return True
        except:
            printE()
    return False
def get_video_url(url=None):
    # Remove a stale log file before resolving the stream.
    if os.path.exists('/tmp/TSmedia/TSmedia_log'):
        try:
            os.remove('/tmp/TSmedia/TSmedia_log')
        except:
            pass
    video_id = get_youtube_video_id(url)
    ql = '2'
    print 'video_idxxx', video_id
    video_url = None
    try:
        ytdl = YouTubeVideoUrl()
        # Map the quality setting to a YouTube itag.
        if ql == '0':
            quality = '17'
        elif ql == '1':
            quality = '5'
        elif ql == '2':
            quality = '18'
        elif ql == '3':
            quality = '22'
        elif ql == '4':
            quality = '22'
        elif ql == '5':
            quality = '37'
        elif ql == '6':
            quality = '38'
        video_url = ytdl.extract(video_id, quality)
        print 'video_urlxxx', video_url
        printD('error,video_url', str(video_url))
        if video_url is None:
            return ('Error :', ' video_url ' + str(video_url))
        if video_url.startswith('Error'):
            return video_url
        if video_url.startswith('http') and not video_url.strip() == '':
            return video_url
        return 'Error'
    except Exception as e:
        printE()
        printD('url', 'https://www.youtube.com/watch?v=' + video_id)
        #addDir("Message:" + str(e), 'https://www.youtube.com/watch?v=' + video_id, 10, "", "", 1)
        printD('Error:' + str(e))
        return 'https://www.youtube.com/watch?v=' + video_id
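# Usage sketch for get_video_url() above (the watch URL is a placeholder). On success it
# returns a direct http stream URL; on failure it returns an 'Error...' string, and if
# extraction throws it falls back to the plain www.youtube.com watch URL.
def _demo_resolve_youtube():
    link = get_video_url('https://www.youtube.com/watch?v=VIDEO_ID')
    if isinstance(link, str) and link.startswith('http'):
        print 'playable link:', link
    else:
        print 'could not resolve stream:', link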
def get_video_url(server_url):
    import requests
    import re
    s = requests.Session()
    r = s.get(server_url)
    html = r.content
    html = html.replace("\u0026", "&")
    html = html.replace("\&", "&")
    # Pull the HLS master playlist URL out of the page source.
    regx = '''hlsManifestUrl(.*?)",'''
    M3U8Url = re.findall(regx, html, re.M | re.I)[0][13:]
    print "stream_link3", M3U8Url
    r = s.get(M3U8Url)
    data = r.content
    print data
    # Possible quality tags: 'sd', 'lowest', 'low', 'hd', 'mobile'
    # Variant line example: #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=161349,QUALITY=mobile
    quality = ''
    linkstab = []
    try:
        for line in data.split("\n"):
            line = line.strip()
            if 'QUALITY=' in line:
                quality = line.split('QUALITY=')[1]
            if not line.startswith("http"):
                continue
            # Pair every variant URL with the quality tag announced just above it.
            linkstab.append((quality, line))
        return linkstab
    except:
        printE()
        return []
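# Sketch: choosing a variant from the (quality, url) pairs returned by the HLS variant of
# get_video_url() above. Quality tags follow the manifest (e.g. 'mobile', 'low', 'sd', 'hd');
# the server URL below is a placeholder.
def _demo_pick_hd_variant():
    links = get_video_url('http://example.com/live/channel')
    for quality, link in links:
        if quality == 'hd':
            return link
    return links[0][1] if links else ''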
def getSettings():
    # Build the extra query-string parameters for YouTube Data API requests from the
    # plugin's Enigma2 config entries, falling back to safe defaults.
    try:
        param_metalang = config.TStube.lang.value
    except:
        param_metalang = ''
    try:
        param_regionid = config.TStube.region.value
    except:
        param_regionid = ''
    try:
        param_duration = config.TStube.paraduration.value
    except:
        param_duration = ''
    try:
        order = config.TSmedia.youtubeSortBy.value
        if order == '':
            order = 'date'
        param_frequency = '&order=' + order
    except:
        printE()
        param_frequency = '&order=date'
    try:
        param_3d = config.TStube.para3d.value
    except:
        param_3d = ''
    try:
        safesearch = config.TStube.safesearch.value
    except:
        safesearch = '&safeSearch=none'
    return param_metalang + param_duration + param_regionid + param_3d + param_frequency + safesearch
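# Sketch of what getSettings() yields: a query-string fragment appended to Data API URLs,
# e.g. '&order=date&safeSearch=none' when only the fallback defaults apply (the other parts
# stay empty unless the corresponding Enigma2 config entries are set).
def _demo_print_settings():
    print 'extra request parameters:', getSettings()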
def readUserFile(group='video'):
    # Read colon-separated entries (group:title:id[:image]) for the requested group.
    listTab = []
    try:
        if not os.path.exists(userFile):
            return []
        lines = open(userFile).readlines()
        for line in lines:
            line = line.strip()
            if not line == '' and ':' in line:
                items = line.split(':')
                if group in items[0].strip().lower():
                    itemTitle = items[1]
                    itemID = items[2]
                    if len(items) > 3:
                        itemImage = items[3]
                    else:
                        itemImage = "img/%s.png" % group
                    listTab.append((itemTitle, itemID, itemImage))
    except:
        printE()
    return listTab
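# Sketch of the colon-separated lines readUserFile() expects (group:title:id[:image]);
# the sample entry is a placeholder.
def _demo_userfile_format():
    sample = 'video:My clip:abc123xyz:img/video.png'
    items = sample.split(':')
    image = items[3] if len(items) > 3 else 'img/video.png'
    print items[1], items[2], image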
def getMyChannelData(self, cParams):
    list1 = []
    main_url = cParams.get('url', '')
    main_title = cParams.get('name', '')
    page = cParams.get('page', '')
    main_url = main_url + uParams
    access_token, refresh_token = self.getaccesstoken()
    url_page = main_url
    printD('url_page', url_page)
    content = self.getPage(url_page)
    try:
        data = json.loads(content)
        totalResults = data.get('pageInfo')['totalResults']
    except:
        printE()
        self.youtube_error('Please refresh login')
        title = 'Refresh login'
        url = ''
        mode = 20
        image = 'img/signin.png'
        page = ''
        desc = ''
        extra = {}
        self.dInfo(title, url, mode, image, 'my channel', page, desc=desc, extra=extra)
        title = 'New login'
        self.dInfo(title, url, mode, image, 'my channel', page, desc=desc, extra=extra)
        return
    if int(totalResults) < 1:
        addDir('No contents / results found!', '', 1, '', '')
        self.dInfo('No contents / results found!', '', -1, '', 'my channel', '', desc='', extra='')
        return
    for item in data.get('items', {}):
        kind = item['kind']
        if not kind == 'youtube#channel':
            continue
        channelid = item['id']
        contentDetails = item['contentDetails']
        if contentDetails:
            favorites_id = item['contentDetails']['relatedPlaylists']['favorites']
            uploads_id = item['contentDetails']['relatedPlaylists']['uploads']
            watchHistory_id = item['contentDetails']['relatedPlaylists']['watchHistory']
            watchLater_id = item['contentDetails']['relatedPlaylists']['watchLater']
            likes_id = item['contentDetails']['relatedPlaylists']['likes']
            url = 'https://www.googleapis.com/youtube/v3/playlists?part=snippet&mine=true&' + '&access_token=' + self.access_token
            self.dInfo('playlists', url, 100, 'img/playlists.png', main_title, page, desc='', extra={})
            url = 'https://www.googleapis.com/youtube/v3/subscriptions?part=snippet&mine=true&' + '&access_token=' + self.access_token
            self.dInfo('Subscriptions', url, 100, 'img/subscriptions.png', main_title, page, desc='', extra={})
            url = 'https://www.googleapis.com/youtube/v3/activities?part=snippet,id,contentDetails&home=true&forMine=true' + '&access_token=' + self.access_token
            self.dInfo('Activities', url, 100, 'img/activities.png', main_title, page, desc='', extra={})
            url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet,contentDetails,status&playlistId=' + favorites_id + '&mine=true' + '&access_token=' + self.access_token
            self.dInfo('Favorites', url, 100, 'img/favorites.png', main_title, page, desc='', extra={})
            url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet,contentDetails,status&playlistId=' + uploads_id + '&mine=true' + '&access_token=' + self.access_token
            self.dInfo('My uploads', url, 100, 'img/myuploads.png', main_title, page, desc='', extra={})
            url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet,contentDetails,status&playlistId=' + likes_id + '&mine=true' + '&access_token=' + self.access_token
            self.dInfo('likes', url, 108, 'img/likes.png', main_title, page, desc='', extra={})
            url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet,contentDetails,status&playlistId=' + watchLater_id + '&mine=true' + '&access_token=' + self.access_token
            self.dInfo('WatchLater', url, 100, 'img/watch_later.png', main_title, page, desc='', extra={})
            url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet,contentDetails,status&playlistId=' + watchLater_id + '&mine=true' + '&access_token=' + self.access_token
            self.dInfo('Sign out', url, 106, 'img/signout.png', main_title, page, desc='', extra={})
def getviedoList(self, cParams):
    default_params = getSettings()
    uParams = '&maxResults=45' + default_params + '&key=' + KEYV3
    uParams2 = uParams.replace('order=viewCount', 'order=relevance')
    main_title = cParams.get('title', '')
    main_url = cParams.get('url', '')
    if 'subscriptions' in main_url:
        main_url = main_url + uParams2
    elif 'key=' not in main_url:
        main_url = main_url + uParams
    printD('main url', main_url)
    page = cParams.get('page', '')
    main_mode = cParams.get('mode', 0)
    if page == '':
        url_page = main_url
    else:
        url_page = main_url + '&pageToken=' + str(page).strip()
    print 'url_page', url_page
    content = reqData(url_page, self.HEADER)
    if content.strip() == '':
        return self.youtube_error('download error')
    try:
        data = json.loads(content, strict=False)
    except:
        printE()
        return self.youtube_error('download error')
    c4_browse_ajax = str(data.get('nextPageToken', ''))
    page = c4_browse_ajax
    try:
        TotalResults = int(data.get('pageInfo', 0)['totalResults'])
    except:
        TotalResults = 0
    try:
        resultsPerPage = int(data.get('pageInfo', 0)['resultsPerPage'])
    except:
        resultsPerPage = 0
    etag = data.get('etag', 'none')
    extra = {'TotalResults': TotalResults, 'resultsperpage': resultsPerPage, 'etag': etag}
    if TotalResults < 1:
        return self.youtube_error('download error - No contents / results found!')
    list_item = 'ItemList' in data['kind']
    for item in data.get('items', {}):
        try:
            if not list_item:
                try:
                    kind = item['id']['kind']
                except:
                    kind = item['kind']
            else:
                kind = item['kind']
            if kind:
                title = item['snippet']['title'].encode('utf-8')
                channelId = item['snippet'].get('channelId', '')
                channelTitle = item['snippet'].get('channelTitle', '')
                extra.update({'channelId': channelId, 'channelTitle': channelTitle})
                desc = item['snippet']['description'].encode('utf-8', 'ignore').replace('&', '_').replace(';', '')
                if kind.endswith('#video'):
                    try:
                        url = str(item['id']['videoId'])
                        img = str(item['snippet']['thumbnails']['default']['url'])
                        print 'url', url
                        stream_link = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % url
                        if enigmaos == 'oe2.0':
                            title = '\\c0??????? ' + title
                        self.dInfo(title, stream_link, 0, img, main_title, page, desc=desc, extra=extra)
                    except:
                        continue
                elif kind.endswith('#playlistItem'):
                    try:
                        url = str(item['snippet']['resourceId']['videoId'])
                        img = str(item['snippet']['thumbnails']['default']['url'])
                        url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % url
                        if enigmaos == 'oe2.0':
                            title = '\\c0??????? ' + title
                        self.dInfo(title, url, 0, img, main_title, page, desc=desc, extra=extra)
                    except:
                        pass
                elif kind.endswith('#channel'):
                    url = str(item['id']['channelId'])
                    try:
                        img = str(item['snippet']['thumbnails']['default']['url'])
                    except:
                        img = ''
                    if enigmaos == 'oe2.0':
                        title = '\\c0000???? ' + title
                    self.dInfo(title, url, 12, img, main_title, page, desc=desc, extra=extra)
                elif kind.endswith('#playlist'):
                    try:
                        url = str(item['id']['playlistId'])
                    except:
                        url = str(item['id'])
                    try:
                        img = str(item['snippet']['thumbnails']['default']['url'])
                    except:
                        img = ''
                    if enigmaos == 'oe2.0':
                        title = '\\c0000??00 ' + title
                    url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=' + url
                    self.dInfo(title, url, 100, img, main_title, page, desc=desc, extra=extra)
                elif kind.endswith('#activity'):
                    try:
                        url = str(item['snippet']['channelId'])
                    except:
                        url = str(item['id'])
                    try:
                        img = str(item['snippet']['thumbnails']['default']['url'])
                    except:
                        img = ''
                    if enigmaos == 'oe2.0':
                        title = '\\c0000???? ' + title
                    self.dInfo(title, url, 12, img, main_title, page, desc=desc, extra=extra)
        except:
            printE()
    if not c4_browse_ajax == '':
        self.dInfo('More', main_url, 100, 'img/next.png', main_title, page, desc='', extra={}, dialog='nextpage')
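# Pagination sketch: getviedoList() stores data['nextPageToken'] as the page token, and the
# next request appends '&pageToken=<token>' to the same URL. A minimal standalone loop over a
# Data API list endpoint (placeholder URL and key) could look like this, reusing reqData():
def _demo_paginate(list_url, max_pages=3):
    import json
    token = ''
    for _ in range(max_pages):
        page_url = list_url if token == '' else list_url + '&pageToken=' + token
        body = reqData(page_url, {'User-Agent': 'Mozilla/5.0'})
        if body.strip() == '':
            break
        data = json.loads(body)
        for entry in data.get('items', []):
            print entry.get('id')
        token = str(data.get('nextPageToken', ''))
        if token == '':
            break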