def tryTologin(self):
    """Log in to EuroSportPlayer when needed.

    Re-authenticates only when there is no session yet (self.loggedIn is None)
    or the credentials configured in the plugin changed. The flow is:
    fetch token page -> fetch config page -> solve the login reCAPTCHA ->
    POST credentials as JSON; session cookies are persisted in self.COOKIE_FILE.
    Returns the boolean self.loggedIn (None/False-y on captcha failure).
    """
    printDBG('EuroSportPlayer.tryTologin start')
    errorMsg = _('Error communicating with the server.')  # NOTE(review): currently unused
    # Re-login only on first call or when the configured login/password changed.
    if None == self.loggedIn or self.login != config.plugins.iptvplayer.eurosportplayer_login.value or\
       self.password != config.plugins.iptvplayer.eurosportplayer_password.value:
        self.login = config.plugins.iptvplayer.eurosportplayer_login.value
        self.password = config.plugins.iptvplayer.eurosportplayer_password.value
        rm(self.COOKIE_FILE)  # drop any stale session cookies before a fresh login
        self.loggedIn = False
        self.loginMessage = ''
        if '' == self.login.strip() or '' == self.password.strip():
            # Subscription service: without credentials we cannot proceed.
            msg = _('The host %s requires subscription.\nPlease fill your login and password in the host configuration - available under blue button.') % self.getMainUrl()
            GetIPTVNotify().push(msg, 'info', 10)
            return False
        try:
            # get token
            tokenUrl = self.TOKEN_URL
            sts, data = self.getPage(tokenUrl)
            printDBG(data)
            # get config (also with captcha site-key)
            sts, data = self.getPage(self.CONFIG_URL)
            printDBG(data)
            # solve captcha required by the login endpoint
            (token, errorMsgTab) = CaptchaHelper().processCaptcha(self.recaptcha_sitekey, self.LOGIN_URL)
            if not token:
                printDBG(str(errorMsgTab))
                return  # returns None (falsy) when captcha solving failed
            printDBG('Captcha token :%s' % token)
            # try to login: JSON POST with the captcha token in a disco header
            header = {'User-Agent': self.USER_AGENT, 'Referer': self.LOGIN_URL, 'x-disco-client': 'WEB:x86_64:WEB_AUTH:1.1.0', 'x-disco-recaptcha-token': token, 'content-type': 'application/json'}
            postData = {'credentials': {'username': self.login, 'password': self.password}}
            url = "https://eu3-prod-direct.eurosportplayer.com/login"
            httpParams = {'header': header, 'with_metadata': True, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIE_FILE, 'raw_post_data': True}
            sts, data = self.getPage(url, httpParams, post_data=json_dumps(postData))
            '''
            good login
            { "data" : { "attributes" : { "lastLoginTime" : "2019-11-01T21:45:15Z", "realm" : "eurosport", "token" :
            "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJVU0VSSUQ6ZXVyb3Nwb3J0OmI4OGQ0YTBhLWQwZDctNDdkZi1iMzI5LWJjNmM5ZDNiOTRjYyIsImp0aSI6InRva2VuLThkOWYxMDgwLWUwNGEtNDMyZi04NDY1LWUwYTgyNDljMjEwMyIsImFub255bW91cyI6ZmFsc2UsImlhdCI6MTU3MjY4NDk3MX0.DtSAY9kAVfwcJKhPXczRlPW3CACd6ZmZwZvJilIrlv8"
            }, "id" : "token-8d9f1080-e04a-432f-8465-e0a8249c2103", "type" : "token" },
            "meta" : { "site" : { "attributes" : { "brand" : "eurosport", "websiteHostName" : "it.eurosportplayer.com" }, "id" : "IT", "type" : "site" } } }
            '''
            '''
            example: wrong password
            { "errors" : [ { "status" : "401", "code" : "unauthorized", "id" : "ATwRg09NZG", "detail" : "" } ] }
            '''
            # A failed request carrying a 401 marker means bad credentials.
            if not sts and '401' in str(data):
                msg = _('Login failed. Invalid email or password.')
                GetIPTVNotify().push(msg, 'error', 10)
                return False
            else:
                data = json_loads(data)
                printDBG(str(data))
                self.loggedIn = True
        except Exception:
            printExc()
    printDBG('EuroSportPlayer.tryTologin end loggedIn[%s]' % self.loggedIn)
    return self.loggedIn
def getLinksForVideo(self, cItem):
    """Resolve playable HLS links for a EuroSportPlayer item.

    Visits the video page (to refresh session cookies), optionally touches the
    route JSON, then queries the playback endpoint and expands the HLS master
    playlist into variant links filtered by the user's resolution/language
    preferences. Returns a list of {'name':..., 'url':...} dicts (possibly empty).
    """
    printDBG("EuroSportPlayer.getLinksForVideo [%s]" % cItem)
    self.checkLogin()
    linksTab = []
    try:
        printDBG(str(cItem))
        video_id = cItem['video_id']
        # open video page
        video_page_url = cItem['url']
        sts, data = self.getPage(video_page_url, {'header': {'User-Agent': self.USER_AGENT, 'Referer': video_page_url}, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIE_FILE})
        if not sts:
            return []
        # open route json page (best-effort; result is only logged, not used)
        route_id = cItem.get('route_id', '')
        if route_id:
            route = self.espRoutes[route_id]
            printDBG(json_dumps(route))
            #{"attributes": {"url": "/videos/eurosport/world-championship-239400", "canonical": true}, "type": "route", "id": "292e72a63ebcccb480984a84f3497b7702623ab6fe6e7d7d29b1dce79ed3da35"}
            route_url = self.getFullPath(route['attributes']['url'], 'route') + "?include=default"
            sts, data = self.getPage(route_url)
            #if sts:
            #    printDBG('--------------------------------')
            #    printDBG(data)
        # open video playback json page
        playback_info_url = self.PLAYBACK_URL.replace('{%video_id%}', video_id)
        sts, data = self.getPage(playback_info_url, {'header': {'User-Agent': self.USER_AGENT, 'Referer': video_page_url}, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIE_FILE})
        if not sts:
            return []
        printDBG('--------------------------------')
        printDBG(data)
        j = json_loads(data)
        s = j['data']['attributes']['streaming']
        if 'hls' in s:
            # carry UA/Referer as url metadata so the player sends them too
            link_url = strwithmeta(s['hls']['url'], {'User-Agent': self.USER_AGENT, 'Referer': video_page_url})
            if config.plugins.iptvplayer.eurosportplayer_showauto and config.plugins.iptvplayer.eurosportplayer_showauto.value:
                linksTab.append({'name': 'auto hls', 'url': link_url})
            list_of_links = getDirectM3U8Playlist(link_url, checkExt=False, variantCheck=True, checkContent=True, sortWithMaxBitrate=99999999)
            # select links with preferred audio language / minimum resolution;
            # links below min resolution are silently dropped
            list_of_links2 = []
            ambient_links = []
            other_lang_links = []
            for l in list_of_links:
                # variant names embed "res: WxH"; capture the height
                m = re.findall("res:[ ][0-9]{2,4}x([0-9]{2,4})", l["name"])
                if m:
                    #printDBG(str(l))
                    #printDBG("resolution: %s" % int(m[0]))
                    if (not config.plugins.iptvplayer.eurosportplayer_minres) or int(m[0]) >= int(config.plugins.iptvplayer.eurosportplayer_minres.value):
                        if config.plugins.iptvplayer.eurosportplayer_showlanguage and config.plugins.iptvplayer.eurosportplayer_showlanguage.value:
                            if config.plugins.iptvplayer.eurosportplayer_showlanguage.value == "all" or config.plugins.iptvplayer.eurosportplayer_showlanguage.value in l["name"]:
                                list_of_links2.append(l)
                            else:
                                # not the preferred language: keep aside as fallback
                                if 'Ambient' in l["name"]:
                                    ambient_links.append(l)
                                else:
                                    other_lang_links.append(l)
                        else:
                            list_of_links2.append(l)
                else:
                    # no resolution info in the name: keep the link as-is
                    list_of_links2.append(l)
            linksTab.extend(list_of_links2)
            if not linksTab:
                # nothing matched the preferred language: fall back to the rest
                linksTab.extend(other_lang_links)
                linksTab.extend(ambient_links)
            elif config.plugins.iptvplayer.eurosportplayer_showambient and config.plugins.iptvplayer.eurosportplayer_showambient.value:
                linksTab.extend(ambient_links)
        #if 'dash' in s:
        #    link_url = strwithmeta(s['dash']['url'], {'User-Agent': self.USER_AGENT, 'Referer' : video_page_url})
        #    linksTab.append({'name':'dash', 'url': link_url})
        #if 'mss' in s:
        #    link_url = strwithmeta(s['dash']['url'], {'User-Agent': self.USER_AGENT, 'Referer' : video_page_url})
        #    linksTab.append({'name':'mss', 'url': link_url})
    except Exception:
        printExc()
    return linksTab
def listOnAir(self, cItem):
    """List all airings that are live right now via the persisted 'onAir' query."""
    printDBG("EuroSportPlayer.listOnAir [%s]" % cItem)
    try:
        locale = self.serverApiData['locale']
        queryVars = {
            "uiLang": locale['language'],
            "mediaRights": ["GeoMediaRight"],
            "preferredLanguages": locale['languageOrder'],
        }
        queryUrl = '%s/persisted/query/eurosport/web/Airings/onAir?variables=%s' % (
            self.serverApiData['server_path']['search'],
            urllib.quote(json_dumps(queryVars, separators=(',', ':'))))
        sts, rawData = self.getJSPage(queryUrl)
        if not sts:
            return
        response = json_loads(rawData)
        now = datetime.now()
        for airing in response['data']['Airings']:
            self._addItem(cItem, airing, now)
    except Exception:
        printExc()
def listItems2(self, cItem, nextCategory):
    """List FreeDisc search results for one page.

    POSTs the search AJAX endpoint with the phrase/type taken from cItem,
    adds a directory entry for each matching file of type '7' or '6'
    (audio/video — presumably; TODO confirm against FreeDisc type codes),
    and appends a 'Next page' entry while more pages remain.
    """
    printDBG("FreeDiscPL.listItems2 cItem[%s]" % (cItem))
    page = cItem.get('page', 0)
    post_data = {"search_phrase": cItem.get('f_search_pattern', ''), "search_type": cItem.get('f_search_type', ''), "search_saved": 0, "pages": 0, "limit": 0}
    if page > 0:
        # only subsequent pages carry an explicit page number
        post_data['search_page'] = page
    params = dict(self.defaultParams)
    params['raw_post_data'] = True
    params['header'] = dict(self.AJAX_HEADER)
    # Referer must look like the human-visible search URL
    params['header']['Referer'] = self.cm.getBaseUrl(self.getMainUrl()) + 'search/%s/%s' % (cItem.get('f_search_type', ''), urllib.quote(cItem.get('f_search_pattern', '')))
    sts, data = self.getPage(cItem['url'], params, json_dumps(post_data))
    if not sts:
        return
    printDBG(data)
    try:
        data = json_loads(data)['response']
        # lookup tables keyed by stringified ids
        logins = data['logins_translated']
        translated = data['directories_translated']
        for item in data['data_files']['data']:
            userItem = logins[str(item['user_id'])]
            dirItem = translated[str(item['parent_id'])]
            icon = 'http://img.freedisc.pl/photo/%s/7/2/%s.png' % (item['id'], item['name_url'])
            url = '/%s,f-%s,%s' % (userItem['url'], item['id'], item['name_url'])
            title = item['name']
            desc = ' | '.join([item['date_add_format'], item['size_format']])
            desc += '[/br]' + (_('Added by: %s, directory: %s') % (userItem['display'], dirItem['name']))
            params = dict(cItem)
            params.update({'good_for_fav': True, 'f_user_item': userItem, 'f_dir_item': dirItem, 'category': nextCategory, 'title': self.cleanHtmlStr(title), 'url': self.getFullUrl(url), 'icon': self.getFullIconUrl(icon), 'desc': desc, 'f_type': item.get('type_fk', '')})
            # only these file-type codes are listed
            if params['f_type'] in ['7', '6']:
                self.addDir(params)
        if data['pages'] > page:
            params = dict(cItem)
            params.update({'good_for_fav': False, 'title': _('Next page'), 'page': page + 1})
            self.addDir(params)
    except Exception:
        printExc()
def getFavouriteData(self, cItem):
    """Serialize the item for the favourites store, without the 'dashclear' key."""
    printDBG('TVNowDE.getFavouriteData')
    favData = dict(cItem)
    if 'dashclear' in favData:
        del favData['dashclear']
    return json_dumps(favData)
def listRaiSportVideos(self, cItem):
    """List RaiSport archive videos for a given topic.

    POSTs an Elasticsearch-style query (tematica/dominio filters) to the
    archive search endpoint and adds one video entry per 'Video' hit, plus a
    'Next page' entry while more results remain.
    """
    printDBG("Raiplay.listRaiSportItem %s" % cItem['title'])
    key = cItem.get('key', '')
    dominio = cItem.get('dominio', '')
    # NOTE(review): 'page' is used as a result offset (incremented by pageSize
    # below), not a page index — the endpoint presumably expects an offset; verify.
    page = int(cItem.get('page', 0))
    header = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Content-Type': 'application/json; charset=UTF-8',
        'Origin': 'https://www.raisport.rai.it',
        'Referer': 'https://www.raisport.rai.it/archivio.html',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
    }
    pageSize = 50
    payload = {"page": page, "pageSize": pageSize, "filters": {"tematica": [key], "dominio": dominio}}
    postData = json_dumps(payload)
    sts, data = self.getPage(self.RAISPORT_SEARCH_URL, {'header': header, 'raw_post_data': 1}, post_data=postData)
    if sts:
        j = json_loads(data)
        if 'hits' in j:
            h = j['hits']
            printDBG(str(h))
            if 'hits' in h:
                for hh in h['hits']:
                    if '_source' in hh:
                        news_type = hh['_source']['tipo']
                        # only video hits with media info are playable
                        if news_type == 'Video' and 'media' in hh['_source']:
                            relinker_url = hh['_source']['media']['mediapolis']
                            if 'durata' in hh['_source']['media']:
                                duration = " - " + _("Duration") + ": " + hh['_source']['media']['durata']
                            else:
                                duration = ""
                            icon = hh['_source']['immagini']['default']
                            title = hh['_source']['titolo']
                            creation_date = hh['_source']['data_creazione']
                            if 'sommario' in hh['_source']:
                                desc = creation_date + duration + '\n' + hh['_source']['sommario']
                            else:
                                desc = creation_date + duration
                            params = {'category': 'raisport_video', 'title': title, 'desc': desc, 'url': relinker_url, 'icon': icon}
                            printDBG(str(params))
                            self.addVideo(params)
                # more results remain beyond the current offset window
                if h['total'] > (page + pageSize):
                    page += pageSize
                    params = dict(cItem)
                    params['title'] = _("Next page")
                    params['page'] = page
                    self.addMore(params)
def listVodItems(self, cItem):
    """List on-demand videos for a sport, paginated 30 per page.

    Depending on cItem['f_vod_type'] the 'all' persisted query (with a
    termsFilters clause) or the per-category query is used; each hit is added
    via self._addItem and a 'Next page' entry is appended while more remain.
    """
    printDBG("EuroSportPlayer.listVodItems [%s]" % cItem)
    try:
        page = cItem.get('page', 1)
        sportId = cItem['f_sport_id']
        vodType = cItem.get('f_vod_type', 'all')
        url = self.serverApiData['server_path']['search'] + '/persisted/query/eurosport/web/ondemand/' + vodType
        if vodType == 'all':
            variables = {
                "pageSize": 30,
                "page": page,
                # FIX: was self.serverApiData['locale'] (the whole locale dict);
                # every sibling query sends locale['language'] as uiLang
                "uiLang": self.serverApiData['locale']['language'],
                "mediaRights": ["GeoMediaRight"],
                "preferredLanguages": self.serverApiData['locale']['languageOrder'],
                "must": {
                    "termsFilters": [{
                        "attributeName": "category",
                        "values": ["%s" % sportId]
                    }]
                }
            }
            url += '?variables='
        else:
            variables = {
                "pageSize": 30,
                "page": page,
                "uiLang": self.serverApiData['locale']['language'],
                "mediaRights": ["GeoMediaRight"],
                "preferredLanguages": self.serverApiData['locale']['languageOrder'],
                "category": "%s" % sportId
            }
            url += '/category?variables='
        url += urllib.quote(json_dumps(variables, separators=(',', ':')))
        sts, data = self.getJSPage(url)
        if not sts:
            return
        # the two query flavours wrap hits differently
        if vodType == 'all':
            data = json_loads(data)['data']['bucket']
            totalHits = data['meta']['hits']  # renamed from 'all' (shadowed builtin)
            data = data['aggs'][0]['buckets'][0]
        else:
            data = json_loads(data)['data']['query']
            totalHits = data['meta']['hits']
        NOW = datetime.now()
        for item in data['hits']:
            self._addItem(cItem, item['hit'], NOW)
        if page * 30 < totalHits:
            params = dict(cItem)
            params.pop('priv_item', None)
            params.update({
                'good_for_fav': False,
                'title': _('Next page'),
                'page': page + 1
            })
            self.addDir(params)
    except Exception:
        printExc()
def getSearchResult(self, pattern, searchType, page, nextPageCategory, sortBy='A', url=''):
    """Run a YouTube search (or fetch the next result page) and return item params.

    First call: builds a /results URL with an 'sp' filter token derived from
    searchType and sortBy and parses ytInitialData out of the HTML.
    Next pages: 'url' is given and may carry JSON post_data metadata for the
    youtubei continuation endpoint. Returns a list of video/channel/playlist
    param dicts, optionally followed by a 'Next Page' entry.
    """
    printDBG('YouTubeParser.getSearchResult pattern[%s], searchType[%s], page[%s]' % (pattern, searchType, page))
    currList = []
    try:
        #url = 'http://www.youtube.com/results?search_query=%s&filters=%s&search_sort=%s&page=%s' % (pattern, searchType, sortBy, page)
        nextPage = {}
        nP = {}
        nP_new = {}
        r2 = []
        if url:
            # next page search
            url = strwithmeta(url)
            if 'post_data' in url.meta:
                # youtubei continuation: JSON POST
                http_params = dict(self.http_params)
                http_params['header']['Content-Type'] = 'application/json'
                http_params['raw_post_data'] = True
                sts, data = self.cm.getPage(url, http_params, url.meta['post_data'])
            else:
                # legacy pbj-style continuation
                sts, data = self.cm.getPage(url, self.http_params, self.postdata)
            if sts:
                response = json_loads(data)
        else:
            # new search
            # url = 'http://www.youtube.com/results?search_query=%s&filters=%s&search_sort=%s' % (pattern, searchType, sortBy)
            url = 'https://www.youtube.com/results?search_query=' + pattern + '&sp='
            # 'sp' tokens combine sort (sortBy letter) and type filter
            if searchType == 'video':
                # url += 'EgIQAQ%253D%253D'
                url += 'CA%sSAhAB' % sortBy
            if searchType == 'channel':
                # url += 'EgIQAg%253D%253D'
                url += 'CA%sSAhAC' % sortBy
            if searchType == 'playlist':
                # url += 'EgIQAw%253D%253D'
                url += 'CA%sSAhAD' % sortBy
            if searchType == 'live':
                url += 'EgJAAQ%253D%253D'
            sts, data = self.cm.getPage(url, self.http_params)
            if sts:
                self.checkSessionToken(data)
                # ytInitialData appears under two markers depending on page version
                data2 = self.cm.ph.getDataBeetwenMarkers(data, "window[\"ytInitialData\"] =", "};", False)[1]
                if len(data2) == 0:
                    data2 = self.cm.ph.getDataBeetwenMarkers(data, "var ytInitialData =", "};", False)[1]
                # the closing brace is consumed by the marker search — restore it
                response = json_loads(data2 + "}")
        if not sts:
            return []
        # printDBG("-------- response ------------")
        # printDBG(json_dumps(response))
        # printDBG("------------------------------")
        # search videos
        r2 = list(self.findKeys(response, 'videoRenderer'))
        printDBG("---------------------")
        printDBG(json_dumps(r2))
        printDBG("---------------------")
        for item in r2:
            params = self.getVideoData(item)
            if params:
                printDBG(str(params))
                currList.append(params)
        # search channels
        r2 = list(self.findKeys(response, 'channelRenderer'))
        printDBG("---------------------")
        printDBG(json_dumps(r2))
        printDBG("---------------------")
        for item in r2:
            params = self.getChannelData(item)
            if params:
                printDBG(str(params))
                currList.append(params)
        #search playlists
        r2 = list(self.findKeys(response, 'playlistRenderer'))
        printDBG("---------------------")
        printDBG(json_dumps(r2))
        printDBG("---------------------")
        for item in r2:
            params = self.getPlaylistData(item)
            if params:
                printDBG(str(params))
                currList.append(params)
        # continuation: old-style nextContinuationData, or new continuationEndpoint
        nP = list(self.findKeys(response, "nextContinuationData"))
        nP_new = list(self.findKeys(response, "continuationEndpoint"))
        if nP:
            nextPage = nP[0]
            # printDBG("-------------- nextPage -------------------------")
            # printDBG(json_dumps(nextPage))
            # printDBG("-------------------------------------------------")
            ctoken = nextPage["continuation"]
            itct = nextPage["clickTrackingParams"]
            try:
                label = nextPage["label"]["runs"][0]["text"]
            except:
                label = _("Next Page")
            urlNextPage = self.updateQueryUrl(url, {'pbj': '1', 'ctoken': ctoken, 'continuation': ctoken, 'itct': itct})
            params = {'type': 'more', 'category': "search_next_page", 'title': label, 'page': str(int(page) + 1), 'url': urlNextPage}
            printDBG(str(params))
            currList.append(params)
        elif nP_new:
            printDBG("-------------------------------------------------")
            printDBG(json_dumps(nP_new))
            printDBG("-------------------------------------------------")
            nextPage = nP_new[0]
            ctoken = nextPage["continuationCommand"]["token"]
            itct = nextPage["clickTrackingParams"]
            label = _("Next Page")
            # new API: POST the continuation token to the youtubei search endpoint
            urlNextPage = "https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8"
            post_data = {
                'context': {
                    'client': {
                        'clientName': 'WEB',
                        'clientVersion': '2.20201021.03.00',
                    }
                },
            }
            post_data['continuation'] = ctoken
            post_data['context']['clickTracking'] = {'clickTrackingParams': itct}
            post_data = json_dumps(post_data).encode('utf-8')
            # stash the POST body on the url so the next call finds it in url.meta
            urlNextPage = strwithmeta(urlNextPage, {'post_data': post_data})
            params = {'type': 'more', 'category': "search_next_page", 'title': label, 'page': str(int(page) + 1), 'url': urlNextPage}
            printDBG(str(params))
            currList.append(params)
    except Exception:
        printExc()
    return currList
def listSearchItems(self, cItem):
    """Query the site-search endpoint and list hits, 20 per page, with a 'Next page' entry."""
    printDBG("EuroSportPlayer.listSearchItems [%s]" % cItem)
    try:
        page = cItem.get('page', 1)
        queryVars = {
            "index": "eurosport_global",
            "preferredLanguages": ["pl", "en"],
            "uiLang": "pl",
            "mediaRights": ["GeoMediaRight"],
            "page": page,
            "pageSize": 20,
            "q": cItem['f_query'],
            "type": ["Video", "Airing", "EventPage"],
            "include_images": True,
        }
        searchUrl = self.serverApiData['server_path']['search'] \
            + '/persisted/query/core/sitesearch?variables=' \
            + urllib.quote(json_dumps(queryVars, separators=(',', ':')))
        sts, rawData = self.getJSPage(searchUrl)
        if not sts:
            return
        results = json_loads(rawData)['data']['sitesearch']
        now = datetime.now()
        for hit in results['hits']:
            self._addItem(cItem, hit['hit'], now)
        # more hits remain beyond this page
        if page * 20 < results['meta']['hits']:
            nextParams = dict(cItem)
            nextParams.pop('priv_item', None)
            nextParams.update({'good_for_fav': False, 'title': _('Next page'), 'page': page + 1})
            self.addDir(nextParams)
    except Exception:
        printExc()
def decryptSignatures(self, encSignatures, playerUrl):
    """Decrypt YouTube stream signatures using the player's own JS.

    Extracts the signature-decryption function (plus its helper functions and
    objects) from the player script at playerUrl, assembles a standalone JS
    program and runs it via js_execute_ext. The assembled code is cached under
    a hash derived from playerUrl, so extraction happens once per player build.
    Returns the list of decrypted signatures ([] on failure).
    """
    decSignatures = []
    code = ''  # stays '' when the cached algo is reused; the executor resolves it by hash
    jsname = 'ytsigndec'
    jshash = 'hash7_' + playerUrl.split('://', 1)[-1]
    if not is_js_cached(jsname, jshash):
        # get main function
        sts, self.playerData = self.cm.getPage(playerUrl)
        if not sts:
            return []
        t1 = time.time()  # NOTE(review): timing value never read
        code = []
        mainFunctionName = self._findMainFunctionName()
        if not mainFunctionName:
            SetIPTVPlayerLastHostError(_('Encryption function name extraction failed!\nPlease report the problem to %s') % 'https://github.com/OpenVisionE2/e2iplayer-ov/issues')
            return []
        printDBG("mainFunctionName >> %s" % mainFunctionName)
        mainFunction = self._findFunction(mainFunctionName)
        if not mainFunction:
            SetIPTVPlayerLastHostError(_('Encryption function body extraction failed!\nPlease report the problem to %s') % 'https://github.com/OpenVisionE2/e2iplayer-ov/issues')
            return []
        code.append(mainFunction)
        # helpers are prepended so they are defined before the main function
        funNames = self._getAllLocalSubFunNames(mainFunction)
        for funName in funNames:
            fun = self._findFunction(funName)
            code.insert(0, fun)
        objects = self._getAllObjectsWithMethods(mainFunction)
        for objName, methods in objects.iteritems():
            obj = self._findObject(objName, methods)
            code.insert(0, obj)
        # driver: decrypt every entry of e2i_enc and print the JSON result
        code.append('e2i_dec=[];for (var idx in e2i_enc){e2i_dec.push(%s(e2i_enc[idx]));};print(JSON.stringify(e2i_dec));' % mainFunctionName)
        code = '\n'.join(code)
        printDBG("---------------------------------------")
        printDBG("| ALGO FOR SIGNATURE DECRYPTION |")
        printDBG("---------------------------------------")
        printDBG(code)
        printDBG("---------------------------------------")
    else:
        printDBG("USE ALGO FROM CACHE: %s" % jshash)
    # first snippet injects the encrypted input; second is the (cached) algo
    js_params = [{'code': 'e2i_enc = %s;' % json_dumps(encSignatures)}]
    js_params.append({'name': jsname, 'code': code, 'hash': jshash})
    ret = js_execute_ext(js_params)
    if ret['sts'] and 0 == ret['code']:
        try:
            decSignatures = json_loads(ret['data'])
        except Exception:
            printExc()
    return decSignatures
def listSchedule(self, cItem):
    """List scheduled airings between cItem's f_sdate and f_edate, sorted by start time."""
    printDBG("EuroSportPlayer.listSchedule [%s]" % cItem)

    def _dateStr(d):
        # ISO timestamp with millisecond precision and a literal trailing 'Z'
        return d.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'

    try:
        cItem = dict(cItem)
        startDate = cItem.pop('f_sdate')
        endDate = cItem.pop('f_edate')
        locale = self.serverApiData['locale']
        queryVars = {
            "startDate": _dateStr(startDate - self.OFFSET),
            "endDate": _dateStr(endDate - self.OFFSET),
            "uiLang": locale['language'],
            "mediaRights": ["GeoMediaRight"],
            "preferredLanguages": locale['languageOrder'],
        }
        queryUrl = '%s/persisted/query/eurosport/web/Airings/DateRange?variables=%s' % (
            self.serverApiData['server_path']['search'],
            urllib.quote(json_dumps(queryVars, separators=(',', ':'))))
        sts, rawData = self.getJSPage(queryUrl)
        if not sts:
            return
        response = json_loads(rawData)
        airings = response['data']['Airings']
        airings.sort(key=lambda item: item['startDate'])  # chronological order
        now = datetime.now()
        windowStart = startDate + self.OFFSET - timedelta(days=1)
        windowEnd = endDate + self.OFFSET + timedelta(days=1)
        for airing in airings:
            # skip entries with no playback urls at all
            if airing.get('playbackUrls', []) in (None, []):
                continue
            self._addItem(cItem, airing, now, windowStart, windowEnd)
    except Exception:
        printExc()
def listEventsItems(self, cItem):
    """List the videos (or airings, newest-first) belonging to one event page."""
    printDBG("EuroSportPlayer.listEventsItems [%s]" % cItem)
    try:
        locale = self.serverApiData['locale']
        queryVars = {
            "contentId": cItem['f_content_id'],
            "include_media": True,
            "include_images": True,
            "uiLang": locale['language'],
            "mediaRights": ["GeoMediaRight"],
            "preferredLanguages": locale['languageOrder'],
        }
        queryUrl = self.serverApiData['server_path']['search'] \
            + '/persisted/query/eurosport/web/EventPageByContentId?variables=' \
            + urllib.quote(json_dumps(queryVars, separators=(',', ':')))
        sts, rawData = self.getJSPage(queryUrl)
        if not sts:
            return
        response = json_loads(rawData)
        # category suffix decides whether we list airings or plain videos
        name = cItem['category'].split('_', 1)[-1]
        fType = '' if name == 'airings' else 'VIDEO'
        for mediaEntry in response['data']['EventPageByContentId']['media']:
            videos = mediaEntry['videos']
            if name == 'airings':
                videos.reverse()  # newest first for airings
            now = datetime.now()
            for video in videos:
                self._addItem(cItem, video, now, fType=fType)
    except Exception:
        printExc()
def listEventsCategories(self, cItem, nextCategory):
    """List (non-)olympics event pages alphabetically, one directory per event."""
    printDBG("EuroSportPlayer.listEventsCategories [%s]" % cItem)

    def _str2dateShort(txt):
        # render e.g. ' 5 Jan, 2020' with the month name localized via the
        # server-provided i18n dictionary
        date = self._str2date(txt)
        month = self.ABBREVIATED_MONTH_NAME_TAB[date.month - 1]
        return ' %s %s, %s' % (date.day, self.serverApiData['i18n_dictionary'].get(month, month), date.year)

    try:
        # 'nonolympics' selects the NonOlympics... persisted query variant
        prefix = 'Non' if cItem['f_type'] == 'nonolympics' else ''
        locale = self.serverApiData['locale']
        queryVars = {
            "include_images": True,
            "uiLang": locale['language'],
            "mediaRights": ["GeoMediaRight"],
            "preferredLanguages": locale['languageOrder'],
        }
        queryUrl = self.serverApiData['server_path']['search'] \
            + '/persisted/query/eurosport/%sOlympicsEventPageAll?variables=%s' % (
                prefix, urllib.quote(json_dumps(queryVars, separators=(',', ':'))))
        sts, rawData = self.getJSPage(queryUrl)
        if not sts:
            return
        response = json_loads(rawData)
        events = response['data']['EventPageAll']
        events.sort(key=lambda item: item['eventDetails'][0]['title'])
        for event in events:
            title = self.cleanHtmlStr(event['eventDetails'][0]['title'])
            desc = '%s - %s' % (_str2dateShort(event['startDate']), _str2dateShort(event['endDate']))
            icon = event['heroImage'][0]['photos'][0]['imageLocation']
            params = dict(cItem)
            params.update({
                'good_for_fav': False,
                'category': nextCategory,
                'title': title,
                'desc': desc,
                'icon': icon,
                'f_content_id': event['contentId'],
            })
            self.addDir(params)
    except Exception:
        printExc()
def getFavouriteData(self, cItem):
    """Return the item serialized as JSON, or an empty string on failure."""
    try:
        return json_dumps(cItem)
    except Exception:
        printExc()
    return ''
def getLinksForVideo(self, cItem):
    """Resolve playable links for an Ekstraklasa item.

    Items without a URL are upcoming streams: the user is notified of the
    start time. Otherwise the stream JSON is fetched with the bearer token,
    the HLS master playlist is expanded and either all variants or only the
    first one below the configured default-resolution bitrate is returned.
    """
    printDBG("Ekstraklasa.getLinksForVideo '%s'" % cItem)
    url = cItem.get('url', '')
    if not url:
        # not playable yet — show the scheduled start time if we know it
        scheduleDate = cItem.get('schedule_date', '')
        if scheduleDate:
            msg = _("Stream starts from %s") % scheduleDate
            GetIPTVNotify().push(msg, 'info', 10)
        return []
    if not self.loggedIn:
        # need to login to view videos
        # NOTE(review): method name casing 'TryToLogin' differs from the
        # 'tryTologin' used by other hosts here — confirm it exists on this class
        self.TryToLogin()
        if not self.loggedIn:
            return []
    headers = {
        'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
        'Accept': 'application/json',
        'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
        'Referer': 'https://www.ekstraklasa.tv/',
        'Content-Type': 'application/json',
        'Authorization': 'Bearer %s' % self.token,
        'Origin': 'https://www.ekstraklasa.tv',
        'DNT': '1',
    }
    linksTab = []
    sts, data = self.cm.getPage(url, {'header': headers})
    if sts:
        printDBG("*********************")
        printDBG(data)
        printDBG("*********************")
        response = json_loads(data)
        url = response['data']['url']
        playlist = getDirectM3U8Playlist(url, checkExt=False, variantCheck=True, checkContent=True, sortWithMaxBitrate=99999999)
        # empty/None/"0" default resolution means: offer every variant
        if (config.plugins.iptvplayer.ekstraklasa_defaultres.value == None) or (config.plugins.iptvplayer.ekstraklasa_defaultres.value == '') or (config.plugins.iptvplayer.ekstraklasa_defaultres.value == "0"):
            linksTab.extend(playlist)
        else:
            def_res = int(config.plugins.iptvplayer.ekstraklasa_defaultres.value)
            printDBG(json_dumps(playlist))
            # playlist is bitrate-sorted: pick the first track below the threshold
            for track in playlist:
                if int(track.get("bitrate", "0")) < (def_res * 1000):
                    linksTab.append(track)
                    break
    else:
        msg = _("You are not allowed to play this video")
        GetIPTVNotify().push(msg, 'info', 10)
    return linksTab
def getSearchResult1(self, pattern, searchType, page, nextPageCategory, sortBy='', url=''):
    """Older variant of getSearchResult using fixed 'sp' filter tokens.

    Unlike getSearchResult it ignores sortBy for the token, fetches next pages
    with a plain pbj-style GET/POST (no youtubei post_data metadata) and builds
    the continuation URL with updateQueryUrl in both continuation branches.
    Returns a list of video/channel/playlist param dicts plus an optional
    'Next Page' entry.
    """
    printDBG('YouTubeParser.getSearchResult pattern[%s], searchType[%s], page[%s]' % (pattern, searchType, page))
    currList = []
    try:
        #url = 'http://www.youtube.com/results?search_query=%s&filters=%s&search_sort=%s&page=%s' % (pattern, searchType, sortBy, page)
        nextPage = {}
        nP = {}
        nP_new = {}
        r2 = []
        if url:
            # next page search
            sts, data = self.cm.getPage(url, self.http_params, self.postdata)
            if sts:
                response = json_loads(data)
        else:
            # new search
            # url = 'http://www.youtube.com/results?search_query=%s&filters=%s&search_sort=%s' % (pattern, searchType, sortBy)
            url = 'https://www.youtube.com/results?search_query=' + pattern + '&sp='
            # fixed (pre-encoded) type filter tokens
            if searchType == 'video':
                url += 'EgIQAQ%253D%253D'
            if searchType == 'channel':
                url += 'EgIQAg%253D%253D'
            if searchType == 'playlist':
                url += 'EgIQAw%253D%253D'
            if searchType == 'live':
                url += 'EgJAAQ%253D%253D'
            sts, data = self.cm.getPage(url, self.http_params)
            if sts:
                self.checkSessionToken(data)
                # ytInitialData appears under two markers depending on page version
                data2 = self.cm.ph.getDataBeetwenMarkers(data, "window[\"ytInitialData\"] =", "};", False)[1]
                if len(data2) == 0:
                    data2 = self.cm.ph.getDataBeetwenMarkers(data, "var ytInitialData =", "};", False)[1]
                # the closing brace is consumed by the marker search — restore it
                response = json_loads(data2 + "}")
        if not sts:
            return []
        #printDBG("--------------------")
        #printDBG(json_dumps(response))
        #printDBG("--------------------")
        # search videos
        r2 = list(self.findKeys(response, 'videoRenderer'))
        printDBG("---------------------")
        printDBG(json_dumps(r2))
        printDBG("---------------------")
        for item in r2:
            params = self.getVideoData(item)
            if params:
                printDBG(str(params))
                currList.append(params)
        # search channels
        r2 = list(self.findKeys(response, 'channelRenderer'))
        printDBG("---------------------")
        printDBG(json_dumps(r2))
        printDBG("---------------------")
        for item in r2:
            params = self.getChannelData(item)
            if params:
                printDBG(str(params))
                currList.append(params)
        #search playlists
        r2 = list(self.findKeys(response, 'playlistRenderer'))
        printDBG("---------------------")
        printDBG(json_dumps(r2))
        printDBG("---------------------")
        for item in r2:
            params = self.getPlaylistData(item)
            if params:
                printDBG(str(params))
                currList.append(params)
        # continuation: old-style nextContinuationData, or new continuationEndpoint
        nP = list(self.findKeys(response, "nextContinuationData"))
        nP_new = list(self.findKeys(response, "continuationEndpoint"))
        if nP:
            nextPage = nP[0]
            #printDBG("-------------------------------------------------")
            #printDBG(json_dumps(nextPage))
            #printDBG("-------------------------------------------------")
            ctoken = nextPage["continuation"]
            itct = nextPage["clickTrackingParams"]
            try:
                label = nextPage["label"]["runs"][0]["text"]
            except:
                label = _("Next Page")
            urlNextPage = self.updateQueryUrl(url, {'pbj': '1', 'ctoken': ctoken, 'continuation': ctoken, 'itct': itct})
            params = {'type': 'more', 'category': "search_next_page", 'title': label, 'page': str(int(page) + 1), 'url': urlNextPage}
            printDBG(str(params))
            currList.append(params)
        elif nP_new:
            printDBG("-------------------------------------------------")
            printDBG(json_dumps(nP_new))
            printDBG("-------------------------------------------------")
            nextPage = nP_new[0]
            ctoken = nextPage["continuationCommand"]["token"]
            itct = nextPage["clickTrackingParams"]
            label = _("Next Page")
            urlNextPage = self.updateQueryUrl(url, {'pbj': '1', 'ctoken': ctoken, 'continuation': ctoken, 'itct': itct})
            params = {'type': 'more', 'category': "search_next_page", 'title': label, 'page': str(int(page) + 1), 'url': urlNextPage}
            printDBG(str(params))
            currList.append(params)
    except Exception:
        printExc()
    return currList
def addVideoFromData(self, videoData, OnlyLive=False, label_format=None, future=False):
    """Build the item-params dict for one EuroSportPlayer video/airing.

    videoData is a single JSON:API 'video' resource with 'id', 'attributes'
    (name, videoType, scheduleStart, videoDuration, ...) and 'relationships'
    (routes, txSports, primaryChannel, images). Items filtered out by
    OnlyLive or by a future schedule date (unless 'future') yield {}.

    Relevant attributes (trimmed example):
      "attributes": {"scheduleStart": "2019-10-27T08:50:00Z",
                     "videoDuration": 6060000,   # milliseconds
                     "videoType": "STANDALONE", "name": "...",
                     "secondaryTitle": "...", "description": "...",
                     "path": "eurosport/world-cup-ambient-sound-250797"},
      "relationships": {"routes": {"data": [{"type": "route", "id": "..."}]},
                        "txSports": {"data": [{"type": "taxonomyNode", "id": "..."}]},
                        "primaryChannel": {"data": {"type": "channel", "id": "95"}},
                        "images": {"data": [{"type": "image", "id": "..."}]}},
      "type": "video", "id": "250797"
    """
    params = {}
    video_id = videoData['id']
    item_data = videoData['attributes']
    printDBG(json_dumps(item_data))
    # label tag: broadcastType when present, else the generic videoType
    if 'broadcastType' in item_data:
        #printDBG(" %s, %s , %s" % (item_data['name'], item_data['videoType'], item_data['broadcastType']))
        bt = item_data['broadcastType']
    else:
        #printDBG(" %s, %s , %s" % (item_data['name'], item_data['videoType'], ''))
        bt = item_data['videoType']
    if (not OnlyLive) or (item_data['videoType'] == 'LIVE'):
        # start time: scheduled start preferred, else earliest playable moment
        if 'scheduleStart' in item_data:
            start = item_data['scheduleStart']
        else:
            start = item_data['earliestPlayableStart']
        #printDBG("start: %s" % start)
        scheduleDate = self._gmt2local(start)
        #printDBG("local time: %s" % str(scheduleDate))
        # only already-started items are added, unless 'future' is requested
        if scheduleDate < datetime.now() or future:
            txtDate = scheduleDate.strftime("%d/%m/%Y")
            txtTime = scheduleDate.strftime("%H:%M")
            # route id (if any) lets getLinksForVideo resolve the route JSON later
            if 'routes' in videoData['relationships']:
                route_id = videoData['relationships']['routes']['data'][0]['id']
            else:
                route_id = ''
            if label_format:
                if label_format == 'schedule':
                    # schedule layout: "HH:MM SPORT - name - channel" (or [LIVE])
                    if 'txSports' in videoData['relationships']:
                        sport_node_id = videoData['relationships']['txSports']['data'][0]['id']
                        sport = self.espTaxonomyNodes[sport_node_id]
                        #printDBG(json_dumps(sport))
                        txtSport = sport['attributes']['name']
                    else:
                        txtSport = ''
                    if 'primaryChannel' in videoData['relationships']:
                        channel_id = videoData['relationships']['primaryChannel']['data']['id']
                        channel = self.espChannels[channel_id]
                        #printDBG(json_dumps(channel))
                        txtChannel = channel['attributes']['name']
                    else:
                        txtChannel = ''
                    if bt == 'LIVE':
                        title = " %s %s - %s [%s]" % (txtTime, txtSport.upper(), item_data['name'], bt)
                    else:
                        title = " %s %s - %s - %s" % (txtTime, txtSport.upper(), item_data['name'], txtChannel)
                # elif: other label formats could go here
                else:
                    title = item_data['name'] + " [%s] - (%s)" % (bt, txtDate)
            else:
                title = item_data['name'] + " [%s] - (%s)" % (bt, txtDate)
            desc = "video id: %s\n" % video_id
            if 'videoDuration' in item_data:
                # videoDuration is in milliseconds
                desc = desc + _("Duration") + ": %s" % str(timedelta(seconds=int(item_data['videoDuration'] / 1000))) + "\n"
            if 'secondaryTitle' in item_data:
                desc = desc + item_data['secondaryTitle'] + "\n"
            desc = desc + item_data.get('description', '')
            icon_id = videoData['relationships']['images']['data'][0]['id']
            icon = self.espImages[icon_id]['attributes']['src']
            url = self.getFullPath(item_data['path'], 'video')
            params = {'title': title, 'desc': desc, 'url': url, 'icon': icon, 'video_id': video_id, 'schedule_date': scheduleDate, 'route_id': route_id}
            printDBG(str(params))
    return params
def getVideosFromChannelList(self, url, category, page, cItem):
    """Scrape a YouTube channel video list and return item-param dicts.

    The first page is parsed out of the channel HTML (embedded
    ``ytInitialData`` JSON); subsequent pages go through the youtubei
    ``browse`` endpoint via a JSON POST whose body travels in the url meta.
    When a continuation token is present, a trailing 'more' (next page)
    item is appended to the returned list.
    """
    printDBG('YouTubeParser.getVideosFromChannelList page[%s]' % (page))
    currList = []
    try:
        url = strwithmeta(url)
        # 'post_data' in the url meta marks a continuation request that
        # must be POSTed as JSON to the browse endpoint
        if 'post_data' in url.meta:
            http_params = dict(self.http_params)
            http_params['header']['Content-Type'] = 'application/json'
            http_params['raw_post_data'] = True
            sts, data = self.cm.getPage(url, http_params, url.meta['post_data'])
        else:
            sts, data = self.cm.getPage(url, self.http_params)
        if sts:
            if 'browse' in url:
                # next pages: response carries appended continuation items
                response = json_loads(data)['onResponseReceivedActions']
                rr = {}
                for r in response:
                    if r.get("appendContinuationItemsAction", ""):
                        rr = r
                        break
                if not rr:
                    return []
                r1 = rr["appendContinuationItemsAction"]
                r4 = r1.get("continuationItems", [])
            else:
                # first page of videos: extract embedded ytInitialData JSON
                self.checkSessionToken(data)
                data2 = self.cm.ph.getDataBeetwenMarkers(
                    data, "window[\"ytInitialData\"] =", "};", False)[1]
                if len(data2) == 0:
                    # alternate embedding marker used by some page variants
                    data2 = self.cm.ph.getDataBeetwenMarkers(
                        data, "var ytInitialData =", "};", False)[1]
                # the end marker consumed the closing brace - restore it
                response = json_loads(data2 + "}")
                r1 = response['contents'][
                    'twoColumnBrowseResultsRenderer']['tabs']
                # take the first tab that actually exposes content;
                # bare except is deliberate best-effort over variable schemas
                r2 = {}
                for tab in r1:
                    try:
                        if tab['tabRenderer']['content']:
                            r2 = tab['tabRenderer']['content']
                    except:
                        pass
                    if r2:
                        break
                r3 = r2['sectionListRenderer']['contents'][0][
                    'itemSectionRenderer']['contents']
                r4 = r3[0]['gridRenderer'].get('items', '')
            nextPage = ''
            for r5 in r4:
                videoJson = r5.get("gridVideoRenderer", "")
                nP = r5.get('continuationItemRenderer', '')
                if videoJson:
                    params = self.getVideoData(videoJson)
                    if params:
                        printDBG(str(params))
                        currList.append(params)
                if nP != '':
                    # remember the continuation renderer for paging below
                    nextPage = nP
            if nextPage:
                ctoken = nextPage["continuationEndpoint"][
                    "continuationCommand"].get('token', '')
                ctit = nextPage["continuationEndpoint"][
                    "clickTrackingParams"]
                try:
                    label = nextPage["nextContinuationData"]["label"][
                        "runs"][0]["text"]
                except:
                    label = _("Next Page")
                urlNextPage = "https://www.youtube.com/youtubei/v1/browse?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8"
                # minimal innertube client context required by the endpoint
                post_data = {
                    'context': {
                        'client': {
                            'clientName': 'WEB',
                            'clientVersion': '2.20201021.03.00',
                        }
                    },
                }
                post_data['continuation'] = ctoken
                post_data['context']['clickTracking'] = {
                    'clickTrackingParams': ctit
                }
                post_data = json_dumps(post_data).encode('utf-8')
                # stash the POST body in the url meta; consumed on re-entry
                urlNextPage = strwithmeta(urlNextPage, {'post_data': post_data})
                params = {
                    'type': 'more',
                    'category': category,
                    'title': label,
                    'page': str(int(page) + 1),
                    'url': urlNextPage
                }
                printDBG(str(params))
                currList.append(params)
    except Exception:
        printExc()
    return currList
def getLinksForVideo(self, cItem):
    """Resolve playable HLS link(s) for a EuroSportPlayer video item.

    Flow: open the video HTML page with the session cookie file, optionally
    fetch the route JSON for the item's route id, then query the playback
    info endpoint and extract the 'hls' streaming url plus its playlist
    variants. Returns a list of {'name': ..., 'url': ...} dicts (may be
    empty on any failure).
    """
    printDBG("EuroSportPlayer.getLinksForVideo [%s]" % cItem)
    self.checkLogin()
    linksTab = []
    try:
        printDBG(str(cItem))
        video_id = cItem['video_id']
        # open video page
        video_page_url = cItem['url']
        sts, data = self.getPage(
            video_page_url,
            {'header' : {'User-Agent': self.USER_AGENT, 'Referer' : video_page_url},
             'use_cookie': True, 'load_cookie': True, 'save_cookie': True,
             'cookiefile': self.COOKIE_FILE })
        if not sts:
            return []
        # open route json page (only when the item carries a route id)
        route_id = cItem.get('route_id', '')
        if route_id:
            route = self.espRoutes[route_id]
            printDBG(json_dumps(route))
            #{"attributes": {"url": "/videos/eurosport/world-championship-239400", "canonical": true}, "type": "route", "id": "292e72a63ebcccb480984a84f3497b7702623ab6fe6e7d7d29b1dce79ed3da35"}
            route_url = self.getFullPath(route['attributes']['url'], 'route') + "?include=default"
            # NOTE(review): sts/data from this call are overwritten below,
            # so the fetch appears to matter only for its server-side effect
            sts, data = self.getPage(route_url)
            #if sts:
                #printDBG('--------------------------------')
                #printDBG(data)
        # open video playback json page
        playback_info_url = self.PLAYBACK_URL.replace('{%video_id%}', video_id)
        sts, data = self.getPage(
            playback_info_url,
            {'header' : {'User-Agent': self.USER_AGENT, 'Referer' : video_page_url},
             'use_cookie': True, 'load_cookie': True, 'save_cookie': True,
             'cookiefile': self.COOKIE_FILE })
        if not sts:
            return []
        printDBG('--------------------------------')
        printDBG(data)
        j = json_loads(data)
        s = j['data']['attributes']['streaming']
        if 'hls' in s:
            link_url = strwithmeta(s['hls']['url'], {'User-Agent': self.USER_AGENT, 'Referer' : video_page_url})
            # 'auto hls' master entry plus one entry per playlist variant
            linksTab.append({'name':'auto hls', 'url': link_url})
            linksTab.extend(getDirectM3U8Playlist(link_url, checkExt=False, variantCheck=True, checkContent=True, sortWithMaxBitrate=99999999))
        # NOTE(review): the disabled 'mss' branch below reuses s['dash']['url']
        # - looks like a copy-paste slip; confirm before re-enabling.
        #if 'dash' in s:
        #    link_url = strwithmeta(s['dash']['url'], {'User-Agent': self.USER_AGENT, 'Referer' : video_page_url})
        #    linksTab.append({'name':'dash', 'url': link_url})
        #if 'mss' in s:
        #    link_url = strwithmeta(s['dash']['url'], {'User-Agent': self.USER_AGENT, 'Referer' : video_page_url})
        #    linksTab.append({'name':'mss', 'url': link_url})
    except Exception:
        printExc()
    return linksTab
def exploreEpisode(self, cItem):
    """Register every link stored on the episode item as a video entry."""
    printDBG("Cinemalibero.exploreEpisode cItem[%s]" % cItem)
    for linkItem in cItem.get('links', []):
        printDBG(json_dumps(linkItem))
        self.addVideo(linkItem)
def getFavouriteData(self, cItem):
    """Serialize the subset of item fields needed to restore it from favourites."""
    favFields = {
        'type': cItem['type'],
        'category': cItem.get('category', ''),
        'title': cItem['title'],
        'url': cItem['url'],
        'desc': cItem['desc'],
        'icon': cItem['icon'],
    }
    return json_dumps(favFields)
def getVideoLink(self, cItem):
    """Extract direct m3u8 stream links for a bilasport item.

    Follows item page -> embedded /iframes/ page -> player iframe, scrapes
    the last m3u8 url from the final page and returns its playlist variants;
    falls back to the generic hosting resolver when no HLS url is found.
    """
    printDBG("BilaSportPwApi.getVideoLink")
    urlsTab = []
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return urlsTab
    cUrl = self.cm.meta['url']
    baseUrl = cUrl
    # first hop: link to the embedded /iframes/ page
    url = self.getFullUrl(
        ph.search(data, '''['"]([^'^"]*?/iframes/[^'^"]+?)['"]''')[0], cUrl)
    if not url:
        return urlsTab
    sts, data = self.getPage(url)
    if not sts:
        return urlsTab
    cUrl = self.cm.meta['url']
    # second hop: the player iframe inside the /iframes/ page
    url = self.getFullUrl(ph.search(data, ph.IFRAME)[1], cUrl)
    if url:
        sts, data = self.getPage(url)
        if not sts:
            return urlsTab
        cUrl = self.cm.meta['url']
    # collect the String.replace(from, to) pairs applied by the player
    # script; encoded and forwarded via 'priv_script_url' for later use
    replaceTab = self.cm.ph.getDataBeetwenMarkers(data, 'prototype.open', '};', False)[1]
    printDBG(replaceTab)
    replaceTab = re.compile(
        '''\.replace\(['"](\s*[^'^"]+?)['"]\s*\,\s*['"]([^'^"]+?)['"]'''
    ).findall(replaceTab)
    printDBG(replaceTab)
    if len(replaceTab):
        scriptUrl = '|' + base64.b64encode(
            json_dumps(replaceTab).encode('utf-8'))
    else:
        scriptUrl = ''
    # NOTE(review): this loop unconditionally overwrites scriptUrl with the
    # first non-jsdelivr <script src> - confirm the shadowing is intended
    tmp = ph.findall(data, ('<script', '>', ph.check(ph.none, ('jsdelivr', ))))
    for item in tmp:
        scriptUrl = self.getFullUrl(ph.getattr(item, 'src'), cUrl)
        break
    hlsTab = []
    hlsUrl = re.compile('''(https?://[^'^"]+?\.m3u8(?:\?[^'^"]+?)?)['"]''', re.IGNORECASE).findall(data)
    if len(hlsUrl):
        hlsUrl = hlsUrl[-1]  # keep only the last match found on the page
        hlsTab = getDirectM3U8Playlist(hlsUrl, checkContent=True, sortWithMaxBitrate=9000000)
    for idx in range(len(hlsTab)):
        hlsTab[idx]['need_resolve'] = 1
        hlsTab[idx]['url'] = strwithmeta(
            hlsTab[idx]['url'], {
                'name': cItem['name'],
                'Referer': url,
                'priv_script_url': scriptUrl
            })
    if hlsTab:
        return hlsTab
    # fallback: hand the final page over to the generic url resolver
    if 1 == self.up.checkHostSupport(cUrl):
        return self.up.getVideoLinkExt(
            strwithmeta(cUrl, {'Referer': baseUrl}))
    return []
def listVodTypesFilters(self, cItem, nextCategory):
    """Add sub-directories for the on-demand types (replays, highlights,
    news) that have content for the sport in cItem, plus a leading "all"
    entry when more than one listed entry exists.
    """
    printDBG("EuroSportPlayer.listVodTypesFilters [%s]" % cItem)
    try:
        sportId = cItem['f_sport_id']
        variables = {"must":[{"attributeName":"category","values":["%s" % sportId]}],"uiLang":self.serverApiData['locale']['language'],"mediaRights":["GeoMediaRight"],"preferredLanguages":self.serverApiData['locale']['languageOrder']}
        # counts-by-category query; variables travel url-encoded as compact JSON
        url = self.serverApiData['server_path']['search'] + '/persisted/query/eurosport/web/ondemand/counts/bycategory?variables=' + urllib.quote(json_dumps(variables, separators=(',', ':')))
        sts, data = self.getJSPage(url)
        if not sts:
            return
        data = json_loads(data)
        grandTotal = 0
        typesMap = [('replays', 'Ondemand_Subnav_Replay'),
                    ('highlights', 'Ondemand_Subnav_Highlights'),
                    ('news', 'Ondemand_Subnav_News')]
        for vodType, i18nKey in typesMap:
            try:
                hits = int(data['data'][vodType]['meta']['hits'])
                if hits <= 0:
                    continue  # skip types with nothing to list
                grandTotal += hits
                entry = dict(cItem)
                entry.update({'good_for_fav': False,
                              'category': nextCategory,
                              'title': '%s (%s)' % (self.serverApiData['i18n_dictionary'][i18nKey], hits),
                              'f_vod_type': vodType})
                self.addDir(entry)
            except Exception:
                printExc()
        if grandTotal > 0 and len(self.currList) > 1:
            # prepend the aggregate entry (no f_vod_type filter set)
            allEntry = dict(cItem)
            allEntry.update({'good_for_fav': False,
                             'category': nextCategory,
                             'title': '%s (%s)' % (self.serverApiData['i18n_dictionary']['Ondemand_Subnav_All'], grandTotal)})
            self.currList.insert(0, allEntry)
    except Exception:
        printExc()