def getLinksForVideo(self, cItem):
    """Resolve playable stream links for a EuroSportPlayer item.

    cItem must contain 'video_id' and 'url' (the video page URL); it may
    contain 'route_id' pointing into self.espRoutes.
    Returns a list of {'name', 'url'} link dicts (empty on failure).
    """
    printDBG("EuroSportPlayer.getLinksForVideo [%s]" % cItem)
    self.checkLogin()
    linksTab = []
    try:
        printDBG(str(cItem))
        video_id = cItem['video_id']
        # open video page (needed to establish session cookies)
        video_page_url = cItem['url']
        sts, data = self.getPage(video_page_url,
                                 {'header': {'User-Agent': self.USER_AGENT, 'Referer': video_page_url},
                                  'use_cookie': True, 'load_cookie': True, 'save_cookie': True,
                                  'cookiefile': self.COOKIE_FILE})
        if not sts:
            return []
        # open route json page; the fetched data is only logged / side-effect
        # fetched here — the playback request below overwrites 'data'
        route_id = cItem.get('route_id', '')
        if route_id:
            route = self.espRoutes[route_id]
            printDBG(json_dumps(route))
            # example route entry:
            # {"attributes": {"url": "/videos/eurosport/world-championship-239400", "canonical": true},
            #  "type": "route", "id": "292e72a63ebcccb480984a84f3497b7702623ab6fe6e7d7d29b1dce79ed3da35"}
            route_url = self.getFullPath(route['attributes']['url'], 'route') + "?include=default"
            sts, data = self.getPage(route_url)
            # if sts:
            #    printDBG('--------------------------------')
            #    printDBG(data)
        # open video playback json page
        playback_info_url = self.PLAYBACK_URL.replace('{%video_id%}', video_id)
        sts, data = self.getPage(playback_info_url,
                                 {'header': {'User-Agent': self.USER_AGENT, 'Referer': video_page_url},
                                  'use_cookie': True, 'load_cookie': True, 'save_cookie': True,
                                  'cookiefile': self.COOKIE_FILE})
        if not sts:
            return []
        printDBG('--------------------------------')
        printDBG(data)
        j = json_loads(data)
        s = j['data']['attributes']['streaming']
        if 'hls' in s:
            # expose the master playlist plus each resolved variant
            link_url = strwithmeta(s['hls']['url'], {'User-Agent': self.USER_AGENT, 'Referer': video_page_url})
            linksTab.append({'name': 'auto hls', 'url': link_url})
            linksTab.extend(getDirectM3U8Playlist(link_url, checkExt=False, variantCheck=True,
                                                  checkContent=True, sortWithMaxBitrate=99999999))
        # dash/mss handling disabled; NOTE(review): the mss draft below reads
        # s['dash'] (likely a copy/paste slip) — confirm before re-enabling.
        # if 'dash' in s:
        #     link_url = strwithmeta(s['dash']['url'], {'User-Agent': self.USER_AGENT, 'Referer': video_page_url})
        #     linksTab.append({'name':'dash', 'url': link_url})
        # if 'mss' in s:
        #     link_url = strwithmeta(s['dash']['url'], {'User-Agent': self.USER_AGENT, 'Referer': video_page_url})
        #     linksTab.append({'name':'mss', 'url': link_url})
    except Exception:
        printExc()
    return linksTab
def getVideosFromChannelList(self, url, category, page, cItem):
    """List videos of a YouTube channel page and append a 'Next page' item.

    First pages are scraped from the embedded ytInitialData JSON; follow-up
    pages go through the browse_ajax continuation endpoint.
    Returns a list of item dicts (empty on failure).
    """
    printDBG('YouTubeParser.getVideosFromChannelList page[%s]' % (page))
    currList = []
    try:
        sts, data = self.cm.getPage(url, self.http_params, self.postdata)
        if sts:
            if 'browse_ajax' in url:
                # next pages: response is a list; pick the element carrying "response"
                response = json_loads(data)
                rr = {}
                for r in response:
                    if r.get("response", ""):
                        rr = r
                        break
                if not rr:
                    return []
                r1 = rr["response"]["continuationContents"]["gridContinuation"]
                r4 = r1.get("items", [])
                nP = r1.get('continuations', '')
            else:
                # first page of videos: extract the inline ytInitialData blob.
                # The closing '};' marker eats the final '}', hence the '+ "}"'.
                self.checkSessionToken(data)
                data2 = self.cm.ph.getDataBeetwenMarkers(data, "window[\"ytInitialData\"] =", "};", False)[1]
                if len(data2) == 0:
                    data2 = self.cm.ph.getDataBeetwenMarkers(data, "var ytInitialData =", "};", False)[1]
                response = json_loads(data2 + "}")
                r1 = response['contents']['twoColumnBrowseResultsRenderer']['tabs']
                # find the first tab that actually has content
                r2 = {}
                for tab in r1:
                    try:
                        if tab['tabRenderer']['content']:
                            r2 = tab['tabRenderer']['content']
                    except:
                        pass
                    if r2:
                        break
                r3 = r2['sectionListRenderer']['contents'][0]['itemSectionRenderer']['contents']
                r4 = r3[0]['gridRenderer'].get('items', '')
                nP = r3[0]['gridRenderer'].get('continuations', '')
            for r5 in r4:
                videoJson = r5.get("gridVideoRenderer", "")
                if videoJson:
                    params = self.getVideoData(videoJson)
                    if params:
                        printDBG(str(params))
                        currList.append(params)
            if nP:
                # build the continuation ("more") item from the first continuation entry
                nextPage = nP[0]
                ctoken = nextPage["nextContinuationData"]["continuation"]
                ctit = nextPage["nextContinuationData"]["clickTrackingParams"]
                try:
                    label = nextPage["nextContinuationData"]["label"]["runs"][0]["text"]
                except:
                    label = _("Next Page")
                urlNextPage = "https://www.youtube.com/browse_ajax?ctoken=%s&continuation=%s&itct=%s" % (
                    ctoken, ctoken, ctit)
                params = {
                    'type': 'more',
                    'category': category,
                    'title': label,
                    'page': str(int(page) + 1),
                    'url': urlNextPage
                }
                printDBG(str(params))
                currList.append(params)
    except Exception:
        printExc()
    return currList
def exploreItem(self, cItem, nextCategory):
    """Explore a Cinemaxx detail page: add trailer, seasons/episodes or a
    single video entry depending on what the embedded player iframe points at.

    Side effects: resets self.cacheLinks and appends items via
    addVideo/addDir; returns None on every path.
    """
    printDBG("Cinemaxx.exploreItem")
    self.cacheLinks = {}
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    cUrl = self.cm.meta['url']
    self.setMainUrl(cUrl)
    # build description: custom info items joined, then the article text
    desc = []
    descObj = self.getArticleContent(cItem, data)[0]
    icon = descObj['images'][0]['url']
    baseTitle = descObj['title']
    for item in descObj['other_info']['custom_items_list']:
        desc.append(item[1])
    desc = ' | '.join(desc) + '[/br]' + descObj['text']
    data = ph.find(data, ('<div', '>', 'dle-content'), ('<div', '>', 'fstory-info'), flags=0)[1]
    # trailer section (optional)
    trailer = ph.find(data, ('<', '>', '#trailer'), '</div>', flags=0)[1]
    title = self.cleanHtmlStr(trailer)
    trailer = self.getFullUrl(ph.search(trailer, ph.IFRAME_SRC_URI_RE)[1])
    if trailer:
        self.addVideo({'good_for_fav': True, 'prev_url': cUrl,
                       'title': '%s %s' % (title, baseTitle), 'url': trailer,
                       'icon': icon, 'desc': desc})
    # main player iframe
    data = ph.find(data, ('<div', '>', 'full-video'), '</div>', flags=0)[1]
    url = self.getFullUrl(ph.search(data, ph.IFRAME_SRC_URI_RE)[1])
    if url:
        if ('/video/' in url and '/serials/' in url) or 'playlist' in url:
            # hdgo-style host: try seasons first, then a flat episode list,
            # finally fall back to a single video entry
            url = strwithmeta(url, {'Referer': cUrl})
            seasons = self.hdgocc.getSeasonsList(url)
            for item in seasons:
                self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'prev_url': cUrl,
                                               'category': nextCategory, 'serie_title': baseTitle,
                                               'title': 'Staffel %s' % item['title'],
                                               'season_id': item['id'], 'url': item['url'],
                                               'icon': icon, 'desc': desc}))
            if 0 != len(seasons):
                return
            seasonUrl = url
            episodes = self.hdgocc.getEpiodesList(seasonUrl, -1)
            for item in episodes:
                title = '{0} - {1} - s01e{2} '.format(baseTitle, item['title'], str(item['id']).zfill(2))
                self.addVideo({'good_for_fav': False, 'type': 'video', 'prev_url': cUrl,
                               'title': title, 'url': item['url'], 'icon': icon, 'desc': desc})
            if 0 != len(episodes):
                return
            self.addVideo({'good_for_fav': False, 'prev_url': cUrl, 'title': baseTitle,
                           'url': url, 'icon': icon, 'desc': desc})
        else:
            # vk.show(...) inline playlist: a JS array of seasons, each a list
            # of episode URLs; evaluate it to JSON via the JS interpreter
            data = ph.find(data, 'vk.show(', ');', flags=0)[1].split(',', 1)[-1]
            ret = js_execute('print(JSON.stringify(%s));' % data)
            if ret['sts'] and 0 == ret['code']:
                try:
                    data = json_loads(ret['data'])
                    for sNum, season in enumerate(data, 1):
                        subItems = []
                        for eNum, episode in enumerate(season, 1):
                            title = baseTitle + ' s%se%s' % (str(sNum).zfill(2), str(eNum).zfill(2))
                            subItems.append({'good_for_fav': False, 'type': 'video',
                                             'prev_url': cUrl, 'title': title, 'url': episode,
                                             'icon': icon, 'desc': desc})
                        if subItems:
                            self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'prev_url': cUrl,
                                                           'title': 'Staffel %s' % (str(sNum).zfill(2)),
                                                           'category': 'sub_items',
                                                           'sub_items': subItems}))
                except Exception:
                    printExc()
def listsCategories(self, cItem, searchCategories=False):
    """List TVN VOD categories / items / seasons for the given navigation item.

    Handles three modes: search ('category' == 'search'), item listing
    (category + id present) and the main category listing. Appends results
    via addDir/addVideo; returns None.
    """
    printDBG("TvnVod.listsCategories cItem[%s]" % cItem)
    pl = 'Panasonic'  # self.getDefaultPlatform()
    searchMode = False
    page = 1 + cItem.get('page', 0)
    if 'search' == cItem.get('category', None):
        searchMode = True
        urlQuery = '&sort=newest&m=getSearchItems&page=%d&query=%s' % (page, cItem['pattern'])
        if cItem.get('search_category', False):
            pl = 'Android4'
    elif None != cItem.get('category', None) and None != cItem.get('id', None):
        groupName = 'items'
        urlQuery = '&type=%s&id=%s&limit=%s&page=%s&sort=newest&m=getItems' % (
            cItem['category'], cItem['id'], self.itemsPerPage, page)
        if 0 < cItem.get('season', 0):
            urlQuery += "&season=%d" % cItem.get('season', 0)
    else:
        groupName = 'categories'
        urlQuery = '&m=mainInfo'
    try:
        url = self.getBaseUrl(pl) + urlQuery
        sts, data = self.cm.getPage(url, {'header': self.getHttpHeader(pl)})
        data = json_loads(data)
        if 'success' != data['status']:
            printDBG("TvnVod.listsCategories status[%s]" % data['status'])
            return
        # pagination: show a next-page entry while more items remain
        countItem = self._getJItemNum(data, 'count_items', None)
        if None != countItem and countItem > self.itemsPerPage * page:
            showNextPage = True
        else:
            showNextPage = False
        catalogs = False
        if searchMode:
            # merge program / article / plain search results into one flat list
            seasons = None
            tmp = []
            for resItem in data.get('vodProgramItems', {}).get('category', []):
                tmp.extend(resItem.get('items', []))
            for resItem in data.get('vodArticleItems', {}).get('program', []):
                tmp.extend(resItem.get('items', []))
            tmp.extend(data.get('items', []))
            data = tmp
            tmp = None
        else:
            seasons = data.get('seasons', None)
            # some fix for sub-categories
            # and 0 < len(data.get('items', []))
            if 0 < len(data.get('categories', [])) and cItem.get('previd', '') != cItem.get('id', ''):
                catalogs = True
                groupName = 'categories'
                showNextPage = False
            data = data[groupName]
        showSeasons = False
        if None != seasons and 0 == cItem.get('season', 0):
            showSeasons = True
            numSeasons = len(seasons)
        else:
            numSeasons = 0
        # list plain items unless we are about to show a fresh season list
        # (season == 0 while seasons exist -> numSeasons > 0 -> condition False)
        if 0 != cItem.get('season', 0) or cItem.get('season', 0) == numSeasons:
            for item in data:
                category = self._getJItemStr(item, 'type', '')
                if category in ['stream', 'catalog_with_widget', 'pauses', 'favorites']:
                    continue
                id = self._getJItemStr(item, 'id', '')
                # some fix for sub-categories
                if catalogs:
                    if 'category' == category:
                        category = 'catalog'
                    if '0' == id:
                        id = cItem['id']
                # get title
                title = self._getJItemStr(item, 'name', '')
                if '' == title:
                    title = self._getJItemStr(item, 'title', '')
                if '' == title:
                    if category == 'recommended':
                        continue
                    else:
                        title = 'Brak nazwy'
                tmp = self._getJItemStr(item, 'episode', '')
                if tmp not in ('', '0'):
                    title += ", odcinek " + tmp
                tmp = self._getJItemStr(item, 'season', '')
                if tmp not in ('', '0'):
                    title += ", sezon " + tmp
                try:
                    # mark items whose start date lies in the future
                    tmp = self._getJItemStr(item, 'start_date', '')
                    if '' != tmp:
                        tmp = time.strptime(tmp, "%Y-%m-%d %H:%M")
                        if tmp > time.localtime():
                            title += " (planowany)"
                except Exception:
                    printExc()
                # get description
                desc = self._getJItemStr(item, 'lead', '')
                # get icon
                icon = self._getIconUrl(item)
                params = {
                    'id': id,
                    'previd': cItem.get('id', ''),
                    'title': title,
                    'desc': desc,
                    'icon': icon,
                    'category': category,
                    'season': 0,
                    'good_for_fav': True,
                }
                if 'episode' == category:
                    if cItem.get('search_category', False):
                        continue
                    self.addVideo(params)
                else:
                    # skip promotional top-level sections
                    if title in ['SPORT', 'Live', 'STREFY', 'KONTYNUUJ OGLĄDANIE', 'ULUBIONE', 'PAKIETY']:
                        continue
                    self.addDir(params)
        else:
            showNextPage = False
        if showSeasons:
            for season in seasons:
                params = {
                    'id': cItem['id'],
                    'previd': cItem.get('id', ''),
                    'title': self._getJItemStr(season, 'name', ''),
                    'desc': '',
                    'icon': self._getIconUrl(season),
                    'category': cItem['category'],  # self._getJItemStr(season, 'type', ''),
                    'season': self._getJItemNum(season, 'id', 0),
                    'good_for_fav': True,
                }
                self.addDir(params)
        if showNextPage:
            params = dict(cItem)
            params.update({
                'good_for_fav': False,
                'title': _('Next page'),
                'page': page,
                'icon': '',
                'desc': ''
            })
            self.addDir(params)
    except Exception:
        printExc()
def getList(self, cItem):
    """Return the VideoStar channel list, logging in first (guest if no
    credentials are configured).

    Side effects: removes the cookie file, sets self.loggedIn /
    self.accountInfo and fills self.cacheChannelList.
    """
    printDBG("VideoStarApi.getList")
    rm(self.COOKIE_FILE)
    self.informAboutGeoBlockingIfNeeded('PL')
    login = config.plugins.iptvplayer.videostar_login.value
    password = config.plugins.iptvplayer.videostar_password.value
    if login != '' and password != '':
        self.accountInfo = ''
        ret, msg = self.doLogin(login, password)
        if ret:
            self.loggedIn = True
            self.accountInfo = msg
        else:
            # login failed: tell the user to re-check configured credentials
            self.sessionEx.open(
                MessageBox,
                '%s\nProblem z zalogowanie użytkownika "%s". Sprawdź dane do logowania w konfiguracji hosta.' % (msg, login),
                type=MessageBox.TYPE_INFO,
                timeout=10)
            self.loggedIn = False
    else:
        self.doLogin('guest', 'guest')
    self.cacheChannelList = []
    channelsTab = []
    # logged-in users get the API list; guests get a static JSON list
    if self.loggedIn:
        url = self.getFullUrl('/channels/list?device=web', 'api')
    else:
        url = self.getFullUrl('/static/guest/channels/list/web.json', 'static')
    sts, data = self.cm.getPage(url, self.defaultParams)
    if not sts:
        return channelsTab
    try:
        idx = 0
        data = json_loads(data, '', True)
        for item in data['channels']:
            guestTimeout = item.get('guest_timeout', '')
            # unless "show all" is configured, hide unsubscribed channels and
            # channels not available to guests at all (timeout '0')
            if not config.plugins.iptvplayer.videostar_show_all_channels.value and (
                    item['access_status'] == 'unsubscribed' or (not self.loggedIn and guestTimeout == '0')):
                continue
            title = self.cleanHtmlStr(item['name'])
            icon = self.getFullUrl(item.get('thumbnail', ''))
            url = self.getFullUrl(item['slug'])
            desc = []
            if item.get('hd', False):
                desc.append('HD')
            else:
                desc.append('SD')
            if self.loggedIn:
                desc.append(item['access_status'])
            elif guestTimeout != '':
                desc.append(_('Guest timeout: %s') % guestTimeout)
            if item.get('geoblocked', False):
                desc.append('geoblocked')
            params = {
                'name': cItem['name'],
                'type': 'video',
                'title': title,
                'url': url,
                'icon': icon,
                'priv_idx': idx,  # index into self.cacheChannelList
                'desc': ' | '.join(desc)
            }
            channelsTab.append(params)
            self.cacheChannelList.append(item)
            idx += 1
    except Exception:
        printExc()
    return channelsTab
def _get_automatic_captions(self, video_id, webpage=None):
    """Return automatic caption tracks for a YouTube video.

    Each returned entry is a dict: {'title', 'url', 'lang', 'ytid', 'format'}.
    Two discovery paths are tried from the player config 'args':
    1. 'ttsurl' — query the timed-text list endpoint for translation targets;
    2. 'caption_tracks' + 'caption_translation_languages' (fallback).
    Returns an empty list on any failure (errors are logged, not raised).
    """
    sub_tracks = []
    if None == webpage:
        url = 'http://www.youtube.com/watch?v=%s&hl=%s&has_verified=1' % (
            video_id, GetDefaultLang())
        sts, data = self.cm.getPage(url)
        if not sts:
            return sub_tracks
    else:
        # BUGFIX: 'data' was unbound when a webpage was passed in
        data = webpage
    sts, data = self.cm.ph.getDataBeetwenMarkers(data, ';ytplayer.config =', '};', False)
    if not sts:
        return sub_tracks
    try:
        player_config = json_loads(data.strip() + '}')
        args = player_config['args']
        caption_url = args.get('ttsurl')
        if caption_url:
            timestamp = args['timestamp']
            # We get the available subtitles list
            list_params = urllib.urlencode({
                'type': 'list',
                'tlangs': 1,
                'asrs': 1,
            })
            list_url = caption_url + '&' + list_params
            # BUGFIX: getPage() returns (status, data); the original passed the
            # raw tuple to .find() and also returned the (then unbound) name
            # sub_lang_list before any track was parsed -> guaranteed NameError.
            sts, caption_data = self.cm.getPage(list_url)
            if not sts:
                return sub_tracks
            printDBG(caption_data)
            from xml.etree import ElementTree as ET
            caption_list = ET.fromstring(caption_data)
            original_lang_node = caption_list.find('track')
            if original_lang_node is None:
                return sub_tracks
            original_lang = original_lang_node.attrib['lang_code']
            caption_kind = original_lang_node.attrib.get('kind', '')
            for lang_node in caption_list.findall('target'):
                sub_lang = lang_node.attrib['lang_code']
                params = urllib.urlencode({
                    'lang': original_lang,
                    'tlang': sub_lang,
                    'fmt': 'vtt',  # vtt matches what the fallback path requests
                    'ts': timestamp,
                    'kind': caption_kind,
                })
                # normalized to the same track-dict shape the fallback path
                # produces (the original returned a bare {lang: formats} dict)
                sub_tracks.append({
                    'title': sub_lang,
                    'url': caption_url + '&' + params,
                    'lang': sub_lang.encode('utf-8'),
                    'ytid': len(sub_tracks),
                    'format': 'vtt'
                })
            return sub_tracks
        # Some videos don't provide ttsurl but rather caption_tracks and
        # caption_translation_languages (e.g. 20LmZk1hakA)
        caption_tracks = args['caption_tracks']
        caption_translation_languages = args['caption_translation_languages']
        caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
        parsed_caption_url = urlparse(caption_url)
        caption_qs = compat_parse_qs(parsed_caption_url.query)
        for lang in caption_translation_languages.split(','):
            lang_qs = compat_parse_qs(urllib.unquote_plus(lang))
            sub_lang = lang_qs.get('lc', [None])[0]
            if not sub_lang:
                continue
            caption_qs.update({
                'tlang': [sub_lang],
                'fmt': ['vtt'],
            })
            sub_url = urlunparse(
                parsed_caption_url._replace(
                    query=urllib.urlencode(caption_qs, True)))
            sub_tracks.append({
                'title': lang_qs['n'][0].encode('utf-8'),
                'url': sub_url,
                'lang': sub_lang.encode('utf-8'),
                'ytid': len(sub_tracks),
                'format': 'vtt'
            })
    except Exception:
        printExc()
    return sub_tracks
def getLinksForVideo(self, cItem):
    """Resolve Mediaset Play links, cached per item URL.

    Live items are resolved via the 'alive/nownext' API (channelId from
    'call_sign' or scraped from the page); VOD items go through the
    theplatform SMIL endpoint keyed by the 16-char programme GUID.
    Returns a list of link dicts (may return None on early page failures —
    preserved original behavior).
    """
    printDBG(": %s" % cItem)
    self.initApi()
    linksTab = self.cacheLinks.get(cItem['url'], [])
    if linksTab:
        return linksTab
    if cItem.get('is_live'):
        channelId = cItem.get('call_sign')
        if not channelId:
            # fall back to scraping the channel id from the /diretta/ link
            sts, data = self.getPage(cItem['url'])
            if not sts:
                return
            channelId = ph.search(
                data,
                '''/diretta/[^'^"]+?_c([^'^"]+?)['"][^>]*?>\s*?diretta\s*?<''',
                flags=ph.I)[0]
        url = self.API_BASE_URL + 'alive/nownext/v1.0?channelId=' + channelId
        sts, data = self.getPage(url)
        if not sts:
            return
        try:
            data = json_loads(data)
            for tuningInstructions in data['response']['tuningInstruction'].itervalues():
                for item in tuningInstructions:
                    printDBG(item)
                    # strip query and normalize the host part of the stream URL
                    url = item['streamingUrl'].split('?', 1)[0].replace(
                        't-mediaset-it', '-mediaset-it')
                    if 'mpegurl' in item['format'].lower():
                        f = 'HLS/M3U8'
                    elif 'dash' in item['format'].lower():
                        f = 'DASH/MPD'
                    else:
                        continue
                    linksTab.append({
                        'name': f,
                        'url': strwithmeta(url, {'priv_type': f}),
                        'need_resolve': 1
                    })
        except Exception:
            printExc()
    else:
        guid = cItem.get('guid', '')
        if not guid:
            guid = ph.search(
                cItem['url'],
                r'''https?://(?:(?:www|static3)\.)?mediasetplay\.mediaset\.it/(?:(?:video|on-demand)/(?:[^/]+/)+[^/]+_|player/index\.html\?.*?\bprogramGuid=)([0-9A-Z]{16})'''
            )[0]
        if not guid:
            return linksTab
        tp_path = 'PR1GhC/media/guid/2702976343/' + guid
        uniqueUrls = set()
        # probe every quality/format combination on the theplatform endpoint
        for asset_type in ('SD', 'HD'):
            for f in (('MPEG4', 'MP4', 0), ('MPEG-DASH', 'DASH/MPD', 1), ('M3U', 'HLS/M3U8', 1)):
                url = 'http://link.theplatform.%s/s/%s?mbr=true&formats=%s&assetTypes=%s' % (
                    'eu', tp_path, f[0], asset_type)
                sts, data = self.cm.getPage(url, post_data={'format': 'SMIL'})
                if not sts:
                    continue
                if 'GeoLocationBlocked' in data:
                    # surface the geo-block message but keep scanning (best effort)
                    SetIPTVPlayerLastHostError(ph.getattr(data, 'abstract'))
                printDBG("++++++++++++++++++++++++++++++++++")
                printDBG(data)
                tmp = ph.findall(data, '<video', '>')
                for item in tmp:
                    url = ph.getattr(item, 'src')
                    if not self.cm.isValidUrl(url):
                        continue
                    if url not in uniqueUrls:
                        uniqueUrls.add(url)
                        linksTab.append({
                            'name': '%s - %s' % (f[1], asset_type),
                            'url': strwithmeta(url, {'priv_type': f[1]}),
                            'need_resolve': f[2]
                        })
    if len(linksTab):
        self.cacheLinks[cItem['url']] = linksTab
    return linksTab
def getPage(self, baseUrl, addParams={}, post_data=None):
    """Fetch a page through the Cloudflare-protection wrapper, additionally
    handling Sucuri 'cloudproxy' JS cookie challenges.

    NOTE(review): addParams={} is a mutable default; it is only read (and
    replaced when empty), so the usual aliasing bug does not bite here,
    but it is worth confirming no caller relies on mutation.
    Returns the (status, data) tuple from getPageCFProtection.
    """
    if addParams == {}:
        addParams = dict(self.defaultParams)

    def _getFullUrl(url):
        # resolve relative redirect targets against the requested URL
        if url == '':
            return ''
        if self.cm.isValidUrl(url):
            return url
        else:
            return urlparse.urljoin(baseUrl, url)

    addParams['cloudflare_params'] = {
        'domain': self.up.getDomain(baseUrl),
        'cookie_file': self.COOKIE_FILE,
        'User-Agent': self.USER_AGENT,
        'full_url_handle': _getFullUrl
    }
    url = baseUrl
    urlParams = deepcopy(addParams)
    urlData = deepcopy(post_data)
    unloadUrl = None
    #
    tries = 0
    removeCookieItems = False
    # NOTE(review): every path through the loop body returns, so the
    # while executes at most one iteration; kept as in the original.
    while tries < 20:
        tries += 1
        sts, data = self.cm.getPageCFProtection(url, urlParams, urlData)
        if not sts:
            return sts, data
        if unloadUrl != None:
            self.cm.getPageCFProtection(unloadUrl, urlParams)
            unloadUrl = None
        if 'sucuri_cloudproxy' in data:
            # Sucuri challenge: run the inline script under a JS interpreter
            # with mocked document/location (the base64 stub) to capture the
            # cookies the script would set, then retry with those cookies.
            cookieItems = {}
            jscode = self.cm.ph.getDataBeetwenNodes(data, ('<script', '>'), ('</script', '>'), False)[1]
            if 'eval' in jscode:
                jscode = '%s\n%s' % (base64.b64decode(
                    '''dmFyIGlwdHZfY29va2llcz1bXSxkb2N1bWVudD17fTtPYmplY3QuZGVmaW5lUHJvcGVydHkoZG9jdW1lbnQsImNvb2tpZSIse2dldDpmdW5jdGlvbigpe3JldHVybiIifSxzZXQ6ZnVuY3Rpb24obyl7bz1vLnNwbGl0KCI7IiwxKVswXS5zcGxpdCgiPSIsMiksb2JqPXt9LG9ialtvWzBdXT1vWzFdLGlwdHZfY29va2llcy5wdXNoKG9iail9fSk7dmFyIHdpbmRvdz10aGlzLGxvY2F0aW9uPXt9O2xvY2F0aW9uLnJlbG9hZD1mdW5jdGlvbigpe3ByaW50KEpTT04uc3RyaW5naWZ5KGlwdHZfY29va2llcykpfTs='''
                ), jscode)
            # NOTE(review): original indentation was lost in the source; it is
            # assumed the script is executed whether or not 'eval' was found.
            ret = js_execute(jscode)
            if ret['sts'] and 0 == ret['code']:
                try:
                    cookies = byteify(json_loads(ret['data'].strip()))
                    for cookie in cookies:
                        cookieItems.update(cookie)
                except Exception:
                    printExc()
            self.defaultParams['cookie_items'] = cookieItems
            urlParams['cookie_items'] = cookieItems
            removeCookieItems = False
            sts, data = self.cm.getPageCFProtection(url, urlParams, urlData)
        # remove not needed used cookie
        # NOTE(review): removeCookieItems is never set True, so this cleanup
        # branch is currently dead code.
        if removeCookieItems:
            self.defaultParams.pop('cookie_items', None)
            self.cm.clearCookie(self.COOKIE_FILE, removeNames=['___utmvc'])
        # printDBG(data)
        return sts, data
    return self.cm.getPageCFProtection(baseUrl, addParams, post_data)
def getChannelsList(self, cItem):
    """Build the USTVnow channel list (login required), optionally merging
    EPG now/next data into each channel's description.

    Returns a list of channel item dicts (empty on login/page failure).
    """
    printDBG("UstvnowApi.getChannelsList")
    login = config.plugins.iptvplayer.ustvnow_login.value
    passwd = config.plugins.iptvplayer.ustvnow_password.value
    if '' != login.strip() and '' != passwd.strip():
        self.token = self.doLogin(login, passwd)
        self.passkey = self.getPasskey()
        if self.token == '' or self.passkey == '':
            self.sessionEx.open(
                MessageBox,
                _('An error occurred when try to sign in the user "%s.\nPlease check your login credentials and try again later..."') % login,
                type=MessageBox.TYPE_INFO,
                timeout=10)
            return []
    else:
        self.sessionEx.open(
            MessageBox,
            _('You need to enter email and password in configuration.'),
            type=MessageBox.TYPE_INFO,
            timeout=10)
        return []
    sts, data = self.cm.getPage(self.LIVE_URL, self.defParams)
    if not sts:
        return []
    channelsNames = self._getChannelsNames()
    channelsTab = []
    # scrape the channel entries from the live listing markup
    data = self.cm.ph.getDataBeetwenMarkers(
        data, '<div data-role="content" data-theme="c">', '</ul>', False)[1]
    data = data.split('</li>')
    # maps prgsvcid -> index in channelsTab, used to attach EPG data below
    prgsvcidMap = {}
    for item in data:
        url = self.cm.ph.getSearchGroups(item, 'href="([^"]+?)"')[0]
        ui = url.split('ui-page=')[-1]
        icon = self.cm.ph.getSearchGroups(item, 'src="([^"]+?)"')[0]
        desc = self.cleanHtmlStr(item)
        params = dict(cItem)
        params.pop('url')
        params.update({
            'priv_url': self.getFullUrl(url),
            'ui_page': ui,
            'icon': icon,
            'desc': desc
        })
        # match the scraped entry against the known channel names by icon
        for nameItem in channelsNames:
            if nameItem['img'] in icon:
                # 't' == 0 means channel not available on current plan
                if config.plugins.iptvplayer.ustvnow_only_available.value and 0 == nameItem['t']:
                    break
                params['title'] = nameItem['sname'] + ' [%s]' % nameItem['t']
                params['prgsvcid'] = nameItem['prgsvcid']
                params['scode'] = nameItem['scode']
                prgsvcidMap[params['prgsvcid']] = len(channelsTab)
                channelsTab.append(params)
                break
    # calculate time difference from utcnow and the local system time reported by OS
    OFFSET = datetime.now() - datetime.utcnow()
    if config.plugins.iptvplayer.ustvnow_epg.value:
        sts, data = self.cm.getPage(
            self.MAIN_URL + 'gtv/1/live/channelguide', self.defParams)
        if sts:
            try:
                data = json_loads(data)
                for item in data['results']:
                    if item['prgsvcid'] in prgsvcidMap:
                        idx = prgsvcidMap[item['prgsvcid']]
                        # convert the guide's UTC timestamp to local time;
                        # round a ':59' second up to the full minute
                        utc_date = datetime.strptime(
                            item.get('event_date', '') + ' ' + item.get('event_time', ''),
                            '%Y-%m-%d %H:%M:%S')
                        utc_date = utc_date + OFFSET
                        if utc_date.time().second == 59:
                            utc_date = utc_date + timedelta(0, 1)
                        channelsTab[idx]['desc'] += '[/br][/br] [%s][/br]%s[/br]%s[/br]%s[/br]%s' % (
                            utc_date.strftime('%Y-%m-%d %H:%M:%S'),
                            item.get('title', ''), item.get('synopsis', ''),
                            item.get('description', ''), item.get('episode_title', ''))
            except Exception:
                printExc()
    return channelsTab
def SearchResult(self, str_ch, page, extra):
    """Search VOD movies and series matching str_ch on every configured
    Xtream host and append the hits via addVideo/addDir.

    Each configured host entry 'elm' is: [title, host, user, password,
    user-agent]. Failures on one host are swallowed (best effort) so the
    remaining hosts are still searched.
    """
    multi_tab = xtream_get_conf()
    for elm in multi_tab:
        xhost_ = elm[1]
        self.addMarker({
            'title': ' ** ' + elm[0] + ' ** ',
            'icon': '',
            'desc': ''
        })
        # --- movies ---
        try:
            # FIX: credentials were replaced by redacted '******' string
            # juxtapositions (a syntax error); rebuilt from elm[2]/elm[3],
            # which this same function uses as user/pass in the /movie/ URL.
            Url = xhost_ + '/player_api.php?username=' + elm[2] + '&password=' + elm[3] + '&action=get_vod_streams'
            sts, data = self.cm.getPage(Url)
            data = json_loads(data)
            printDBG('fffffffffffff' + str_ch.lower())
            for elm0 in data:
                if str_ch.lower() in elm0['name'].lower():
                    Url = xhost_ + '/movie/' + elm[2] + '/' + elm[3] + '/' + str(
                        elm0['stream_id']) + '.' + elm0['container_extension']
                    if elm[4] != '':
                        # per-host custom User-Agent
                        Url = strwithmeta(Url, {'User-Agent': elm[4]})
                    if elm0['stream_icon']:
                        stream_icon = elm0['stream_icon']
                    else:
                        stream_icon = ''
                    if elm0['rating']:
                        rating = str(elm0['rating'])
                    else:
                        rating = ''
                    self.addVideo({
                        'import': extra,
                        'good_for_fav': True,
                        'name': 'categories',
                        'category': 'video',
                        'url': Url,
                        'title': elm0['name'],
                        'icon': stream_icon,
                        'desc': 'Rating: ' + rating,
                        'hst': 'direct'
                    })
        except:
            # best effort: a broken host must not abort the whole search
            pass
        # --- series ---
        try:
            Url = xhost_ + '/player_api.php?username=' + elm[2] + '&password=' + elm[3] + '&action=get_series'
            sts, data = self.cm.getPage(Url)
            data = json_loads(data)
            for elm0 in data:
                if str_ch.lower() in elm0['name'].lower():
                    if elm0['cover']:
                        stream_icon = elm0['cover']
                    else:
                        stream_icon = ''
                    if elm0['rating']:
                        rating = str(elm0['rating'])
                    else:
                        rating = ''
                    if elm0['plot']:
                        plot = str(elm0['plot'])
                    else:
                        plot = ''
                    if elm0['genre']:
                        genre = str(elm0['genre'])
                    else:
                        genre = ''
                    desc = 'GENRE:' + genre + ' RATING:' + rating + '/10 \nPlot: ' + plot
                    self.addDir({
                        'import': extra,
                        'good_for_fav': True,
                        'name': 'categories',
                        'category': 'host2',
                        'url': str(elm0['series_id']),
                        'title': elm0['name'],
                        'icon': stream_icon,
                        'desc': desc,
                        'xuser': elm[2],
                        'xpass': elm[3],
                        'xhost': xhost_,
                        'xua': elm[4],
                        'mode': '23'
                    })
        except:
            pass
def showmenu1(self, cItem):
    """Show the top-level Xtream menu for one host: film categories,
    series categories and a search entry.

    cItem carries the host context: 'xhost', 'xuser', 'xpass', 'xua',
    'import', 'icon'. Each section is best-effort (failures swallowed).
    """
    # --- film categories ---
    try:
        self.addMarker({
            'category': 'xtream_vod',
            'title': tscolor('\c0000??00') + 'Films',
            'icon': '',
            'desc': ''
        })
        # FIX: credentials were replaced by redacted '******' string
        # juxtapositions (a syntax error); rebuilt from cItem['xuser'] /
        # cItem['xpass'], which every entry below propagates as user/pass.
        Url = cItem['xhost'] + '/player_api.php?username=' + cItem['xuser'] + '&password=' + cItem['xpass'] + '&action=get_vod_categories'
        sts, data = self.cm.getPage(Url)
        data = json_loads(data)
        self.addDir({
            'import': cItem['import'],
            'category': 'host2',
            'category_id': '',
            'title': 'All',
            'desc': '',
            'xuser': cItem['xuser'],
            'xpass': cItem['xpass'],
            'xhost': cItem['xhost'],
            'xua': cItem['xua'],
            'mode': '21',
            'icon': cItem['icon']
        })
        for elm in data:
            self.addDir({
                'import': cItem['import'],
                'good_for_fav': True,
                'category': 'host2',
                'category_id': elm['category_id'],
                'title': elm['category_name'].strip(),
                'desc': '',
                'xuser': cItem['xuser'],
                'xpass': cItem['xpass'],
                'xhost': cItem['xhost'],
                'xua': cItem['xua'],
                'mode': '21',
                'icon': cItem['icon']
            })
    except:
        pass
    # --- series categories ---
    try:
        self.addMarker({
            'category': 'xtream_vod',
            'title': tscolor('\c0000??00') + 'Series',
            'icon': '',
            'desc': ''
        })
        Url = cItem['xhost'] + '/player_api.php?username=' + cItem['xuser'] + '&password=' + cItem['xpass'] + '&action=get_series_categories'
        sts, data = self.cm.getPage(Url)
        data = json_loads(data)
        self.addDir({
            'import': cItem['import'],
            'name': 'categories',
            'category': 'host2',
            'category_id': '',
            'title': 'All',
            'desc': '',
            'xuser': cItem['xuser'],
            'xpass': cItem['xpass'],
            'xhost': cItem['xhost'],
            'xua': cItem['xua'],
            'mode': '22',
            'icon': cItem['icon']
        })
        for elm in data:
            self.addDir({
                'import': cItem['import'],
                'good_for_fav': True,
                'name': 'categories',
                'category': 'host2',
                'category_id': elm['category_id'],
                'title': elm['category_name'].strip(),
                'desc': '',
                'xuser': cItem['xuser'],
                'xpass': cItem['xpass'],
                'xhost': cItem['xhost'],
                'xua': cItem['xua'],
                'mode': '22',
                'icon': cItem['icon']
            })
    except:
        pass
    # --- search entry (always shown) ---
    self.addDir({
        'import': cItem['import'],
        'name': 'search',
        'category': 'search',
        'title': _('Search'),
        'search_item': True,
        'page': -1,
        'hst': 'tshost',
        'icon': cItem['icon']
    })
def listEPG(self, cItem):
    """List the Raiplay EPG for one channel and day.

    cItem['name'] is formatted '<dd-mm-yyyy> <channel name>' (10-char date,
    one separator char, then the channel name). Programmes without a video
    are added greyed-out ('nop'); playable ones carry their pathID.
    """
    # FIX: local was named 'str', shadowing the builtin
    nameStr = cItem['name']
    epgDate = nameStr[:10]
    channelName = nameStr[11:]
    printDBG("Raiplay - start EPG for channel %s and day %s" % (channelName, epgDate))
    channel_id = channelName.replace(" ", "")
    url = self.EPG_URL
    url = url.replace("[nomeCanale]", channel_id)
    url = url.replace("[dd-mm-yyyy]", epgDate)
    sts, data = self.getPage(url)
    if not sts:
        return
    response = json_loads(data)
    programmes = response[channelName][0]["palinsesto"][0]["programmi"]
    for programme in programmes:
        if not programme:
            continue
        startTime = programme["timePublished"]
        title = programme["name"]
        # pick the best available thumbnail: own portrait/landscape, then
        # the parent programme's, finally the generic placeholder
        if programme["images"]["portrait"] != "":
            thumb = self.getThumbnailUrl(programme["images"]["portrait"])
        elif programme["images"]["landscape"] != "":
            thumb = self.getThumbnailUrl(programme["images"]["landscape"])
        elif programme["isPartOf"] and programme["isPartOf"]["images"]["portrait"] != "":
            thumb = self.getThumbnailUrl(programme["isPartOf"]["images"]["portrait"])
        elif programme["isPartOf"] and programme["isPartOf"]["images"]["landscape"] != "":
            thumb = self.getThumbnailUrl(programme["isPartOf"]["images"]["landscape"])
        else:
            thumb = self.NOTHUMB_URL
        if programme["testoBreve"] != "":
            desc = programme["testoBreve"]
        else:
            desc = programme["description"]
        if programme["hasVideo"]:
            videoUrl = programme["pathID"]
        else:
            videoUrl = None
        # NOTE: dead 'params = dict(cItem)' and unused 'thumbnailImage'
        # assignments from the original were removed; params is rebuilt
        # from scratch in both branches.
        if videoUrl is None:
            # programme is not available
            title = startTime + " <I>" + title + "</I>"
            params = {
                'title': title,
                'url': '',
                'icon': thumb,
                'desc': desc,
                'category': 'nop'
            }
        else:
            title = startTime + " " + title
            params = {
                'title': title,
                'url': videoUrl,
                'icon': thumb,
                'category': 'program',
                'desc': desc
            }
        printDBG("add program %s with pathId %s" % (title, videoUrl))
        self.addVideo(params)
def getVideos(self, videoUrl):
    """Resolve direct stream URLs for a hosted video page.

    videoUrl may be 'url||referer'. Three host families are handled:
    d0stream (two-step scrape), film-hd.vip and clickopen.club (both via a
    POSTed /api/source/<id> JSON endpoint).
    Returns a list of (url-or-'title|url', quality-tag) tuples.
    """
    urlTab = []
    referer = ''
    printDBG('||||||||||||||||||:' + videoUrl)
    if '||' in videoUrl:
        Url, referer = videoUrl.split('||')
    else:
        Url = videoUrl
    # extract the embed/video id from the path
    id_ = ''
    if '/v/' in Url:
        id_ = Url.split('/v/', 1)[1]
    elif '/x/embed/' in Url:
        id_ = Url.split('/x/embed/', 1)[1]
    id_ = id_.replace('/', '')
    if 'd0stream' in Url:
        # d0stream needs a first request to obtain a secondary id
        # ('video-pr'), then a second page exposing the HLS source
        url_ = 'https://v.d0stream.com/'
        printDBG('refererrefererrefererrefererrefererrefererrefererrefererreferer=' + referer)
        Params = dict(self.defaultParams)
        Params['header']['Referer'] = referer
        sts, data = self.getPage(url_, Params)
        if sts:
            printDBG('data_films=' + data)
            films_list = re.findall('video-pr\'>(.*?)<', data, re.S)
            if films_list:
                id_2 = films_list[0]
                id_1 = Url.split('/')[-1]
                URL = 'https://v.d0stream.com/krade.io/we/' + id_1 + '/' + id_2 + '#/' + id_1
                Params['header']['Referer'] = 'https://v.d0stream.com/'
                sts, data = self.getPage(URL, Params)
                if sts:
                    printDBG('data_films2=' + data)
                    films_list = re.findall('HlsSources.*?url":"(.*?)"', data, re.S)
                    if films_list:
                        src = films_list[0]
                        printDBG('src=' + src)
                        urlTab.append(('https://v.d0stream.com' + src, '3'))
                        return urlTab
        return []
    elif 'film-hd.vip' in Url:
        post_data = {'r': '', 'd': 'film-hd.vip'}
        url1 = 'https://film-hd.vip/api/source/' + id_
    else:
        post_data = {'r': '', 'd': 'clickopen.club'}
        url1 = 'https://clickopen.club/api/source/' + id_
    sts, data = self.getPage(url1, post_data=post_data)
    if sts:
        try:
            data = json_loads(data)
            for elm in data['data']:
                printDBG('rrrrrrrrrrrrrrrrr' + str(elm))
                # entry format: '<type> (<label>)|<file-url>' with tag '4'
                titre = elm['type'] + ' (' + elm['label'] + ')'
                urlTab.append((titre + '|' + elm['file'], '4'))
        except:
            printDBG('eurreur post page')
    return urlTab
        # --- tail of an enclosing request helper whose definition starts
        # before this chunk; 'url', 'method', 'params' are its locals ---
        try:
            r = urllib2.Request(url, headers={'X-API-KEY': API_KEY})
            if method == METHOD_POST and params:
                print("POST data: %s" % urllib.urlencode(params))
                r.add_data(urllib.urlencode(params))
            u = urllib2.urlopen(r)
            data = u.read()
            u.close()
        except urllib2.HTTPError, error:
            # HTTP errors still carry a JSON body worth parsing
            data = error.read()
        except Exception as ex:
            raise YouSeeApiException(ex)
        try:
            return json_loads(data)
        except Exception:
            # non-JSON payload: signal failure with None
            return None


class YouSeeLiveTVApi(YouSeeApi):
    """Thin wrappers around the YouSee live-TV API endpoints."""

    def channel(self, id):
        # single channel by numeric id
        return self._invoke(AREA_LIVETV, 'channel', {'id': id})

    def popularChannels(self):
        return self._invoke(AREA_LIVETV, 'popularchannels')

    def allowedChannels(self):
        # apiversion=2 selects the newer allowed-channels response format
        return self._invoke(AREA_LIVETV, 'allowed_channels', {'apiversion': 2})

    def suggestedChannels(self):
def listItems2(self, cItem, nextCategory):
    """List FreeDisc search results (files) plus a 'Next page' entry.

    Posts the JSON search payload to cItem['url'] and adds one directory
    per matching file of type '7' (video) or '6' (audio).
    """
    printDBG("FreeDiscPL.listItems2 cItem[%s]" % (cItem))
    page = cItem.get('page', 0)
    post_data = {
        "search_phrase": cItem.get('f_search_pattern', ''),
        "search_type": cItem.get('f_search_type', ''),
        "search_saved": 0,
        "pages": 0,
        "limit": 0
    }
    if page > 0:
        post_data['search_page'] = page
    params = dict(self.defaultParams)
    params['raw_post_data'] = True
    params['header'] = dict(self.AJAX_HEADER)
    params['header']['Referer'] = self.cm.getBaseUrl(self.getMainUrl()) + 'search/%s/%s' % (
        cItem.get('f_search_type', ''), urllib.quote(cItem.get('f_search_pattern', '')))
    sts, data = self.getPage(cItem['url'], params, json_dumps(post_data))
    if not sts:
        return
    printDBG(data)
    try:
        data = json_loads(data)['response']
        # lookup tables: user id -> user info, dir id -> dir info
        logins = data['logins_translated']
        translated = data['directories_translated']
        for item in data['data_files']['data']:
            userItem = logins[str(item['user_id'])]
            dirItem = translated[str(item['parent_id'])]
            icon = 'http://img.freedisc.pl/photo/%s/7/2/%s.png' % (item['id'], item['name_url'])
            url = '/%s,f-%s,%s' % (userItem['url'], item['id'], item['name_url'])
            title = item['name']
            desc = ' | '.join([item['date_add_format'], item['size_format']])
            desc += '[/br]' + (_('Added by: %s, directory: %s') % (userItem['display'], dirItem['name']))
            params = dict(cItem)
            params.update({
                'good_for_fav': True,
                'f_user_item': userItem,
                'f_dir_item': dirItem,
                'category': nextCategory,
                'title': self.cleanHtmlStr(title),
                'url': self.getFullUrl(url),
                'icon': self.getFullIconUrl(icon),
                'desc': desc,
                'f_type': item.get('type_fk', '')
            })
            # '7' == video, '6' == audio; other types are skipped
            if params['f_type'] in ['7', '6']:
                self.addDir(params)
        if data['pages'] > page:
            params = dict(cItem)
            params.update({
                'good_for_fav': False,
                'title': _('Next page'),
                'page': page + 1
            })
            self.addDir(params)
    except Exception:
        printExc()
def getVideoLink(self, cItem):
    """Resolve USTVnow stream URLs for one channel item.

    First tries the m-api stream endpoint with scode/token/passkey; if that
    yields nothing, falls back to scraping the player page linked from the
    live listing. Every variant URL is decorated with UA + session cookie.
    Returns a list of link dicts.
    """
    printDBG("UstvnowApi.getVideoLink %s" % cItem)
    urlsTab = []
    cookieParams = {
        'cookiefile': self.cookiePath,
        'use_cookie': True,
        'load_cookie': True,
        'save_cookie': True
    }
    # primary path: direct stream API
    sts, data = self.cm.getPage(
        'http://m-api.ustvnow.com/stream/1/live/view?scode=%s&token=%s&key=%s' % (
            cItem.get('scode', ''), self.token, self.passkey), self.defParams)
    if sts:
        try:
            data = json_loads(data)
            tmp = getDirectM3U8Playlist(
                strwithmeta(data['stream'], {'User-Agent': self.HTTP_HEADER['User-Agent']}),
                cookieParams=cookieParams, checkContent=True)
            cookieValue = self.cm.getCookieHeader(self.cookiePath)
            for item in tmp:
                vidUrl = item['url']
                item['url'] = urlparser.decorateUrl(vidUrl, {
                    'User-Agent': self.HTTP_HEADER['User-Agent'],
                    'Cookie': cookieValue
                })
                urlsTab.append(item)
            if len(urlsTab):
                return urlsTab
        except Exception:
            printExc()
    # fallback path: scrape the popup player page for this channel
    sts, data = self.cm.getPage(self.LIVE_URL, self.defParams)
    if not sts:
        return []
    url = self.cm.ph.getSearchGroups(
        data, 'for="popup-%s"[^>]*?href="([^"]+?)"[^>]*?>' % cItem['ui_page'])[0]
    url = self.getFullUrl(url)
    sts, data = self.cm.getPage(url, self.defParams)
    if not sts:
        return []
    url = self.cm.ph.getSearchGroups(data, 'src="([^"]+?)"')[0]
    tmp = getDirectM3U8Playlist(
        strwithmeta(url, {'User-Agent': self.HTTP_HEADER['User-Agent']}),
        cookieParams=cookieParams, checkContent=True)
    cookieValue = self.cm.getCookieHeader(self.cookiePath)
    for item in tmp:
        vidUrl = item['url']
        item['url'] = urlparser.decorateUrl(vidUrl, {
            'User-Agent': self.HTTP_HEADER['User-Agent'],
            'Cookie': cookieValue
        })
        urlsTab.append(item)
    return urlsTab
# List one FreeDiscPL user directory: sub-directories first, then media files,
# then a synthetic "parent directory" entry inserted at the top of currList.
# Caches the user's whole directory tree JSON in self.treeCache (keyed by user id;
# NOTE(review): the cache is reset to {} whenever a different user is requested, so
# it holds at most one user's tree at a time — presumably intentional to bound
# memory; confirm). Sub-dirs of the current dirId are sorted by name and added as
# dirs with Polish descriptions ('Katalogów'/'Plików' = dir/file counts). Files are
# fetched from '/directory/directory_data/get/<user>/<dir>'; only types '7'
# (added as video, with a generated thumbnail URL) and '6' (added as audio) are
# listed. Finally the parent id is located by scanning the cached tree for the
# 'd-<dirId>' key, and a back-link dir is inserted at position 0 unless it would
# duplicate the current or previous directory. All errors are logged via printExc().
def listDir(self, cItem): printDBG("FreeDiscPL.listDir cItem[%s]" % (cItem)) #sts, data = self.getPage(cItem['url']) #if not sts: return userId = cItem.get('f_user_id', '') dirId = cItem.get('f_dir_id', '') urlParams = dict(self.defaultParams) urlParams['raw_post_data'] = True urlParams['header'] = dict(self.AJAX_HEADER) urlParams['header']['Referer'] = cItem['url'] try: dirIcon = self.getFullIconUrl('/static/img/icons/big_dir.png') if userId not in self.treeCache: self.treeCache = {} url = self.getFullUrl('/directory/directory_data/get_tree/%s' % (userId)) sts, data = self.getPage(url, urlParams) if not sts: return self.treeCache[userId] = json_loads(data, '', True)['response']['data'] # sub dirs at first if dirId in self.treeCache[userId]: dirsTab = [] for key in self.treeCache[userId][dirId]: if self.treeCache[userId][dirId][key]['type'] == 'd': dirsTab.append(self.treeCache[userId][dirId][key]) dirsTab.sort(key=lambda item: item['name']) #, reverse=True) for item in dirsTab: if item['id'] in ['0', dirId]: continue url = '/%s,d-%s,%s' % (userId, item['id'], item['name_url']) title = self.cleanHtmlStr(item['name']) desc = ['Katalogów: %s' % item['dir_count']] desc.append('Plików: %s' % item['file_count']) params = dict(cItem) params.update({ 'good_for_fav': True, 'title': title, 'url': self.getFullUrl(url), 'icon': dirIcon, 'f_dir_id': item['id'], 'f_prev_dir_id': dirId, 'prev_url': cItem['url'], 'desc': '[/br]'.join(desc) }) self.addDir(params) # now files data url = self.getFullUrl('/directory/directory_data/get/%s/%s' % (userId, dirId)) sts, data = self.getPage(url, urlParams) if not sts: return data = json_loads(data, '', True)['response']['data'] if 'data' in data: filesTab = [] for key in data['data']: if data['data'][key]['type'] == 'f' and data['data'][key][ 'type_fk'] in ['7', '6']: filesTab.append(data['data'][key]) filesTab.sort(key=lambda item: item['name']) #, reverse=True) url = self.getFullIconUrl('/static/img/icons/big_dir.png') for item in 
filesTab: if '7' == item['type_fk']: icon = 'http://img.freedisc.pl/photo/%s/7/2/%s.png' % ( item['id'], item['name_url']) else: icon = '' url = '/%s,f-%s,%s' % (userId, item['id'], item['name_url']) title = self.cleanHtmlStr(item['name']) desc = ' | '.join( [item['date_add_format'], item['size_format']]) params = dict(cItem) params.update({ 'good_for_fav': True, 'title': title, 'url': self.getFullUrl(url), 'icon': self.getFullIconUrl(icon), 'desc': desc, 'f_type': item.get('type_fk', '') }) if params['f_type'] == '7': self.addVideo(params) else: self.addAudio(params) # find parent id data parentId = None tmpId = 'd-%s' % dirId for key in self.treeCache[userId]: printDBG(">>> %s" % key) if tmpId in self.treeCache[userId][key]: parentId = self.treeCache[userId][key][tmpId]['parent_id'] break if parentId == None: return item = None # find parent id item tmpId = 'd-%s' % parentId for key in self.treeCache[userId]: if tmpId in self.treeCache[userId][key]: item = self.treeCache[userId][key][tmpId] break if item == None: return if item['id'] not in ['0', dirId, cItem.get('f_prev_dir_id', '')]: url = '/%s,d-%s,%s' % (userId, item['id'], item['name_url']) title = self.cleanHtmlStr(item['name']) desc = ['Katalogów: %s' % item['dir_count']] desc.append('Plików: %s' % item['file_count']) params = dict(cItem) params.update({ 'good_for_fav': True, 'title': title, 'url': self.getFullUrl(url), 'icon': dirIcon, 'f_dir_id': item['id'], 'f_prev_dir_id': dirId, 'prev_url': cItem['url'], 'desc': '[/br]'.join(desc) }) self.currList.insert(0, params) except Exception: printExc()
# Collect playable links from a movie page at cItem['url'].
# First harvests any embedded YouTube iframe as a 'TRAILER' entry (need_resolve=1),
# then iterates every 'play-box-iframe ... data-src' embed and applies host-specific
# extraction: moviflex.net (<source src>), moviflex.se ('file":"..."' JSON-ish),
# moviflex.ml (POST to /api/source/ returning a JSON list of labeled files),
# *.moviflex.pw (base64 'JuicyCodes.Run(...)' blob unpacked via
# unpackJSPlayerParams + SAWLIVETV_decryptPlayerParams, yielding file/label pairs
# with the embed URL as Referer). Any other sufficiently long URL is returned with
# need_resolve=1 for the generic urlparser. Returns the accumulated urlTab.
# NOTE(review): the bare 'except: a = ""' at the end silently swallows ALL
# exceptions per-embed (including KeyboardInterrupt on py2) — at minimum this
# should be 'except Exception: printExc()' to aid debugging; left untouched here.
def get_links(self, cItem): urlTab = [] URL = cItem['url'] sts, data = self.getPage(URL, self.defaultParams) if sts: Tab_els = re.findall('src="(https://www.youtube.*?)"', data, re.S) if Tab_els: urlTab.append({ 'name': 'TRAILER', 'url': Tab_els[0], 'need_resolve': 1 }) Tab_els = re.findall( 'play-box-iframe.*?data-src="(.*?)".*?></iframe>', data, re.S) for url in Tab_els: try: if url.startswith('/'): url = 'http:' + url if '//moviflex.net' in url: sts, data = self.getPage(url, self.defaultParams) url_els = re.findall('<source src="(.*?)"', data, re.S) if url_els: urlTab.append({ 'name': 'Moviflex', 'url': url_els[0], 'need_resolve': 0, 'type': 'local' }) elif '//moviflex.se' in url: sts, data = self.getPage(url, self.defaultParams) url_els = re.findall('file":.*?"(.*?)"', data, re.S) if url_els: urlTab.append({ 'name': 'Moviflex', 'url': url_els[0], 'need_resolve': 0, 'type': 'local' }) elif 'moviflex.ml' in url: post_data = {'r': '', 'd': 'moviflex.ml'} url = url.replace('/v/', '/api/source/') sts, data = self.getPage(url, self.defaultParams, post_data) data = json_loads(data) elmdata = data['data'] for elm0 in elmdata: urlTab.append({ 'name': '|' + elm0['label'] + '| Moviflex', 'url': elm0['file'], 'need_resolve': 0, 'type': 'local' }) elif '.moviflex.pw' in url: paramsUrl = dict(self.defaultParams1) paramsUrl['header']['Referer'] = URL sts, data = self.getPage(url, paramsUrl) url_els = re.findall('JuicyCodes.Run\((.*?)\)', data, re.S) if url_els: b64data = url_els[0] b64data = b64data.replace('"+"', '').replace('"', '') script_data = base64.b64decode(b64data) script_ = unpackJSPlayerParams( script_data, SAWLIVETV_decryptPlayerParams, 0) url_els = re.findall( 'file":"(.*?)".*?label":"(.*?)"', script_, re.S) for (url_, titre_) in url_els: urlTab.append({ 'name': '|' + titre_ + '| Moviflex', 'url': strwithmeta(url_, {'Referer': url}), 'need_resolve': 0, 'type': 'local' }) else: if len(url) > 4: #url11 = url.split('https://') #url='https://' + url11[-1] 
urlTab.append({ 'name': gethostname(url), 'url': url, 'need_resolve': 1 }) except: a = '' return urlTab
# YouTube (and Google Docs video) format extractor.
# Pipeline: normalize redirect URLs (_NEXT_URL_RE), extract the video id (or a
# Google Docs 'docid', which switches to docs.google.com get_video_info with its
# own cookie jar), download the watch page, then fetch get_video_info — either via
# the embedded/age-gate path (when allowAgeGate and LOGIN_REQUIRED is present) or
# by retrying up to 5 times across el=detailpage/embedded/vevo/'' until a
# 'token'/'Token' parameter appears. Rejects rental videos. The query-string
# response is parsed into a dict; stream maps are pulled from
# url_encoded_fmt_stream_map / adaptive_fmts (plus ytplayer.config args when
# encrypted 's=' signatures are detected), then from hlsvp m3u8 manifests, then
# from the newer player_response JSON (streamingData formats/adaptiveFormats with
# 'cipher'/'signatureCipher'). Encrypted signatures are batch-decrypted with
# CYTSignAlgoExtractor using the player JS URL scraped from the page, and spliced
# into each URL via the '{0}' placeholder. A DASH manifest ('dashmpd') is appended
# when valid. Returns a list of result dicts (id/url/ext/format/duration/m3u8...),
# with Google-Docs cookies attached and external subtitle tracks when found.
# NOTE(review): in the player_response cipher loop, sig_item/s_item/sp_item are
# only assigned when the matching key is present (their initializations are
# commented out) — a cipher string missing 'sp=' would raise NameError at the
# 'if len(sp_item)' check; relies on the enclosing broad except. Confirm upstream.
def _real_extract(self, url, allowVP9=False, allowAgeGate=False): # Extract original video URL from URL with redirection, like age verification, using next_url parameter mobj = re.search(self._NEXT_URL_RE, url) if mobj: #https url = 'http://www.youtube.com/' + compat_urllib_parse.unquote( mobj.group(1)).lstrip('/') video_id = self._extract_id(url) if 'yt-video-id' == video_id: video_id = self.cm.ph.getSearchGroups( url + '&', '[\?&]docid=([^\?^&]+)[\?&]')[0] isGoogleDoc = True url = url videoKey = 'docid' videoInfoBase = 'https://docs.google.com/get_video_info?docid=%s' % video_id COOKIE_FILE = GetCookieDir('docs.google.com.cookie') videoInfoparams = { 'cookiefile': COOKIE_FILE, 'use_cookie': True, 'load_cookie': False, 'save_cookie': True } else: url = 'http://www.youtube.com/watch?v=%s&' % video_id isGoogleDoc = False videoKey = 'video_id' videoInfoBase = 'https://www.youtube.com/get_video_info?video_id=%s&' % video_id videoInfoparams = {} sts, video_webpage = self.cm.getPage(url) if not sts: raise ExtractorError('Unable to download video webpage') # Get video info #if re.search(r'player-age-gate-content">', video_webpage) is not None: if allowAgeGate and re.search(r'"LOGIN_REQUIRED"', video_webpage) is not None: #self.report_age_confirmation() age_gate = True # We simulate the access to the video from www.youtube.com/v/{video_id} # this can be viewed without login into Youtube data = compat_urllib_parse.urlencode({ 'el': 'embedded', 'gl': 'US', 'hl': 'en', 'eurl': 'https://youtube.googleapis.com/v/' + video_id, 'asv': 3, 'sts': '1588', }) video_info_url = videoInfoBase + data sts, video_info = self.cm.getPage(video_info_url, videoInfoparams) if not sts: raise ExtractorError('Faile to get "%s"' % video_info_url) else: age_gate = False tries = 0 tokenFound = False while (tries < 5) and (not tokenFound): for el_type in [ '&el=detailpage', '&el=embedded', '&el=vevo', '' ]: #https video_info_url = videoInfoBase + ( '%s&ps=default&eurl=&gl=US&hl=en' % (el_type)) sts, 
video_info = self.cm.getPage(video_info_url, videoInfoparams) if not sts: continue if 'token' in video_info or 'Token' in video_info: if 'channel_creation_token' in video_info: printDBG("channel_creation_token found!") elif 'account_playback_token' in video_info: printDBG("account_playback_token found!") else: printDBG("different token found!") printDBG("token found after %s tries!" % (tries + 1)) tokenFound = True break tries = tries + 1 if not tokenFound: raise ExtractorError('"token" parameter not in video info') # Check for "rental" videos if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info: raise ExtractorError('"rental" videos not supported') # Start extracting information video_info = video_info.split('&') video_info2 = {} for item in video_info: item = item.split('=') if len(item) < 2: continue video_info2[item[0].strip()] = item[1].strip() video_info = video_info2 del video_info2 dashmpd = str(_unquote(str(video_info.get('dashmpd', '')), None)) # subtitles if 'length_seconds' not in video_info: video_duration = '' else: video_duration = video_info['length_seconds'] if 'url_encoded_fmt_stream_map' in video_info: video_info['url_encoded_fmt_stream_map'] = [ _unquote(video_info['url_encoded_fmt_stream_map'], None) ] if 'adaptive_fmts' in video_info: video_info['adaptive_fmts'] = [ _unquote(video_info['adaptive_fmts'], None) ] try: mobj = re.search(r';ytplayer.config = ({.*?});', video_webpage) if not mobj: raise ValueError('Could not find vevo ID') ytplayer_config = json_loads(mobj.group(1)) args = ytplayer_config['args'] # Easy way to know if the 's' value is in url_encoded_fmt_stream_map # this signatures are encrypted if 'url_encoded_fmt_stream_map' not in args: raise ValueError('No stream_map present') # caught below re_signature = re.compile(r'[&,]s=') m_s = re_signature.search(args['url_encoded_fmt_stream_map']) if m_s is not None: printDBG('%s: Encrypted signatures detected.' 
% video_id) video_info['url_encoded_fmt_stream_map'] = [ args['url_encoded_fmt_stream_map'] ] m_s = re_signature.search(args.get('adaptive_fmts', '')) except ValueError: pass # Decide which formats to download is_m3u8 = 'no' url_map = {} video_url_list = {} if len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len( video_info.get('adaptive_fmts', [])) >= 1: encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get( 'adaptive_fmts', [''])[0] _supported_formats = self._supported_formats if allowVP9: _supported_formats.extend(['313', '271']) for url_data_str in encoded_url_map.split(','): if 'index=' in url_data_str and 'index=0-0&' in url_data_str: continue if 'itag=' in url_data_str and 'url=' in url_data_str: url_data_str = url_data_str.split('&') url_data = {} supported = False for item in url_data_str: item = item.split('=') if len(item) < 2: continue key = item[1].strip() if item[0] == 'itag': if key in self._supported_formats: supported = True else: break url_data[item[0]] = key if not supported: continue url_item = {'url': _unquote(url_data['url'], None)} if 'sig' in url_data: signature = url_data['sig'] url_item['url'] += '&signature=' + signature elif 's' in url_data: url_item['esign'] = _unquote(url_data['s']) if 'sp' in url_data: url_item['url'] += '&%s={0}' % url_data['sp'] else: url_item['url'] += '&signature={0}' if not 'ratebypass' in url_item['url']: url_item['url'] += '&ratebypass=yes' url_map[url_data['itag']] = url_item video_url_list = self._get_video_url_list(url_map, allowVP9) if video_info.get('hlsvp') and not video_url_list: is_m3u8 = 'yes' manifest_url = _unquote(video_info['hlsvp'], None) url_map = self._extract_from_m3u8(manifest_url, video_id) video_url_list = self._get_video_url_list(url_map, allowVP9) if video_info.get('player_response') and not video_url_list: is_m3u8 = 'yes' manifest_url = _unquote(video_info['player_response'], None) manifest = re.search('"hlsManifestUrl":"(.*?)"', 
manifest_url) if manifest: manifest_url = manifest.group(1) url_map = self._extract_from_m3u8(manifest_url, video_id) video_url_list = self._get_video_url_list(url_map, allowVP9) if video_info.get('player_response') and not video_url_list: try: is_m3u8 = 'no' cipher = {} url_data_str = json_loads( _unquote(video_info['player_response'], None))['streamingData']['formats'] try: url_data_str += json_loads( _unquote(video_info['player_response'], None))['streamingData']['adaptiveFormats'] except Exception: printExc() for url_data in url_data_str: printDBG(str(url_data)) if 'url' in url_data: url_item = {'url': url_data['url']} else: cipher = url_data.get('cipher', '') + url_data.get( 'signatureCipher', '') printDBG(cipher) cipher = cipher.split('&') for item in cipher: #sig_item = '' #s_item = '' #sp_item = '' if 'url=' in item: url_item = { 'url': _unquote(item.replace('url=', ''), None) } if 'sig=' in item: sig_item = item.replace('sig=', '') if 's=' in item: s_item = item.replace('s=', '') if 'sp=' in item: sp_item = item.replace('sp=', '') if 'sig' in cipher: signature = sig_item url_item['url'] += '&signature=' + signature elif len(s_item): url_item['esign'] = _unquote(s_item) if len(sp_item): url_item['url'] += '&%s={0}' % sp_item else: url_item['url'] += '&signature={0}' if not 'ratebypass' in url_item['url']: url_item['url'] += '&ratebypass=yes' url_map[str(url_data['itag'])] = url_item video_url_list = self._get_video_url_list(url_map, allowVP9) except Exception: printExc() if not video_url_list: return [] if self.cm.isValidUrl(dashmpd): sign = ph.search(dashmpd, r'/s/([a-fA-F0-9\.]+)')[0] if sign: dashmpd = dashmpd.replace(sign, '{0}') video_url_list.append(('mpd', {'url': dashmpd})) if sign: video_url_list[-1][1]['esign'] = sign signItems = [] signatures = [] for idx in range(len(video_url_list)): if 'esign' in video_url_list[idx][1]: signItems.append(video_url_list[idx][1]) signatures.append(video_url_list[idx][1]['esign']) if len(signatures): # decrypt 
signatures printDBG("signatures: %s" % signatures) playerUrl = '' tmp = ph.find(video_webpage, ('<script', '>', 'player/base'))[1] playerUrl = ph.getattr(tmp, 'src') if not playerUrl: for reObj in [ '"assets"\:[^\}]+?"js"\s*:\s*"([^"]+?)"', 'src="([^"]+?)"[^>]+?name="player.*?/base"', '"jsUrl":"([^"]+?)"' ]: playerUrl = ph.search(video_webpage, reObj)[0] if playerUrl: break playerUrl = self.cm.getFullUrl(playerUrl.replace('\\', ''), self.cm.meta['url']) if playerUrl: decSignatures = CYTSignAlgoExtractor( self.cm).decryptSignatures(signatures, playerUrl) if len(signatures) == len(signItems): try: for idx in range(len(signItems)): signItems[idx]['url'] = signItems[idx][ 'url'].format(decSignatures[idx]) except Exception: printExc() SetIPTVPlayerLastHostError( _('Decrypt Signatures Error')) return [] else: return [] if isGoogleDoc: cookieHeader = self.cm.getCookieHeader(COOKIE_FILE) sub_tracks = self._get_subtitles(video_id) results = [] for format_param, url_item in video_url_list: # Extension video_extension = self._video_extensions.get(format_param, 'flv') #video_format = '{0} - {1}'.format(format_param if format_param else video_extension, # self._video_dimensions.get(format_param, '???')) video_format = self._video_dimensions.get(format_param, '???') video_real_url = url_item['url'] if len(sub_tracks): video_real_url = strwithmeta( video_real_url, {'external_sub_tracks': sub_tracks}) if isGoogleDoc: video_real_url = strwithmeta(video_real_url, {'Cookie': cookieHeader}) results.append({ 'id': video_id, 'url': video_real_url, 'uploader': '', 'title': '', 'ext': video_extension, 'format': video_format, 'thumbnail': '', 'duration': video_duration, 'player_url': '', 'm3u8': is_m3u8, }) return results
# Resolve HLS links for an mlbstream.tv / nhlstream.tv player page.
# Optionally follows an inner <iframe> (cItem['get_iframe']), unescapes the
# 'unescape(...)' payload embedded in the page, then regex-extracts the player
# config: 'source' (m3u8 URL), 'replace'/'keyurl' (AES key-URI rewrite pair, also
# derivable from a single '.replace(old, new)' found in the overridden
# XHR prototype.open body), a 'rewrittenUrl' used with btoa (proxied key URL), or
# a nhl.js script URL. The m3u8 is expanded via getDirectM3U8Playlist; when a
# key rewrite pair exists it is attached as iptv_m3u8_key_uri_replace_old/new
# meta, otherwise a 'priv_script_url' (base64 replace-table, '<proxy>' URL, or
# nhl.js URL) is attached with need_resolve=1 for later resolution.
# NOTE(review): 'data = urllib.unquote(data[1:-1]) + data' operates on the full
# page slice rather than the captured 'tmp' unescape() argument — looks like tmp
# was intended; confirm against upstream before changing.
def getVideoLink(self, cItem): printDBG("MLBStreamTVApi.getVideoLink") urlsTab = [] urlParams = dict(self.defaultParams) urlParams['header'] = dict(urlParams['header']) urlParams['header']['Referer'] = cItem.get('Referer', cItem['url']) sts, data = self.cm.getPage(cItem['url'], urlParams) if not sts: return [] if cItem.get('get_iframe', False): url = self.getFullUrl(self.cm.ph.getSearchGroups(data, '''<iframe[^>]+?src=['"]([^"^']+?)['"]''', 1, True)[0], self.cm.meta['url']) if url != '': urlParams['header']['Referer'] = self.cm.meta['url'] sts, data = self.cm.getPage(url, urlParams) if not sts: return urlsTab cUrl = self.cm.meta['url'] tmp = self.cm.ph.getDataBeetwenMarkers(data, 'unescape(', ')', False)[1].strip() data = urllib.unquote(data[1:-1]) + data printDBG("+++") printDBG(data) printDBG("+++") source = self.cm.ph.getSearchGroups(data, '''[\s\{\,]['"]?source['"]?\s*:\s*['"](https?://[^'^"]+?)['"]''', 1, True)[0] replace = self.cm.ph.getSearchGroups(data, '''[\s\{\,]['"]?replace['"]?\s*:\s*['"](https?://[^'^"]+?)['"]''', 1, True)[0] keyurl = self.cm.ph.getSearchGroups(data, '''[\s\{\,]['"]?keyurl['"]?\s*:\s*['"](https?://[^'^"]+?)['"]''', 1, True)[0] rewrittenUrl = self.cm.ph.getSearchGroups(data, '''\=\s*?['"]([^'^"]+?)['"]\s*?\+\s*?btoa''', 1, True)[0] replaceTab = self.cm.ph.getDataBeetwenMarkers(data, 'prototype.open', '};', False)[1] printDBG(replaceTab) replaceTab = re.compile('''\.replace\(['"](\s*[^'^"]+?)['"]\s*\,\s*['"]([^'^"]+?)['"]''').findall(replaceTab) printDBG(replaceTab) scriptUrl = '' hlsTab = getDirectM3U8Playlist(source, checkContent=True, sortWithMaxBitrate=9000000) if keyurl == '' and 1 == len(replaceTab): replace = replaceTab[0][0] keyurl = replaceTab[0][1] if replace != '' and keyurl != '': for idx in range(len(hlsTab)): hlsTab[idx]['url'] = strwithmeta(hlsTab[idx]['url'], {'iptv_m3u8_key_uri_replace_old': replace, 'iptv_m3u8_key_uri_replace_new': keyurl}) elif len(replaceTab): scriptUrl = '|' + base64.b64encode(json_loads(replaceTab)) 
elif rewrittenUrl != '': scriptUrl = '<proxy>' + rewrittenUrl elif '/js/nhl.js' in data: scriptUrl = self.getFullUrl(self.cm.ph.getSearchGroups(data, '''<script[^>]+?src=['"]([^"^']*?js/nhl\.js)['"]''', 1, True)[0], self.cm.meta['url']) if scriptUrl != '': for idx in range(len(hlsTab)): hlsTab[idx]['need_resolve'] = 1 hlsTab[idx]['url'] = strwithmeta(hlsTab[idx]['url'], {'name': cItem['name'], 'Referer': cUrl, 'priv_script_url': scriptUrl}) urlsTab = hlsTab return urlsTab
# Log in to VideoStar (WP Pilot).
# Fast path: GET 'v1/user' — if a non-empty data.token is already present the
# session is valid and (True, '') is returned. Otherwise loads the /login page,
# fetches the gatsby app bundle to scrape GRECAPTCHA_SITEKEY, solves the captcha
# via self.processCaptcha (skipped entirely for the special 'guest' login), and
# POSTs a raw JSON body to 'v1/user_auth/login'. On success stores
# self.userToken and returns (True, ''); otherwise returns (False, message) with
# a Polish error string ('Błędne dane do logowania.' = bad credentials,
# 'Niezrozumiała odpowiedź serwera.' = unintelligible server response).
# NOTE(review): the final 'return False, (errMessage % actionUrl)' branch is
# reached when errMessage == '' — formatting '' with % raises TypeError; it
# presumably meant the earlier errMessage template. Confirm before fixing.
def doLogin(self, login, password): printDBG("VideoStarApi.doLogin") logged = False httpParams = dict(self.defaultParams) actionUrl = self.getFullUrl('v1/user', 'api') sts, data = self.cm.getPage(actionUrl, httpParams) printDBG(">>> user >>>") printDBG(data) printDBG("<<<") if sts: try: data = json_loads(data, '', True) if '' != data['data']['token']: self.userToken = data['data']['token'] return True, '' except Exception: printExc() loginUrl = self.getFullUrl('/login') errMessage = _("Get page \"%s\" error.") sts, data = self.cm.getPage(loginUrl, self.defaultParams) if not sts: return False, (errMessage % loginUrl) # sts, data = self.cm.ph.getDataBeetwenNodes(data, ('<form', '>', 'login'), ('</form', '>')) # if not sts: return False, "" link = self.cm.ph.getSearchGroups( data, '''<link as="script" rel="preload" href=['"](\/gatsby\-statics\/app\-[^'^"]+?)['"]''' )[0] sts, data = self.cm.getPage(self.getFullUrl(link), self.defaultParams) if not sts: return False, (errMessage % loginUrl) if login != 'guest': sitekey = self.cm.ph.getSearchGroups( data, '''GRECAPTCHA_SITEKEY.*?['"]([^'^"]+?)['"]''')[0] if sitekey != '': token, errorMsgTab = self.processCaptcha(sitekey, loginUrl) if token == '': return False, errorMsgTab else: return False, errorMsgTab post_data = '{"login":"******","password":"******","g-recaptcha-response":"%s","permanent":"1","device":"web"}' % ( login, password, token) else: post_data = '{"login":"******","password":"******","permanent":"1","device":"web"}' % ( login, password) actionUrl = self.getFullUrl('v1/user_auth/login', 'api') httpParams['header'] = dict(httpParams['header']) httpParams['header']['Referer'] = loginUrl httpParams['raw_post_data'] = True sts, data = self.cm.getPage(actionUrl, httpParams, post_data) printDBG(">>> user_auth/login >>>") printDBG(data) printDBG("<<<") if sts: errMessage = '' try: data = json_loads(data, '', True) if '' == data['data']['token']: errMessage = 'Błędne dane do logowania.' 
else: self.userToken = data['data']['token'] return True, '' except Exception: printExc() errMessage = 'Niezrozumiała odpowiedź serwera.' if errMessage != '': return False, errMessage else: return False, (errMessage % actionUrl) return False, _("Unknown error.")
# Build the navigation tree for mlbstream.tv / nhlstream.tv.
# priv_cat == None: returns the two hard-coded site roots as 'list_items' dirs.
# priv_cat == 'list_items': scrapes the site menu's last link (turned into a live
# video item, resolving a <title>/<iframe> when present), then parses the
# 'timezoneJSON' JS array of schedule tables — rows with 'colspan' start a new
# group ('sub_items' dir), other rows yield per-game dirs (priv_cat 'links') with
# kickoff time decoded from the data-token unix timestamp and formatted with the
# site's base URL in the description.
# priv_cat == 'sub_items': returns the previously collected sub-item list as-is.
# otherwise ('links'): opens the game page, lists every 'custom-related-links'
# alternative as a video item (get_iframe=True) and inserts the page's own
# <iframe> stream at position 0. Returns channelsList.
def getList(self, cItem): printDBG("MLBStreamTVApi.getList cItem[%s]" % cItem) channelsList = [] category = cItem.get('priv_cat') if category == None: tab = [{'url': 'http://mlbstream.tv/', 'icon': self.DEFAULT_ICON_URL}, {'url': 'http://nhlstream.tv/', 'icon': 'http://nhlstream.tv/wp-content/uploads/2018/09/nhl-logo.png'}, #{'url':'http://nflstream.tv/'}, #{'url':'http://nbastream.tv/'}, ] for item in tab: channelsList.append({'name': 'mlbstream.tv', 'type': 'dir', 'priv_cat': 'list_items', 'title': item['url'], 'url': item['url'], 'icon': item['icon']}) elif category == 'list_items': defaultIcon = cItem.get('icon', '') sts, data = self.cm.getPage(cItem['url'], self.defaultParams) if not sts: return [] cUrl = self.cm.meta['url'] tmp = self.cm.ph.getDataBeetwenNodes(data, ('<ul', '>', 'menu-menu'), ('</ul', '>'), False)[1] tmp = self.cm.ph.getAllItemsBeetwenMarkers(tmp, '<a', '</a>') if len(tmp): url = self.getFullUrl(self.cm.ph.getSearchGroups(tmp[-1], '''href=['"]([^'^"]+?)['"]''')[0], cUrl) title = self.cleanHtmlStr(tmp[-1]) sts, tmp = self.cm.getPage(url, self.defaultParams) if sts and '<iframe' in tmp: title = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(tmp, '<title', '</title>')[1]) url = self.getFullUrl(self.cm.ph.getSearchGroups(tmp, '''<iframe[^>]+?src=['"]([^"^']+?)['"]''', 1, True)[0], self.cm.meta['url']) channelsList.append({'name': 'mlbstream.tv', 'type': 'video', 'url': url, 'title': title, 'Referer': self.cm.meta['url'], 'icon': defaultIcon}) sDesc = self.cleanHtmlStr(self.cm.ph.getDataBeetwenNodes(data, ('<div', '>', 'entry-content'), ('</', '>'), False)[1]) data = self.cm.ph.getDataBeetwenReMarkers(data, re.compile('var\s+?timezoneJSON\s*?=\s*?\['), re.compile('\];'), False)[1] try: data = json_loads('[%s]' % data) for sData in data: subItems = [] sTitle = '' sData = self.cm.ph.getAllItemsBeetwenMarkers(sData, '<tr', '</tr>') for item in sData: if 'colspan' in item: if len(subItems): channelsList.append({'name': 'mlbstream.tv', 'type': 
'dir', 'priv_cat': 'sub_items', 'title': sTitle, 'sub_items': subItems, 'desc': sDesc, 'icon': defaultIcon}) subItems = [] sTitle = self.cleanHtmlStr(item) continue date = self.cm.ph.getSearchGroups(item, '''data\-token=['"]([^'^"]+?)['"]''')[0] date = datetime.fromtimestamp(int(date)) url = self.cm.ph.getSearchGroups(item, '''\sdata\-link=['"]([^'^"]+?)['"]''')[0] item = self.cm.ph.getAllItemsBeetwenMarkers(item, '<td', '</td>') title = self.cleanHtmlStr(''.join(item[3:])) icon = self.getFullIconUrl(self.cm.ph.getSearchGroups(item[3], '''<img[^>]+?src=['"]([^'^"]+?)['"]''')[0]) desc = self.cleanHtmlStr(item[2]) desc += '[/br]%s %s' % (self.cm.getBaseUrl(self.cm.meta['url'], True), date.strftime('%A, %-d %B %H:%M')) subItems.append({'name': 'mlbstream.tv', 'type': 'dir', 'priv_cat': 'links', 'title': title, 'url': self.getFullUrl(url, self.cm.meta['url']), 'desc': desc, 'icon': icon}) if len(subItems): channelsList.append({'name': 'mlbstream.tv', 'type': 'dir', 'priv_cat': 'sub_items', 'title': sTitle, 'sub_items': subItems, 'desc': sDesc, 'icon': defaultIcon}) except Exception: printExc() elif category == 'sub_items': channelsList = cItem['sub_items'] else: urlParams = dict(self.defaultParams) urlParams['header'] = dict(urlParams['header']) urlParams['header']['Referer'] = cItem['url'] sts, data = self.cm.getPage(cItem['url'], urlParams) if not sts: return [] tmp = self.cm.ph.getDataBeetwenNodes(data, ('<div', '>', 'custom-related-links'), ('</div', '>'))[1] tmp = self.cm.ph.getAllItemsBeetwenNodes(tmp, ('<a', '>'), ('</a', '>')) for item in tmp: url = self.getFullUrl(self.cm.ph.getSearchGroups(item, '''href=['"]([^'^"]+?)['"]''')[0], self.cm.meta['url']) title = '%s - %s' % (cItem['title'], self.cleanHtmlStr(item)) params = dict(cItem) params.update({'type': 'video', 'title': title, 'url': url, 'Referer': self.cm.meta['url'], 'get_iframe': True}) channelsList.append(params) url = self.getFullUrl(self.cm.ph.getSearchGroups(data, 
'''<iframe[^>]+?src=['"]([^"^']+?)['"]''', 1, True)[0], self.cm.meta['url']) params = dict(cItem) params.update({'type': 'video', 'url': url, 'Referer': self.cm.meta['url']}) params = dict(cItem) params.update({'type': 'video', 'url': url, 'Referer': self.cm.meta['url']}) channelsList.insert(0, params) return channelsList
# Resolve TVN VOD links for an episode id.
# Tries each device platform ('Panasonic', 'Samsung', 'Android2') in turn against
# the platform-specific getItem API, stopping at the first platform that yields
# links. A null 'video_content' or an empty per-video 'url' is treated as DRM
# protection (SetIPTVPlayerLastHostError). Each playable url gets a
# 'tvn_platform' meta tag and need_resolve=1, keyed by 'profile_name' quality.
# When more than one quality is found, links are sorted against the user's
# configured max bitrate via CSelOneLink/QUALITIES_TABLE, and optionally reduced
# to the single best link when TVNUseDF is enabled. Returns videoUrls.
def getLinks(self, id): printDBG("TvnVod.getLinks cItem.id[%r]" % id) videoUrls = [] for pl in ['Panasonic', 'Samsung', 'Android2']: #, 'Android4']: #'Android', ''Samsung', if pl in ['Android', 'Android2', 'Panasonic']: url = '&type=episode&id=%s&limit=%d&page=1&sort=newest&m=%s' % ( id, self.itemsPerPage, 'getItem') else: url = 'm=getItem&id=%s&android23video=1&deviceType=Tablet&os=4.1.1&playlistType=&connectionType=WIFI&deviceScreenWidth=1920&deviceScreenHeight=1080&appVersion=3.3.4&manufacturer=unknown&model=androVMTablet' % id url = self.getBaseUrl(pl) + url sts, data = self.cm.getPage(url, {'header': self.getHttpHeader(pl)}) if not sts: continue try: data = json_loads(data) if 'success' == data['status']: data = data['item'] # videoTime = 0 # tmp = self._getJItemStr(data, 'run_time', '') # if '' != tmp: # tmp = tmp.split(":") # videoTime = int(tmp[0])*60*60+int(tmp[1])*60+int(tmp[2]) plot = self._getJItemStr(data, 'lead', '') printDBG("data:\n%s\n" % data) videos = data['videos']['main']['video_content'] if None == videos: SetIPTVPlayerLastHostError("DRM protection.") else: for video in videos: url = self._getJItemStr(video, 'url', '') if '' == url: SetIPTVPlayerLastHostError("DRM protection.") # url = self._getJItemStr(video, 'src', '') if '' != url: url = strwithmeta(url, {'tvn_platform': pl}) qualityName = self._getJItemStr( video, 'profile_name', '') videoUrls.append({ 'name': qualityName, 'profile_name': qualityName, 'url': url, 'need_resolve': 1 }) if 1 < len(videoUrls): max_bitrate = int( config.plugins.iptvplayer.TVNDefaultformat.value) def __getLinkQuality(itemLink): return int( TvnVod.QUALITIES_TABLE.get( itemLink['profile_name'], 9999)) videoUrls = CSelOneLink(videoUrls, __getLinkQuality, max_bitrate).getSortedLinks() if config.plugins.iptvplayer.TVNUseDF.value: videoUrls = [videoUrls[0]] except Exception: printExc() if len(videoUrls): break return videoUrls
# Resolve playable links for a TV3 (C3player / Ooyala) video page.
# Path 1: a direct 'file: <...>.m3u8' on the page → expand via
# getDirectM3U8Playlist. Path 2: extract the Ooyala 'embedToken'; if absent,
# surface the geo-block message and optionally retry through a RU web proxy when
# the tv3player_use_web_proxy setting is on. With a token, split its path into
# provider/embed codes and call the Ooyala player_api authorization endpoint;
# each returned stream is either base64-decoded ('encoded' format) or used raw,
# then expanded as HLS (drm flag noted) or HDS (f4m). DRM-flagged HLS aborts with
# a 'Link protected with DRM.' host error. Surviving links are tagged with
# iptv_proto 'm3u8'/'f4m' meta and concatenated (HLS first). Returns linksTab.
def getLinksForVideo(self, cItem): printDBG("C3player.getLinksForVideo [%s]" % cItem) linksTab = [] hlsLinksTab = [] hdsLinksTab = [] sts, data = self.cm.getPage(cItem['url']) if not sts: return [] hlsUrl = self.cm.ph.getSearchGroups(data, '''['"]?file['"]?\s*?:\s*?['"](https?://[^'^"]+?\.m3u8(?:\?[^'^"]+?)?)['"]''')[0] if hlsUrl != '': hlsLinksTab = getDirectM3U8Playlist(hlsUrl, checkExt=True, checkContent=True, sortWithMaxBitrate=999999999) else: embedToken = self.cm.ph.getSearchGroups(data, '''['"]?embedToken['"]?\s*?:\s*?['"](https?://[^'^"]+?)['"]''')[0] if embedToken == '': errorMsg = self.cleanHtmlStr(self.cm.ph.getDataBeetwenNodes(data, ('<div', '>', 'geo_block'), ('</div', '>'))[1]) SetIPTVPlayerLastHostError(errorMsg) if embedToken == '' and config.plugins.iptvplayer.tv3player_use_web_proxy.value: # http://getproxi.es/IE-proxies/ proxy = 'http://ruproxy.herokuapp.com/index.php?q={0}&hl=2e1'.format(urllib.quote_plus(cItem['url'])) params = {'header': dict(self.HEADER)} params['header']['Referer'] = proxy params.update({'cookie_items': {'flags': '2e1'}, 'use_cookie': True}) sts, data = self.cm.getPage(proxy, params) if not sts: return [] printDBG("+++++++++++++++++++++++++++++++++++++++") printDBG(data) printDBG("+++++++++++++++++++++++++++++++++++++++") embedToken = self.cm.ph.getSearchGroups(data, '''['"]?embedToken['"]?\s*?:\s*?['"](https?://[^'^"]+?)['"]''')[0] drmProtection = False if embedToken != '': parsedUri = urlparse(embedToken) auth = parsedUri.path.split('/embed_token/', 1)[-1].split('/') if len(auth) > 1: url = 'https://player.ooyala.com/sas/player_api/v2/authorization/embed_code/%s/%s?embedToken=%s&device=html5&domain=www.tv3.ie&auth_token=' % (auth[0], auth[1], urllib.quote_plus(embedToken)) sts, data = self.cm.getPage(url) if not sts: return [] try: drmProtection = False #printDBG(data) data = json_loads(data) for item in data['authorization_data'][auth[1]]['streams']: if item['url']['format'] == 'encoded': url = 
base64.b64decode(item['url']['data']) else: url = item['url']['data'] if item['delivery_type'] == 'hls': if item.get('drm'): drmProtection = True hlsLinksTab = getDirectM3U8Playlist(url, checkExt=True, checkContent=True, sortWithMaxBitrate=999999999) elif item['delivery_type'] == 'hds': hdsLinksTab = getF4MLinksWithMeta(url, checkExt=False, sortWithMaxBitrate=999999999) except Exception: printExc() printDBG(hlsLinksTab) if drmProtection: SetIPTVPlayerLastHostError(_('Link protected with DRM.')) return [] for idx in range(len(hlsLinksTab)): hlsLinksTab[idx]['url'] = strwithmeta(hlsLinksTab[idx]['url'], {'iptv_proto': 'm3u8'}) for idx in range(len(hdsLinksTab)): hdsLinksTab[idx]['url'] = strwithmeta(hdsLinksTab[idx]['url'], {'iptv_proto': 'f4m'}) linksTab.extend(hlsLinksTab) linksTab.extend(hdsLinksTab) return linksTab
# Resolve stream URLs for a VideoStar (WP Pilot) channel selected by priv_idx
# into self.cacheChannelList.
# Loops (with retry) over the channel API ('v1/channel' when logged in, guest
# variant otherwise), requesting the stream protocol configured in
# videostar_streamprotocol: '2' → HLS (getDirectM3U8Playlist), '3'/'4' → DASH
# (getMPDLinksWithMeta). On a failed first attempt while not logged in, the
# cookie file is dropped and a guest login is performed before retrying. When the
# API instead reports (in _meta.error.info) that the channel is already being
# watched on another device, the user is asked (MessageBox YES/NO, Polish text)
# whether to take over; YES POSTs 'v1/channels/close' with the channel id and
# user token, then retries. Found links are decorated with the session cookie
# header and User-Agent, then quality-sorted against videostar_defquality
# (bitrate/bandwidth key, 1.3x headroom) via CSelOneLink, optionally collapsed to
# the single best link. Returns urlsTab (possibly empty).
def getVideoLink(self, cItem): printDBG("VideoStarApi.getVideoLink") urlsTab = [] idx = cItem.get('priv_idx', -1) if idx < 0 or idx >= len(self.cacheChannelList): return urlsTab vidItem = self.cacheChannelList[idx] formatId = config.plugins.iptvplayer.videostar_streamprotocol.value tries = 0 while True: tries += 1 try: if self.loggedIn: url = 'v1/channel/%s?format_id=%s&device_type=web' % ( vidItem['id'], formatId) else: url = 'v1/guest/channel/%s?format_id=%s&device_type=web' % ( vidItem['id'], formatId) url = self.getFullUrl(url, 'api') sts, data = self.cm.getPage(url, self.defaultParams) printDBG(data) if not sts and not self.loggedIn and tries == 1: rm(self.COOKIE_FILE) self.doLogin('guest', 'guest') sts, data = self.cm.getPage( self.getFullUrl('/static/guest/channels/list/web.json', 'static'), self.defaultParams) if sts: continue if not sts: break data = json_loads(data) if data['data'] != None: for item in data['data']['stream_channel']['streams']: if formatId == '2': if 'hls' in item['type']: hslUrl = item['url'][0] # add here random urlsTab.extend( getDirectM3U8Playlist( hslUrl, checkExt=False, cookieParams=self.defaultParams, checkContent=True)) elif formatId in ['3', '4']: if 'dash' in item['type']: dashUrl = item['url'][0] # add here random urlsTab.extend( getMPDLinksWithMeta( dashUrl, checkExt=False, cookieParams=self.defaultParams)) elif data['_meta'] != None: info = data['_meta']['error']['info'] message = [] message.append( 'Oglądasz już kanał %s na urządeniu %s o adresie: %s.' % (info['channel_name'], info['device'], info['user_ip'])) message.append( 'W WP Pilocie nie możesz oglądać większej liczby kanałów jednocześnie.' 
) message.append('Czy chcesz kontynować tutaj?') arg1 = self.sessionEx.waitForFinishOpen( MessageBox, '\n'.join(message), type=MessageBox.TYPE_YESNO) if arg1: url = self.getFullUrl('v1/channels/close', 'api') paramsUrl = dict(self.defaultParams) paramsUrl['header'] = dict(paramsUrl['header']) paramsUrl['header']['Referer'] = self.getFullUrl('tv') paramsUrl['header']['Origin'] = self.MAIN_URL[:-1] paramsUrl['header'][ 'content-type'] = 'application/json;charset=UTF-8' paramsUrl['raw_post_data'] = True sts, data = self.cm.getPage( url, paramsUrl, '{"channelId":"%s","t":"%s"}' % (info['channel_id'], self.userToken)) printDBG("==================== token1[%s] token2[%s]" % (self.userToken, info['stream_token'])) printDBG(data) printDBG("====================") continue except Exception: printExc() break if len(urlsTab): cookieHeader = self.cm.getCookieHeader(self.COOKIE_FILE) for idx in range(len(urlsTab)): urlsTab[idx]['url'] = strwithmeta(urlsTab[idx]['url'], { 'Cookie': cookieHeader, 'User-Agent': self.USER_AGENT }) if len(urlsTab): maxBitrate = int( config.plugins.iptvplayer.videostar_defquality.value) * 1.3 def __getLinkQuality(itemLink): try: if 'bitrate' in itemLink: return int(itemLink['bitrate']) elif 'bandwidth' in itemLink: return int(itemLink['bandwidth']) except Exception: printExc() return 0 oneLink = CSelOneLink(urlsTab, __getLinkQuality, maxBitrate) urlsTab = oneLink.getSortedLinks() if config.plugins.iptvplayer.videostar_use_defquality.value: urlsTab = [urlsTab[0]] return urlsTab
def getList(self, cItem):
    # Build self.currList for the given navigation item.
    # cItem['priv_category'] selects the mode:
    #   ''                -> main level: add "main" dir and parse the site menu
    #   'list_categories' -> list sub-categories from the parsed menu tree
    #   'list_items'      -> list camera video items (paged via the ajax endpoint)
    # Returns self.currList (list of item dicts).
    printDBG("WebCameraApi.getChannelsList")
    self.currList = []
    try:
        category = cItem.get('priv_category', '')
        if category == '':
            params = dict(cItem)
            params.update({
                'title': _('main'),
                'priv_category': 'list_items'
            })
            self.addDir(params)
            sts, data = self.getPage(self.getMainUrl())
            if not sts:
                return []
            # category menu lives inside the <nav> element; tokenize its
            # li/ul structure so listToDir can build a tree from it
            data = self.cm.ph.getDataBeetwenMarkers(
                data, '<nav', '</nav>', False)[1]
            data = re.compile('(<li[^>]*?>|</li>|<ul[^>]*?>|</ul>)').split(
                data)
            if len(data) > 1:
                try:
                    cTree = self.listToDir(data[1:-1], 0)[0]
                    params = dict(cItem)
                    params['c_tree'] = cTree['list'][0]
                    params['priv_category'] = 'list_categories'
                    self.listCategories(params, 'list_items')
                except Exception:
                    printExc()
        elif category == 'list_categories':
            self.listCategories(cItem, 'list_items')
        elif category == 'list_items':
            page = cItem.get('page', 1)
            # FIX: dict() is a shallow copy, so the 'header' dict was still
            # shared with self.defaultParams and the original code mutated
            # self.defaultParams['header'] directly - the X-Requested-With
            # header leaked permanently into every later request made by this
            # host. Copy the header dict and modify only the local params.
            getPageParams = dict(self.defaultParams)
            getPageParams['header'] = dict(getPageParams['header'])
            if page > 1:
                getPageParams['header']['X-Requested-With'] = 'XMLHttpRequest'
            sts, data = self.getPage(cItem['url'], getPageParams)
            if not sts:
                return []
            if page == 1:
                # first page: scrape the data-* attributes of the listing div;
                # they become the query params of the ajax "more" endpoint
                tmp = self.cm.ph.getSearchGroups(
                    data, '''(<div[^>]+?inline\-camera\-listing[^>]+?>)''')[0]
                printDBG(">> \"%s\"" % tmp)
                tmp = re.compile(
                    '''data\-([^=^'^"^\s]+?)\s*=\s*['"]([^'^"]+?)['"]'''
                ).findall(tmp)
                cItem = dict(cItem)
                cItem['more_params'] = {}
                for item in tmp:
                    cItem['more_params'][item[0]] = item[1]
                cItem['more_url'] = self.cm.ph.getSearchGroups(
                    data, '''['"]([^'^"]*?/ajax/[^'^"]+?)['"]''')[0]
            else:
                # subsequent pages come back as JSON with the markup in 'html'
                try:
                    data = json_loads(data, '', True)['html']
                except Exception:
                    printExc()
                    return []
            data = self.cm.ph.getAllItemsBeetwenNodes(
                data, ('<div', '>', 'inlinecam'), ('</div', '>'))
            vidCount = 0
            for item in data:
                url = self.cm.ph.getSearchGroups(
                    item, """href=['"]([^'^"]+?)['"]""")[0]
                if '' != url:
                    title = self.cleanHtmlStr(item)
                    # lazy-loaded thumbnails use data-src; fall back to src
                    icon = self.cm.ph.getSearchGroups(
                        item, """data\-src=['"]([^'^"]+?)['"]""")[0]
                    if icon == '':
                        icon = self.cm.ph.getSearchGroups(
                            item,
                            """src=['"]([^'^"]+?\.jpg[^'^"]*?)['"]""")[0]
                    params = dict(cItem)
                    params.update({
                        'title': title,
                        'url': self.getFullUrl(url),
                        'icon': self.getFullIconUrl(icon)
                    })
                    self.addVideo(params)
                    vidCount += 1
            # check if next page is needed
            if vidCount > 0:
                urlParams = dict(cItem['more_params'])
                urlParams['page'] = page + 1
                try:
                    urlParams['cameras'] = page * int(urlParams['limit']) - 1
                except Exception:
                    printExc()
                try:
                    urlParams['columns'] = page * (int(urlParams['limit']) + 1)
                except Exception:
                    printExc()
                #urlParams['cameras'] = '14'
                #urlParams['columns'] = '12'
                url = self.getFullUrl(cItem['more_url'])
                url += '?' + urllib.urlencode(urlParams)
                # probe the next page; safe to set on the local copy now
                getPageParams['header']['X-Requested-With'] = 'XMLHttpRequest'
                sts, data = self.getPage(url, getPageParams)
                if sts and data.startswith(
                        '{') and '"last":true' not in data:
                    params = dict(cItem)
                    params.update({
                        'title': _('Next page'),
                        'url': url,
                        'page': page + 1
                    })
                    self.addDir(params)
    except Exception:
        printExc()
    self.addDefaultIcons()
    return self.currList
def getSearchResult(self, pattern, searchType, page, nextPageCategory, sortBy='', url=''):
    # Perform a YouTube search, or fetch a continuation page when 'url' is
    # given, and return a list of item dicts (videos, channels, playlists,
    # plus an optional 'Next Page' entry).
    printDBG(
        'YouTubeParser.getSearchResult pattern[%s], searchType[%s], page[%s]'
        % (pattern, searchType, page))
    currList = []
    try:
        #url = 'http://www.youtube.com/results?search_query=%s&filters=%s&search_sort=%s&page=%s' % (pattern, searchType, sortBy, page)
        nextPage = {}
        nP = {}
        nP_new = {}
        r2 = []
        if url:
            # next page search
            sts, data = self.cm.getPage(url, self.http_params, self.postdata)
            if sts:
                response = json_loads(data)
        else:
            # new search
            # url = 'http://www.youtube.com/results?search_query=%s&filters=%s&search_sort=%s' % (pattern, searchType, sortBy)
            url = 'https://www.youtube.com/results?search_query=' + pattern + '&sp='
            # 'sp' values are pre-encoded filter blobs selecting the result type
            if searchType == 'video':
                url += 'EgIQAQ%253D%253D'
            if searchType == 'channel':
                url += 'EgIQAg%253D%253D'
            if searchType == 'playlist':
                url += 'EgIQAw%253D%253D'
            if searchType == 'live':
                url += 'EgJAAQ%253D%253D'
            sts, data = self.cm.getPage(url, self.http_params)
            if sts:
                self.checkSessionToken(data)
                # ytInitialData is embedded as JS; the "};" end marker cuts
                # off the closing brace, hence the '+ "}"' before parsing
                data2 = self.cm.ph.getDataBeetwenMarkers(
                    data, "window[\"ytInitialData\"] =", "};", False)[1]
                if len(data2) == 0:
                    data2 = self.cm.ph.getDataBeetwenMarkers(
                        data, "var ytInitialData =", "};", False)[1]
                response = json_loads(data2 + "}")
        if not sts:
            return []
        #printDBG("--------------------")
        #printDBG(json_dumps(response))
        #printDBG("--------------------")
        # search videos
        r2 = list(self.findKeys(response, 'videoRenderer'))
        printDBG("---------------------")
        printDBG(json_dumps(r2))
        printDBG("---------------------")
        for item in r2:
            params = self.getVideoData(item)
            if params:
                printDBG(str(params))
                currList.append(params)
        # search channels
        r2 = list(self.findKeys(response, 'channelRenderer'))
        printDBG("---------------------")
        printDBG(json_dumps(r2))
        printDBG("---------------------")
        for item in r2:
            params = self.getChannelData(item)
            if params:
                printDBG(str(params))
                currList.append(params)
        #search playlists
        r2 = list(self.findKeys(response, 'playlistRenderer'))
        printDBG("---------------------")
        printDBG(json_dumps(r2))
        printDBG("---------------------")
        for item in r2:
            params = self.getPlaylistData(item)
            if params:
                printDBG(str(params))
                currList.append(params)
        # pagination: old layout uses nextContinuationData, new layout
        # uses continuationEndpoint - handle whichever is present
        nP = list(self.findKeys(response, "nextContinuationData"))
        nP_new = list(self.findKeys(response, "continuationEndpoint"))
        if nP:
            nextPage = nP[0]
            #printDBG("-------------------------------------------------")
            #printDBG(json_dumps(nextPage))
            #printDBG("-------------------------------------------------")
            ctoken = nextPage["continuation"]
            itct = nextPage["clickTrackingParams"]
            try:
                label = nextPage["label"]["runs"][0]["text"]
            except:
                label = _("Next Page")
            urlNextPage = self.updateQueryUrl(
                url, {
                    'pbj': '1',
                    'ctoken': ctoken,
                    'continuation': ctoken,
                    'itct': itct
                })
            params = {
                'type': 'more',
                'category': "search_next_page",
                'title': label,
                'page': str(int(page) + 1),
                'url': urlNextPage
            }
            printDBG(str(params))
            currList.append(params)
        elif nP_new:
            printDBG("-------------------------------------------------")
            printDBG(json_dumps(nP_new))
            printDBG("-------------------------------------------------")
            nextPage = nP_new[0]
            ctoken = nextPage["continuationCommand"]["token"]
            itct = nextPage["clickTrackingParams"]
            label = _("Next Page")
            urlNextPage = self.updateQueryUrl(
                url, {
                    'pbj': '1',
                    'ctoken': ctoken,
                    'continuation': ctoken,
                    'itct': itct
                })
            params = {
                'type': 'more',
                'category': "search_next_page",
                'title': label,
                'page': str(int(page) + 1),
                'url': urlNextPage
            }
            printDBG(str(params))
            currList.append(params)
    except Exception:
        printExc()
    return currList
def getLinksForVideo(self, cItem):
    # Resolve video links for the page in cItem['url'].
    # Two paths: pages embedding a <glomex-player> resolve via the glomex
    # integration API; otherwise the site's own getPlayer(...) JS call is
    # parsed and the playerhls/videoConfig/stream-access chain is followed.
    # Returns a list of link dicts (may be empty).
    urlsTab = []
    rm(self.COOKIE_FILE)
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return []
    tmp = ph.find(data, ('<glomex-player', '>'))[1]
    if tmp:
        player_id = ph.getattr(tmp, 'data-player-id')
        playlist_id = ph.getattr(tmp, 'data-playlist-id')
        # FIX: the query string contained the mojibake "¤t_url=" - the
        # "&curren" prefix of "&current_url" had been swallowed as the HTML
        # entity &curren; - restore the real parameter name
        url = 'https://integration-cloudfront-eu-west-1.mes.glomex.cloud/?integration_id=%s&playlist_id=%s&current_url=' % (
            player_id, playlist_id)
        sts, data = self.getPage(url)
        if not sts:
            return []
        try:
            data = json_loads(data)['videos'][0]['source']
            if data.get('hls'):
                hlsUrl = self.cm.getFullUrl(data['hls'], self.cm.meta['url'])
                urlsTab = getDirectM3U8Playlist(
                    hlsUrl,
                    checkContent=True,
                    sortWithMaxBitrate=999999999,
                    mergeAltAudio=True)
                if len(urlsTab):
                    urlsTab.append({
                        'name': 'Variable M3U8/HLS',
                        'url': hlsUrl,
                        'need_resolve': 0
                    })
            # progressive links seem do not work why?
            if False and data.get('progressive'):
                mp4Url = self.cm.getFullUrl(data['progressive'],
                                            self.cm.meta['url'])
                urlsTab.append({
                    'name': 'progressive mp4',
                    'url': mp4Url,
                    'need_resolve': 0
                })
        except Exception:
            printExc()
    else:
        urlParams = dict(self.defaultParams)
        urlParams['header'] = MergeDicts(urlParams['header'],
                                         {'Referer': self.cm.meta['url']})
        urlParams['raw_post_data'] = True
        urlParams['use_new_session'] = True
        # getPlayer(<...>) arguments: [1]=video id, [3]=partner id, [5]=language
        playerData = ph.find(data, 'getPlayer(', ');', flags=0)[1].split(',')
        printDBG("playerData <<< %s" % playerData)
        if len(playerData) == 6:
            url = self.cm.getFullUrl(
                '/videoplayer/playerhls.php?play=%s&key=%d&identifier=web&v5partner=%s&autoplay=true&event' %
                (playerData[1].strip(), int(time.time() * 1000),
                 playerData[3].strip()), self.cm.meta['url'])
            sts, data = self.getPage(url, urlParams)
            urlParams['header'] = MergeDicts(urlParams['header'],
                                             {'Referer': self.cm.meta['url']})
            url = self.cm.getFullUrl(
                '/server/videoConfig.php?videoid=%s&partnerid=%s&language=%s&format=iphone' %
                (playerData[1].strip(), playerData[3].strip(),
                 playerData[5].strip()[1:-1]), self.cm.meta['url'])
            sts, data = self.getPage(url, urlParams)
            try:
                url = json_loads(data)['video']['streamAccess']
                url = self.cm.getFullUrl(url, self.cm.meta['url'])
                sts, data = self.getPage(url, urlParams, '[""]')
                try:
                    printDBG("++++")
                    printDBG(data)
                    printDBG("++++")
                    data = json_loads(data)['data']['stream-access']
                    for url in data:
                        sts, streamData = self.getPage(
                            self.cm.getFullUrl(url, self.cm.meta['url']),
                            urlParams)
                        if not sts:
                            continue
                        printDBG("?----?")
                        # FIX: was printing the stale outer 'data' instead of
                        # the response that was just fetched
                        printDBG(streamData)
                        printDBG("?----?")
                        # stream url is tokenized with an 'hdnea' auth token
                        token = ph.getattr(streamData, 'auth')
                        hlsUrl = self.cm.getFullUrl(
                            ph.getattr(streamData, 'url'),
                            self.cm.meta['url']) + '?hdnea=' + token
                        urlsTab = getDirectM3U8Playlist(
                            hlsUrl,
                            checkContent=True,
                            sortWithMaxBitrate=999999999,
                            mergeAltAudio=True)
                        break
                except Exception:
                    printExc()
            except Exception:
                printExc()
    return urlsTab
def getDirectLinks(self, url, formats='flv, mp4', dash=True, dashSepareteList=False, allowVP9=None, allowAgeGate=None):
    # Extract direct video links for a YouTube url.
    #
    # url              -- watch url (or a /channel/.../live url, which is
    #                     first resolved to the current live video)
    # formats          -- comma separated list of accepted extensions
    # dash             -- also collect DASH links
    # dashSepareteList -- when True return the tuple (retList, dashList),
    #                     otherwise one merged list
    # allowVP9/allowAgeGate -- passed through to YoutubeIE
    printDBG('YouTubeParser.getDirectLinks')
    extractedLinks = []  # renamed from 'list' - do not shadow the builtin
    try:
        if self.cm.isValidUrl(url) and '/channel/' in url and url.endswith(
                '/live'):
            sts, data = self.cm.getPage(url)
            if sts:
                videoId = self.cm.ph.getSearchGroups(
                    data,
                    '''<meta[^>]+?itemprop=['"]videoId['"][^>]+?content=['"]([^'^"]+?)['"]'''
                )[0]
                if videoId == '':
                    videoId = self.cm.ph.getSearchGroups(
                        data,
                        '''['"]REDIRECT_TO_VIDEO['"]\s*\,\s*['"]([^'^"]+?)['"]'''
                    )[0]
                if videoId != '':
                    url = 'https://www.youtube.com/watch?v=' + videoId
        extractedLinks = YoutubeIE()._real_extract(
            url, allowVP9=allowVP9, allowAgeGate=allowAgeGate)
    except Exception:
        printExc()
        if dashSepareteList:
            return [], []
        else:
            return []
    reNum = re.compile('([0-9]+)')
    retHLSList = []
    retList = []
    dashList = []
    # filter dash
    dashAudioLists = []
    dashVideoLists = []
    if dash:
        # separate audio and video links
        for item in extractedLinks:
            if 'mp4a' == item['ext']:
                dashAudioLists.append(item)
            elif item['ext'] in ('mp4v', 'webmv'):
                dashVideoLists.append(item)
            elif 'mpd' == item['ext']:
                tmpList = getMPDLinksWithMeta(item['url'], checkExt=False)
                printDBG(tmpList)
                for idx in range(len(tmpList)):
                    tmpList[idx]['format'] = "%sx%s" % (tmpList[idx].get(
                        'height', 0), tmpList[idx].get('width', 0))
                    tmpList[idx]['ext'] = "mpd"
                    tmpList[idx]['dash'] = True
                dashList.extend(tmpList)

        # sort by quality -> format
        def _key(x):
            # FIX: the original computed int(...) but never returned it, so
            # every sort key was None and the quality sort was a no-op
            try:
                if x['format'].startswith('>'):
                    return int(x['format'][1:-1])
                return int(ph.search(x['format'], reNum)[0])
            except Exception:
                return 0

        dashAudioLists = sorted(dashAudioLists, key=_key, reverse=True)
        dashVideoLists = sorted(dashVideoLists, key=_key, reverse=True)
    for item in extractedLinks:
        printDBG(">>>>>>>>>>>>>>>>>>>>>")
        printDBG(item)
        printDBG("<<<<<<<<<<<<<<<<<<<<<")
        if -1 < formats.find(item['ext']):
            if 'yes' == item['m3u8']:
                formatMatch = re.search('([0-9]+?)p$', item['format'])
                if formatMatch != None:
                    item['format'] = formatMatch.group(1) + "x"
                    item['ext'] = item['ext'] + "_M3U8"
                    item['url'] = decorateUrl(item['url'],
                                              {"iptv_proto": "m3u8"})
                    retHLSList.append(item)
            else:
                formatMatch = re.search('([0-9]+?x[0-9]+?$)', item['format'])
                if formatMatch != None:
                    item['format'] = formatMatch.group(1)
                    item['url'] = decorateUrl(item['url'])
                    retList.append(item)
    if len(dashAudioLists):
        # use best audio: pair every dash video with the top audio track
        for item in dashVideoLists:
            item = dict(item)
            item["url"] = decorateUrl(
                "merge://audio_url|video_url", {
                    'audio_url': dashAudioLists[0]['url'],
                    'video_url': item['url']
                })
            dashList.append(item)
    # try to get hls format with alternative method
    if 0 == len(retList):
        try:
            video_id = YoutubeIE()._extract_id(url)
            url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
            sts, data = self.cm.getPage(
                url, {
                    'header': {
                        'User-agent':
                        'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10'
                    }
                })
            if sts:
                data = data.replace('\\"', '"').replace('\\\\\\/', '/')
                hlsUrl = self.cm.ph.getSearchGroups(
                    data, '''"hlsvp"\s*:\s*"(https?://[^"]+?)"''')[0]
                hlsUrl = json_loads('"%s"' % hlsUrl)
                if self.cm.isValidUrl(hlsUrl):
                    hlsList = getDirectM3U8Playlist(hlsUrl)
                    if len(hlsList):
                        dashList = []
                        for item in hlsList:
                            # FIX: keys were misspelled ('with'/'heigth') so
                            # the format always came out as "0x0"; missing
                            # keys still fall back to 0 as before
                            item['format'] = "%sx%s" % (item.get(
                                'width', 0), item.get('height', 0))
                            item['ext'] = "m3u8"
                            item['m3u8'] = True
                            retList.append(item)
        except Exception:
            printExc()
    if 0 == len(retList):
        retList = retHLSList
    if dash:
        try:
            sts, data = self.cm.getPage(
                url, {
                    'header': {
                        'User-agent':
                        'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
                    }
                })
            data = data.replace('\\"', '"').replace('\\\\\\/',
                                                    '/').replace('\\/', '/')
            dashUrl = self.cm.ph.getSearchGroups(
                data, '''"dashmpd"\s*:\s*"(https?://[^"]+?)"''')[0]
            dashUrl = json_loads('"%s"' % dashUrl)
            if '?' not in dashUrl:
                dashUrl += '?mpd_version=5'
            else:
                dashUrl += '&mpd_version=5'
            printDBG("DASH URL >> [%s]" % dashUrl)
            if self.cm.isValidUrl(dashUrl):
                dashList = getMPDLinksWithMeta(dashUrl, checkExt=False)
                printDBG(dashList)
                for idx in range(len(dashList)):
                    dashList[idx]['format'] = "%sx%s" % (
                        dashList[idx].get('height', 0),
                        dashList[idx].get('width', 0))
                    dashList[idx]['ext'] = "mpd"
                    dashList[idx]['dash'] = True
        except Exception:
            printExc()
    # live HLS playback should start a little behind the live edge
    for idx in range(len(retList)):
        if retList[idx].get('m3u8', False):
            retList[idx]['url'] = strwithmeta(
                retList[idx]['url'], {'iptv_m3u8_live_start_index': -30})
    if dashSepareteList:
        return retList, dashList
    else:
        retList.extend(dashList)
        return retList
def tryTologin(self):
    # Log into EuroSportPlayer with the credentials from the host
    # configuration. Re-authenticates only when credentials changed or no
    # previous attempt was made; the result is cached in self.loggedIn.
    # Returns self.loggedIn (True/False; a bare 'return' - i.e. None, which
    # is falsy - when the captcha could not be solved).
    printDBG('EuroSportPlayer.tryTologin start')
    errorMsg = _('Error communicating with the server.')
    if None == self.loggedIn or self.login != config.plugins.iptvplayer.eurosportplayer_login.value or\
        self.password != config.plugins.iptvplayer.eurosportplayer_password.value:
        self.login = config.plugins.iptvplayer.eurosportplayer_login.value
        self.password = config.plugins.iptvplayer.eurosportplayer_password.value
        # credentials changed -> drop the old session cookies
        rm(self.COOKIE_FILE)
        self.loggedIn = False
        self.loginMessage = ''
        if '' == self.login.strip() or '' == self.password.strip():
            msg = _('The host %s requires subscription.\nPlease fill your login and password in the host configuration - available under blue button.') % self.getMainUrl()
            GetIPTVNotify().push(msg, 'info', 10)
            return False
        try:
            # get token
            tokenUrl = self.TOKEN_URL
            sts, data = self.getPage(tokenUrl)
            printDBG(data)
            # get config (also with catpcha site-key)
            sts, data = self.getPage(self.CONFIG_URL)
            printDBG(data)
            # solve captcha to login
            # NOTE(review): self.recaptcha_sitekey is presumably filled from
            # the config response above by code outside this view - confirm
            (token, errorMsgTab) = CaptchaHelper().processCaptcha(self.recaptcha_sitekey, self.LOGIN_URL)
            if not token:
                printDBG(str(errorMsgTab))
                return
            printDBG('Captcha token :%s' % token)
            # try to login
            header = {'User-Agent': self.USER_AGENT,
                      'Referer': self.LOGIN_URL,
                      'x-disco-client': 'WEB:x86_64:WEB_AUTH:1.1.0',
                      'x-disco-recaptcha-token': token,
                      'content-type': 'application/json'
                      }
            postData = {'credentials': {'username': self.login , 'password': self.password }}
            url = "https://eu3-prod-direct.eurosportplayer.com/login"
            httpParams = {'header' : header, 'with_metadata':True, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIE_FILE, 'raw_post_data': True}
            sts, data = self.getPage(url, httpParams, post_data = json_dumps(postData))
            '''
            good login
            {
               "data" : {
                  "attributes" : {
                     "lastLoginTime" : "2019-11-01T21:45:15Z",
                     "realm" : "eurosport",
                     "token" : "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJVU0VSSUQ6ZXVyb3Nwb3J0OmI4OGQ0YTBhLWQwZDctNDdkZi1iMzI5LWJjNmM5ZDNiOTRjYyIsImp0aSI6InRva2VuLThkOWYxMDgwLWUwNGEtNDMyZi04NDY1LWUwYTgyNDljMjEwMyIsImFub255bW91cyI6ZmFsc2UsImlhdCI6MTU3MjY4NDk3MX0.DtSAY9kAVfwcJKhPXczRlPW3CACd6ZmZwZvJilIrlv8"
                  },
                  "id" : "token-8d9f1080-e04a-432f-8465-e0a8249c2103",
                  "type" : "token"
               },
               "meta" : {
                  "site" : {
                     "attributes" : {
                        "brand" : "eurosport",
                        "websiteHostName" : "it.eurosportplayer.com"
                     },
                     "id" : "IT",
                     "type" : "site"
                  }
               }
            }
            '''
            '''
            example: wrong password
            {
               "errors" : [
                  {
                     "status" : "401",
                     "code" : "unauthorized",
                     "id" : "ATwRg09NZG",
                     "detail" : ""
                  }
               ]
            }
            '''
            if not sts and '401' in str(data):
                msg = _('Login failed. Invalid email or password.')
                GetIPTVNotify().push(msg, 'error', 10)
                return False
            else:
                # any parsable response is treated as success here
                data = json_loads(data)
                printDBG(str(data))
                self.loggedIn = True
        except Exception:
            printExc()
    printDBG('EuroSportPlayer.tryTologin end loggedIn[%s]' % self.loggedIn)
    return self.loggedIn