def tryTologin(self):
    """Log in to AnyFiles with credentials from the plugin configuration.

    Sets self.logged and returns True when the post-login main page
    contains the logout link; otherwise returns False.
    """
    login = config.plugins.iptvplayer.anyfilespl_login.value
    password = config.plugins.iptvplayer.anyfilespl_password.value
    printDBG("AnyFilesVideoUrlExtractor.tryTologin login[%s]" % login)
    if 0 < len(login) and 0 < len(password):
        # First we need to get a JSESSIONID cookie
        params = dict(self.defaultParams)
        params['load_cookie'] = False
        sts, data = self.cm.getPage(self.LOGIN_URL, params)
        # Then we log in and get a new JSESSIONID
        # NOTE(review): dict() is a shallow copy, so this mutates the
        # shared defaultParams['header'] dict - confirm that is intended
        params = dict(self.defaultParams)
        params['header']['Referer'] = self.LOGIN_URL
        post_data = {'j_username':login, 'j_password':password}
        sts, data = self.cm.getPage(self.LOGIN_URL_2, params, post_data)
        # prev sts will probably be False due to ERROR 302 (redirect),
        # so there is no sense in checking this status here
        sts,data = self.cm.getPage(self.MAINURL, self.defaultParams)
        # presence of the logout link marks a successful login
        if sts and 'href="/Logo?op=w"' in data:
            self.logged = True
            return True
    else:
        printDBG("AnyFilesVideoUrlExtractor.tryTologin wrong login data")
    self.logged = False
    return False
def handleService(self, index, refresh = 0, searchPattern = '', searchType = ''):
    """Dispatch the currently selected item to the matching list-builder.

    Called by the host framework; rebuilds self.currList according to
    the selected item's 'category' field.
    """
    printDBG('handleService start')
    CBaseHostClass.handleService(self, index, refresh, searchPattern, searchType)
    name = self.currItem.get("name", '')
    category = self.currItem.get("category", '')
    printDBG( "handleService: |||||||||||||||||||||||||||||||||||| name[%s], category[%s] " % (name, category) )
    self.currList = []
    #MAIN MENU
    if name == None:
        self.listMainMenu()
    elif category == 'cats':
        self.listCategories(self.currItem, 'show_sort')
    elif category == 'show_sort':
        cItem = dict(self.currItem)
        cItem['category'] = 'list_items'
        self.listsTab(self.sortCache, cItem)
    elif category == 'list_items':
        self.listItems(self.currItem, 'list_episodes')
    elif category == 'list_episodes':
        self.listEpisodes(self.currItem)
    #SEARCH
    elif category in ["search", "search_next_page"]:
        cItem = dict(self.currItem)
        cItem.update({'search_item':False, 'name':'category'})
        self.listSearchResult(cItem, searchPattern, searchType)
    #SEARCH HISTORY
    elif category == "search_history":
        self.listsHistory({'name':'history', 'category': 'search'}, 'desc', _("Type: "))
    else:
        # unknown category: log the traceback for diagnostics
        printExc()
    CBaseHostClass.endHandleService(self, index, refresh)
def _getLinksFromContent(self, data, title_key='title', baseItem={}):
    """Extract per-server video links from an episode page.

    First maps each tab panel's id to its iframe player url and episode
    title, then pairs those with the server names listed in the tabs bar.
    Returns a list of dicts based on *baseItem* with *title_key*,
    'episode_title' and 'url' filled in.

    Fix: renamed the local variable 'id' (shadowed the builtin) to tabId.
    NOTE: *baseItem* is a mutable default argument but is only ever
    copied via dict(), so it is safe here.
    """
    printDBG("LibreStream._getLinksFromContent")
    linksTab = []
    etitleMap = {}
    linksMap = {}
    linksData = self.cm.ph.getDataBeetwenMarkers(data, 'panel-container', '<style>', False)[1]
    linksData = linksData.split('tab-buttons-panel')
    for item in linksData:
        # the tab id links a player panel to its entry in the servers bar
        tabId = self.cm.ph.getSearchGroups(item, 'id="([^"]+?)"')[0]
        playerUrl = self.cm.ph.getSearchGroups(item, '''<iframe[^>]+?src=["'](http[^"^']+?)["']''', 1, True)[0]
        if playerUrl.startswith('http') and tabId != '':
            linksMap[tabId] = playerUrl
            episodeTitle = self.cm.ph.getDataBeetwenMarkers(item, '<h3 class="episodetitle">', '</h3>', False)[1]
            etitleMap[tabId] = self.cleanHtmlStr(episodeTitle)
    servers = self.cm.ph.getDataBeetwenMarkers(data, "<ul class='etabs'>", '</ul>', False)[1]
    servers = servers.split('</li>')
    if len(servers):
        del servers[-1]  # drop the fragment after the last </li>
    for item in servers:
        title = self.cleanHtmlStr( item )
        tabId = self.cm.ph.getSearchGroups(item, 'href="#([^"]+?)"')[0]
        if tabId in linksMap:
            params = dict(baseItem)
            params.update({title_key:title, 'episode_title':etitleMap.get(tabId, ''), 'url':linksMap[tabId]})
            linksTab.append(params)
    return linksTab
def _doSearchMovieNapisy24(self):
    """Launch an asynchronous wget query against the napisy24 web API for the movie title."""
    headers = {"User-Agent": self.NAPISY24_USER_AGENT, "Referer": "http://napisy24.pl/"}
    quotedQuery = "title={0}".format(urllib.quote(self.tmpData["title"]))
    apiUrl = "'http://napisy24.pl/libs/webapi.php?{0}'".format(quotedQuery)
    wgetCmd = DMHelper.getBaseWgetCmd(headers) + apiUrl + " -O - 2> /dev/null "
    printDBG("_doSearchMovieNapisy24 cmd[%s]" % wgetCmd)
    self.iptv_sys = iptv_system(wgetCmd, self._doSearchMovieNapisy24Callback)
def getVideoLinks(self, videoUrl):
    """Resolve *videoUrl* through the external url-parser; returns a list of links."""
    printDBG("LibreStream.getVideoLinks [%s]" % videoUrl)
    if not videoUrl.startswith('http'):
        return []
    return self.up.getVideoLinkExt(videoUrl)
def getLinksForVideo(self, Index = 0, selItem = None):
    """Return playable urls for list entry *Index* wrapped in a RetHost.

    Fix: the original guard 'listLen < Index and listLen > 0' allowed
    Index == listLen and any Index on an empty list to fall through to
    an IndexError; the check now rejects every out-of-range Index.
    Also removed the unused local 'videoID'.
    """
    listLen = len(self.onet.currList)
    if Index >= listLen:
        printDBG( "ERROR getLinksForVideo - current list is too short len: %d, Index: %d" % (listLen, Index) )
        return RetHost(RetHost.ERROR, value = [])
    if self.onet.currList[Index].name != 'playSelectedMovie':
        printDBG( "ERROR getLinksForVideo - current item has wrong type" )
        return RetHost(RetHost.ERROR, value = [])
    retlist = []
    tab = self.onet.api.getVideoTab(self.onet.currList[Index].category)
    if config.plugins.iptvplayer.onetvodUseDF.value:
        # pick the single link closest to the configured default resolution
        maxRes = int(config.plugins.iptvplayer.onetvodDefaultformat.value) * 1.1
        tab = CSelOneLink( tab, _getLinkQuality, maxRes ).getOneLink()
    for item in tab:
        if item[0] == vodonet.FORMAT:
            nameLink = "type: %s \t bitrate: %s" % (item[0], item[2])
            url = item[1]
            retlist.append(CUrlItem(nameLink.encode('utf-8'), url.encode('utf-8'), 0))
    return RetHost(RetHost.OK, value = retlist)
def listsTab(self, tab, cItem):
    """Add one directory entry per element of *tab*, merged on top of *cItem*.

    Fix: the debug message was mislabelled 'listsMainMenu'.
    """
    printDBG("SuperFilm.listsTab")
    for item in tab:
        params = dict(cItem)
        params.update(item)
        params['name'] = 'category'
        self.addDir(params)
def getLinksForVideo(self, cItem):
    """Collect candidate video links from a Cinemay title page.

    Returns a list of {'name','url','need_resolve'} dicts; falls back
    to the page url itself when nothing was found.
    """
    printDBG("Cinemay.getLinksForVideo [%s]" % cItem)
    urlTab = []
    sts, data = self.cm.getPage(cItem['url'])
    if not sts:
        return []
    # mirrors table: rows whose link contains /voir/, /voire/ or /ser/
    tmp = self.cm.ph.getDataBeetwenMarkers(data, '<tbody>', '</tbody>', False)[1]
    tmp = tmp.split('</tr>')
    if len(tmp):
        del tmp[-1]  # leftover after the final row
    for item in tmp:
        url = self.cm.ph.getSearchGroups(item, 'href="([^"]+?)"')[0]
        if '/voir/' in url or '/voire/' in url or '/ser/' in url:
            # hoster name taken from the icon file name (e.g. .../hostname.png)
            title = self.cm.ph.getSearchGroups(item, 'src="[^"]+?/([^/]+?)\.png"')[0]
            title = '[{0}] {1}'.format(title, self.cleanHtmlStr( item ))
            urlTab.append({'name':title, 'url':self._getFullUrl(url), 'need_resolve':1})
    # players embedded directly on the page
    data = self.cm.ph.getAllItemsBeetwenMarkers(data, '<div class="wbox2 video dark">', '</iframe>')
    for item in data:
        videoUrl = self.cm.ph.getSearchGroups(item, '<iframe[^>]+?src="(http[^"]+?)"', 1, True)[0]
        urlTab.append({'name':self.up.getHostName(videoUrl), 'url':self._getFullUrl(videoUrl), 'need_resolve':1})
    if 0 == len(urlTab):
        urlTab.append({'name':'Main url', 'url':cItem['url'], 'need_resolve':1})
    return urlTab
def _cleanedUp(self):
    """Best-effort removal of the buffering file and the gstreamer flag file."""
    targets = (
        (self.filePath,
         'Problem with removing old buffering file'),
        (self.GST_FLV_DEMUX_IS_DEMUXING_INFINITE_FILE,
         'Problem with removing gstreamer flag file [%s]' % self.GST_FLV_DEMUX_IS_DEMUXING_INFINITE_FILE),
    )
    for path, failureMsg in targets:
        if fileExists(path):
            try:
                os_remove(path)
            except:
                printDBG(failureMsg)
def listsSeriesByLetter(self, cItem, category):
    """List series whose (normalized) title starts with cItem['letter'].

    A non-alpha 'letter' selects the bucket of titles that do not start
    with a letter. When the alternative title (t2) matches instead, the
    two titles are swapped so the matching one is shown first. The final
    list is sorted by title.

    Fix: the original reused the name 'match' both for the fetched
    series list and for the per-item boolean flag, shadowing the list
    mid-loop; distinct names are used here.
    """
    printDBG("SerialeNet.listsSeriesByLetter")
    letter = cItem.get('letter', '')
    seriesList = self._listsSeries(cItem['url'])
    for item in seriesList:
        t1 = item['t1']
        t2 = item['t2']
        matched = False
        if letter.isalpha():
            if letter == self._getNormalizeStr(t1, 0).upper():
                matched = True
            elif len(t2) and letter == self._getNormalizeStr(t2, 0).upper():
                matched = True
                t1,t2 = t2,t1
        else:
            # non-letter bucket
            if not self._isalpha(t1, 0):
                matched = True
            elif len(t2) and not self._isalpha(t2, 0):
                matched = True
                t1,t2 = t2,t1
        if matched:
            params = dict(cItem)
            if len(t2):
                t1 += ' (%s)' % t2
            params.update({'title':t1, 'url':item['url'], 'category':category})
            self.addDir(params)
    self.currList.sort(key=lambda item: item['title'])
def listSeasons(self, cItem, category):
    """Fill self.seasonsCache from the series page and list the seasons.

    With exactly one season the intermediate level is skipped and the
    episode list is shown directly.
    """
    printDBG("SerialeNet.listSeasons")
    url = self._getFullUrl(cItem['url'])
    self.seasonsCache = []
    sts, data = self.cm.getPage(url)
    if sts:
        # description and icon are shared by every season entry
        desc = self.cm.ph.getDataBeetwenMarkers(data, '<div id="desc">', '</div>', False)[1]
        icon = self._getFullUrl(self.cm.ph.getSearchGroups(desc, 'src="([^"]+?)"')[0])
        desc = self._cleanHtmlStr(desc)
        data = self.cm.ph.getDataBeetwenMarkers(data, '<div id="wrp1"><br/>', '<script>', False)[1]
        data = data.split('<div')
        if len(data):
            del data[0]  # text preceding the first season block
        for item in data:
            sts, seasonName = self.cm.ph.getDataBeetwenMarkers(item, '<h3>', '</h3>', False)
            if sts:
                self.seasonsCache.append({'title':seasonName, 'episodes':[]})
            episodes = re.findall('<a title="([^"]*?)"[^>]+?href="([^"]+?)"[^>]*?>(.+?)</a>', item)
            for e in episodes:
                self.seasonsCache[-1]['episodes'].append({'title':self._cleanHtmlStr(e[2]), 'url':e[1]})
        if 1 < len(self.seasonsCache):
            seasonsId = 0
            for item in self.seasonsCache:
                params = dict(cItem)
                params.update({'seasons_id':seasonsId, 'title':item['title'], 'category':category, 'icon':icon, 'desc':desc})
                self.addDir(params)
                seasonsId += 1
        elif 1 == len(self.seasonsCache):
            # single season: list its episodes straight away
            cItem.update({'seasons_id':0})
            self.listEpisodes(cItem)
def listSearchResult(self, cItem, searchPattern, searchType):
    """Run a Vevo search and list matching artists and videos via the v2 API."""
    printDBG("Vevo.listSearchResult cItem[%s], searchPattern[%s] searchType[%s]" % (cItem, searchPattern, searchType))
    quotedPattern = urllib.quote_plus(searchPattern)
    searchItem = dict(cItem)
    searchItem.update({'url':self.SEARCH_URL.format(quotedPattern), 'keys':['artists', 'videos']})
    self.listApiv2(searchItem)
def listSearchResult(self, cItem, searchPattern, searchType):
    """Search GameTrailers and add a directory per result tab (videos/reviews/user movies).

    Fix: the original 'else' branch built the default header dict as a
    bare expression statement and discarded it, so requests without a
    Referer were sent without the intended User-Agent override; the
    dict is now actually assigned to httpParams['header']. Also removed
    the unused local 'ContentType'.
    """
    printDBG("GameTrailers.listSearchResult cItem[%s], searchPattern[%s] searchType[%s]" % (cItem, searchPattern, searchType))
    url = self.SEARCH_URL + '?keywords=' + urllib.quote_plus(searchPattern)
    post_data = cItem.get('post_data', None)
    httpParams = dict(self.defaultParams)
    Referer = cItem.get('Referer', None)
    if None != Referer:
        httpParams['header'] = {'Referer':Referer, 'User-Agent':self.cm.HOST}
    else:
        httpParams['header'] = {'User-Agent':self.cm.HOST}
    sts, data = self.cm.getPage(url, httpParams, post_data)
    if not sts:
        return
    # promotionId is required to build the per-tab feed urls
    promotionId = self.cm.ph.getSearchGroups(data, 'promotionId=([^/]+?)/')[0].replace('"', '').replace("'", "") + '/'
    data = self.cm.ph.getDataBeetwenMarkers(data, '<ul class="module_tabs">', '</ul>', False)[1]
    data = data.split('</a>')
    for item in data:
        sts, tab = self.cm.ph.getDataBeetwenMarkers(item, 'class="tab_', '"', False)
        if not sts:
            continue
        if tab not in ['videos', 'reviews', 'user_movies']:
            continue
        title = self.cleanHtmlStr( item )
        baseUrl = self.MAIN_URL + 'feeds/search/child/{0}/?keywords={1}&tabName={2}'.format(promotionId, urllib.quote_plus(searchPattern), tab)
        params = {'name':'category', 'base_url':baseUrl, 'title':title, 'mode':'search', 'search_tab':tab}
        params['category'] = 'list_sort_by'
        self.addDir(params)
def listsTab(self, tab, cItem):
    """Append a directory entry for every element of *tab*, based on *cItem*."""
    printDBG("Filmy3dEU.listsTab")
    for entry in tab:
        newItem = dict(cItem)
        newItem.update(entry)
        newItem['name'] = 'category'
        self.addDir(newItem)
def listBrowseShows(self, cItem, category):
    """List Vevo shows, filling self.cacheShows from the API on first use.

    Shows with at least one season are routed to 'list_show_videos';
    all others use the *category* argument.

    Fix: the original assigned 'list_show_videos' to the shared
    'category' parameter inside the loop, so every show after the first
    one with seasons inherited that category even without seasons; a
    per-item variable is used instead.
    """
    printDBG("Vevo.listBrowseShows")
    if [] == self.cacheShows:
        if 2 != len(self.language):
            return
        url = self.MAIN_URL + 'c/{0}/{1}/shows.json?platform=web'.format(self.language[0], self.language[1])
        sts, data = self.cm.getPage(url)
        if not sts:
            return
        try:
            data = byteify(json.loads(data))
            if data['success']:
                self.cacheShows = data['result']
        except:
            printExc()
            return
    for idx in range(len(self.cacheShows)):
        params = dict(cItem)
        item = self.cacheShows[idx]
        # pick the first available artwork variant
        icon = item.get('header_image_url', '')
        if '' == icon:
            icon = item.get('mobile_image_url', '')
        if '' == icon:
            icon = item.get('thumbnail_image_url', '')
        itemCategory = category
        if 1 <= len(item.get('seasons', [])):
            itemCategory = 'list_show_videos'
        params.update({'category':itemCategory, 'title':item['title'], 'icon':icon, 'desc':item['description'], 'show_id':idx, 'season_id':0})
        self.addDir(params)
def getLinksForVideo(self, cItem):
    """Gather links from a Filmy3dEU movie page.

    Tries the page with the stored cookie first, then without it.
    Collects the main iframe url plus any http links embedded inside
    the <code> section.
    """
    printDBG("Filmy3dEU.getLinksForVideo [%s]" % cItem)
    urlTab = []
    sts, data = self.cm.getPage(cItem['url'], {'use_cookie': True, 'save_cookie': False, 'load_cookie': True, 'cookiefile': self.COOKIE_FILE})
    if not sts:
        # no cookie file? retry as a plain request
        sts, data = self.cm.getPage(cItem['url'])
        if not sts:
            return urlTab
    match = re.search('<iframe[^>]+?src="([^"]+?)"', data, re.IGNORECASE)
    if match:
        url = match.group(1)
        urlTab.append({'name':'Main url [%s]' % self.up.getHostName(url), 'url':url, 'need_resolve':1})
    sts, data = self.cm.ph.getDataBeetwenMarkers(data, '<code>', '</code>', False)
    if not sts:
        return urlTab
    # the <code> block lists urls separated by 'http:'; rebuild each one
    data = data.split('http:')
    for item in data:
        item = item.strip()
        if not item.startswith('//'):
            continue
        url = 'http:' + item
        urlTab.append({'name':self.up.getHostName(url), 'url':url, 'need_resolve':1})
    return urlTab
def getVideoLinks(self, url):
    """Resolve a hoster *url* into direct video links via the external url-parser."""
    printDBG("Movie4kTO.getVideoLinks [%s]" % url)
    return self.up.getVideoLinkExt(url)
def listItems(self, cItem, url):
    """Parse one UstreamTV listing page (JSON wrapping HTML snippets).

    Adds a video entry per item and, when the API reports an 'infinite'
    page, a Next-page directory.
    """
    printDBG("UstreamTV.listItems")
    sts, data = self.cm.getPage(url)
    if not sts:
        return
    nextPage = False
    try:
        data = byteify(json.loads(data))
        if not data['success']:
            return
        nextPage = data['pageMeta']['infinite']
        data = data['pageContent']
        data = data.split('<div class="item media-item">')
        del data[0]  # markup before the first item
        for item in data:
            params = dict(cItem)
            url = self.cm.ph.getSearchGroups(item, 'href="([^"]+?)"')[0]
            title = self.cm.ph.getSearchGroups(item, 'title="([^"]+?)"')[0]
            icon = self.cm.ph.getSearchGroups(item, 'src="([^"]+?)"')[0]
            desc = self.cleanHtmlStr( item )
            params.update({'title':self.cleanHtmlStr( title ), 'icon':self._getFullUrl(icon), 'desc':desc, 'url':self._getFullUrl(url)})
            self.addVideo(params)
    except:
        printExc()
    if nextPage:
        params = dict(cItem)
        params.update({'title':_('Next page'), 'page':cItem.get('page', 1)+1})
        self.addDir(params)
def listFilters(self, table, cat, cat_id, icon):
    """Stamp every entry of *table* with category/id/icon and add it as a directory.

    NOTE: entries are mutated in place, exactly as before.
    """
    printDBG("listFilters for cat[%s] cat_id[%s]" % (cat, cat_id))
    for entry in table:
        entry.update({'category': cat, 'cat_id': cat_id, 'icon': icon})
        self.addDir(entry)
def getTeamCastList(self, cItem): printDBG('getTeamCastList start') #http://team-cast.pl.cp-21.webhostbox.net/kanalyFlash/ #http://team-cast.pl.cp-21.webhostbox.net/ #src="http://team-cast.pl.cp-21.webhostbox.net/kanalyFlash/film/hbo.html" url = cItem['url'] # list categories if '' == url : self.teamCastTab = {} url = 'http://team-cast.pl.cp-21.webhostbox.net/' sts, data = self.cm.getPage(url) if not sts: return data = CParsingHelper.getDataBeetwenMarkers(data, '<div id="stream-frame">', '<div id="now-watching">', False)[1] # remove commented channels data = re.sub('<!--[^!]+?-->', '', data) data = data.split('<li class="menu_right">') del data[0] for cat in data: catName = CParsingHelper.getDataBeetwenMarkers(cat, '<a href="#" class="drop">', '</a>', False)[1].strip() channels = re.findall('<a href="([^"]+?)">([^<]+?)<img src="http://wrzucaj.net/images/2014/09/12/flash-player-icon.png"', cat) if len(channels): self.teamCastTab[catName] = channels newItem = dict(cItem) newItem.update({'url':catName, 'title':catName + ' (%d)' % len(channels)}) self.addDir(newItem) elif url in self.teamCastTab: # List channels for item in self.teamCastTab[url]: newItem = dict(cItem) newItem.update({'url':item[0], 'title':item[1]}) self.playVideo(newItem) else: printExc()
def getWebCamera(self, cItem):
    """List webcam categories (at the 'WebCamera PL' root) or play a category's cameras."""
    printDBG("getWebCamera start")
    sts, data = self.cm.getPage(cItem['url'])
    if sts:
        if cItem['title'] == 'WebCamera PL':
            # root level: featured-cameras entry first, then the category list
            params = dict(cItem)
            params.update({'title':'Polecane kamery'})
            self.addDir(params)
            data = CParsingHelper.getDataBeetwenMarkers(data, '<h4>Kamery wg kategorii</h4>', '</div>', False)[1]
            data = data.split('</a>')
            del data[-1]  # leftover after the final anchor
            for item in data:
                url = self.cm.ph.getSearchGroups(item, """href=['"](http[^'^"]+?)['"]""")[0]
                if '' != url:
                    params = dict(cItem)
                    params.update({'title':self._cleanHtmlStr(item), 'url':url})
                    self.addDir(params)
        else:
            # category level: each 'inlinecam' block is one playable camera
            data = CParsingHelper.getDataBeetwenMarkers(data, '<div class="inlinecam', '<div id="footerbar">', False)[1]
            data = data.split('<div class="inlinecam')
            for item in data:
                item = CParsingHelper.getDataBeetwenMarkers(item, '<a', '</div>', True)[1]
                url = self.cm.ph.getSearchGroups(item, """href=['"](http[^'^"]+?)['"]""")[0]
                if '' != url:
                    title = self._cleanHtmlStr(CParsingHelper.getDataBeetwenMarkers(item, '<div class="bar">', '</div>', False)[1])
                    icon = self.cm.ph.getSearchGroups(item, """data-src=['"](http[^'^"]+?)['"]""")[0]
                    params = dict(cItem)
                    params.update({'title':title, 'url':url, 'icon':icon})
                    self.playVideo(params)
def _listHasBahCaList(self, cItem):
    """Parse the HasBahCa results tables into directory items.

    Each table holds a title row, an optional second title row,
    descriptive rows and a DOWNLOAD link. A single result is resolved
    immediately instead of being listed.
    """
    printDBG("_listHasBahCaList")
    sts,data = self.cm.getPage(cItem['url'], {}, cItem.get('post', None))
    if not sts:
        return
    data = self.cm.ph.getDataBeetwenMarkers(data, '<table class="inhalt">', '<div class="box_oben_r">Login</div>', True)[1]
    data = data.split('<table class="inhalt">')
    if len(data):
        del data[0]  # markup before the first table
    for item in data:
        rows = item.split('</tr>')
        if 0 == len(rows):
            continue
        title = self.cleanHtmlStr(rows[0])
        del rows[0]
        if '' == title:
            # first row was empty markup; use the next row as the title
            title = self.cleanHtmlStr(rows[0])
            del rows[0]
        desc = ''
        for tr in rows:
            tmp = self.cleanHtmlStr(tr)
            if '' == tmp:
                continue
            desc += tmp + ', '
        url = self.cm.ph.getSearchGroups(item, ' title="DOWNLOAD" href="([^"]+?)"')[0]
        if '' != url:
            if '' != desc:
                desc = desc[:-2]  # drop the trailing ', '
            params = {'name': 'HasBahCa', 'category':'hasbahca_resolve', 'title':title, 'url':url, 'desc':desc}
            self.addDir(params)
    if 1 == len(self.currList):
        # exactly one hit: resolve it right away
        item = self.currList[0]
        self.currList = []
        self._listHasBahCaResolve(item)
def handleService(self, index, refresh = 0, searchPattern = '', searchType = ''):
    """Dispatch the selected item to the matching list-builder (AlltubeTV host).

    Called by the host framework; rebuilds self.currList according to
    the selected item's 'category' field.
    """
    printDBG('handleService start')
    CBaseHostClass.handleService(self, index, refresh, searchPattern, searchType)
    name = self.currItem.get("name", '')
    category = self.currItem.get("category", '')
    printDBG( "handleService: |||||||||||||||||||||||||||||||||||| name[%s], category[%s] " % (name, category) )
    self.currList = []
    #MAIN MENU
    if name == None:
        self.listsTab(self.MAIN_CAT_TAB, {'name':'category'})
    #MOVIES
    elif category == 'genres_movies':
        self.listFilters(self.currItem, 'category', 'list_version_filter')
    elif category == 'list_version_filter':
        self.listFilters(self.currItem, 'version', 'list_yer_filter')
    elif category == 'list_yer_filter':
        self.listFilters(self.currItem, 'year', 'list_movies')
    elif category == 'list_movies':
        self.listMovies(self.currItem)
    elif category == 'list_rank':
        self.listsTab(self.RANK_TAB, {'name':'category'})
    elif category == 'list_rank_movie_view':
        self.listRankViewMovies(self.currItem)
    elif category == 'list_rank_movie':
        self.listRankMovie(self.currItem)
    elif category == 'list_rank_series_view':
        self.listRankViewSeries(self.currItem, 'list_seasons')
    #SERIES
    elif category == 'cat_series':
        self.listsTab(self.SERIES_CAT_TAB, {'name':'category'})
    elif category == 'list_series_abc':
        self.listSeriesABC(self.currItem, 'list_series')
    elif category == 'list_series':
        self.listSeries(self.currItem, 'list_seasons')
    elif category == 'list_seasons':
        self.listSeasons(self.currItem, 'list_episodes')
    elif category == 'list_episodes':
        self.listEpisodes(self.currItem)
    #LATEST ADDED
    elif category == 'latest_added':
        self.listsTab(self.LAST_ADDED_TAB, {'name':'category'})
    elif category == 'latest_added_movies':
        self.listLatestAddedMovies(self.currItem)
    elif category == 'latest_added_series':
        self.listLatestAddedSeries(self.currItem, 'list_seasons')
    #SEARCH
    elif category in ["search", "search_next_page"]:
        cItem = dict(self.currItem)
        cItem.update({'search_item':False, 'name':'category'})
        self.listSearchResult(cItem, searchPattern, searchType)
    #SEARCH HISTORY
    elif category == "search_history":
        self.listsHistory({'name':'history', 'category': 'search'}, 'desc', _("Type: "))
    else:
        # unknown category: log the traceback for diagnostics
        printExc()
    CBaseHostClass.endHandleService(self, index, refresh)
def listSeriesABC(self, cItem, category):
    """List the alphabet letters for series browsing, filling the cache on demand."""
    printDBG("AlltubeTV.listSeriesABC")
    if 0 == len(self.seriesLetters):
        self.fillSeriesCache(self.MAIN_URL + 'seriale-online/')
    letterItem = dict(cItem)
    letterItem['category'] = category
    self.listsTab(self.seriesLetters, letterItem)
def listEpisodes(self, cItem):
    """List cached episodes for the season selected via cItem['season_idx']."""
    printDBG("AlltubeTV.listEpisodes")
    idx = cItem.get('season_idx', -1)
    if 0 <= idx < len(self.episodesCache):
        self.listsTab(self.episodesCache[idx], cItem, 'video')
def doDowanloadSubtitle(self, callback, subItem, tmpDir, subDir):
    """Start an asynchronous wget download of the selected subtitle.

    OpenSubtitles items are fetched straight to tmpFile; napisy24 items
    arrive as a zip and go to tmpFile + '.zip'. *callback* receives the
    result through the respective completion handlers.
    """
    self.outerCallback = callback
    self.tmpData = {"subItem": subItem, "tmpDir": tmpDir, "subDir": subDir}
    # subItem === private_data
    tmpFile = tmpDir + OpenSubOrgProvider.TMP_FILE_NAME
    self.filesToRemove.append(tmpFile)
    self.tmpData["tmpFile"] = tmpFile
    tmpFile = " '{0}' ".format(tmpFile)  # shell-quoted for the wget command line
    if not subItem.get("napisy_24", False):
        params = {"User-Agent": OpenSubOrgProvider.USER_AGENT}
        url = " '{0}' ".format(subItem["SubDownloadLink"])
        cmd = DMHelper.getBaseWgetCmd(params) + url + " -O " + tmpFile + " > /dev/null 2>&1 "
        printDBG("doDowanloadSubtitle cmd[%s]" % cmd)
        self.iptv_sys = iptv_system(cmd, self._doDowanloadSubtitleCallback)
    else:
        tmpFileZip = self.tmpData["tmpFile"] + ".zip"
        self.tmpData["tmpFileZip"] = tmpFileZip
        self.filesToRemove.append(tmpFileZip)
        tmpFileZip = " '{0}' ".format(tmpFileZip)
        params = {"User-Agent": self.NAPISY24_USER_AGENT, "Referer": "http://napisy24.pl/"}
        url = "'http://napisy24.pl/run/pages/download.php?napisId={0}&typ=sr'".format(subItem["id"])
        cmd = DMHelper.getBaseWgetCmd(params) + url + " -O " + tmpFileZip + " > /dev/null 2>&1 "
        printDBG("_doSearchSubtitleNapisy24Callback cmd[%s]" % cmd)
        self.iptv_sys = iptv_system(cmd, self._doDowanloadSubtitle24Callback)
def start(self, url, filePath, params = {}, info_from=None, retries=0):
    ''' Override of start from BaseDownloader.

    Kicks off an asynchronous wget download of *url* into *filePath*;
    _cmdFinished is invoked when the console command exits.
    Always returns BaseDownloader.CODE_OK (the download runs async).
    '''
    self.url = url
    self.filePath = filePath
    self.downloaderParams = params
    self.fileExtension = ''  # should be implemented in future
    self.outData = ''
    self.contentType = 'unknown'
    if None == info_from:
        info_from = WgetDownloader.INFO.FROM_FILE
    self.infoFrom = info_from
    # NOTE(review): url/filePath are interpolated into a shell command
    # using plain double quotes - a value containing '"' or '$' would
    # break or inject into the shell; confirm inputs are sanitized upstream.
    cmd = 'wget ' + '"' + self.url + '" -O "' + self.filePath + '" > /dev/null'
    printDBG("Download cmd[%s]" % cmd)
    self.console = eConsoleAppContainer()
    self.console_appClosed_conn = eConnectCallback(self.console.appClosed, self._cmdFinished)
    self.console.execute( cmd )
    self.wgetStatus = self.WGET_STS.CONNECTING
    self.status = DMHelper.STS.DOWNLOADING
    self.onStart()
    return BaseDownloader.CODE_OK
def getChannelsList(self): printDBG("SatLiveApi.getChannelsList") # login premium = config.plugins.iptvplayer.satlivetv_premium.value login = config.plugins.iptvplayer.satlivetv_login.value password = config.plugins.iptvplayer.satlivetv_password.value if premium: if self.doLogin(login, password): self.loggedIn = True self.http_params.update({'save_cookie': True, 'load_cookie': True, 'cookiefile': self.COOKIE_FILE}) else: self.sessionEx.open(MessageBox, _('Problem z zalogowanie użytkownika "%s. Sprawdź dane do logowania w konfiguracji hosta."') % login, type = MessageBox.TYPE_INFO, timeout = 10 ) # get channels channelsList = [] sts, data = self.cm.getPage(SatLiveApi.LIST_URL, self.http_params) if not sts: return [] data = self.cm.ph.getDataBeetwenMarkers(data, '<div class="tab-pane active" id="live">', '<div class="tab-pane" id="all">', False)[1] data = data.split('</li>') for item in data: tmp = self.cm.ph.getSearchGroups(item, '<h4><a href="([^"]+?)">([^<]+?)</a></h4>', 2) if '' != tmp[0]: params = {} params['url'] = SatLiveApi.MAINURL + tmp[0] params['title'] = tmp[1] icon = self.cm.ph.getSearchGroups(item, '<img class="" src="([^"]+?)"')[0] if '' != icon: params['icon'] = SatLiveApi.MAINURL + icon params['desc'] = self.cm.ph.removeDoubles(clean_html(item.replace('>', '> ')).replace('\t', ' '), ' ') channelsList.append(params) return channelsList
def _doGetEpisodesCallback(self, code, data):
    """Parse the episodes JSON reply and hand the result to outerCallback.

    The episode matching tmpData's season/episode (if any) is promoted
    to the front of the returned list.

    Fix: the original accumulated results in a variable named 'list',
    shadowing the builtin; renamed to episodesList.
    """
    sts = False
    episodesList = []
    promotItem = {}
    if code == 0:
        try:
            season = self.tmpData["episode_data"].get("season", -1)
            episode = self.tmpData["episode_data"].get("episode", -1)
            printDBG("_doGetEpisodesCallback s[%s] e[%s]" % (season, episode))
            data = byteify(json.loads(data))
            key, value = data.popitem()
            for item in value["episodes"]:
                params = dict(self.tmpData["private_data"])
                params.update({"season": item["season"], "episode_title": item["name"], "episode": item["number"]})
                title = "s{0}e{1} {2}".format( str(item["season"]).zfill(2), str(item["number"]).zfill(2), item["name"] )
                if season == item["season"] and episode == item["number"] and promotItem == {}:
                    # remember the requested episode so it can lead the list
                    promotItem = {"title": title, "private_data": params}
                else:
                    episodesList.append({"title": title, "private_data": params})
            sts = True
        except:
            printExc()
            self.lastApiError = {"code": -999, "message": _("json load error 2")}
    if promotItem != {}:
        episodesList.insert(0, promotItem)
    self.tmpData = {}
    self.outerCallback(sts, episodesList)
def getSatLiveList(self, url):
    """Fetch SatLive channels (creating the API object lazily) and add each as playable."""
    printDBG('getSatLiveList start')
    if self.satLiveApi is None:
        self.satLiveApi = SatLiveApi()
    for channel in self.satLiveApi.getChannelsList():
        self.playVideo(channel)
def listSportFilters(self, cItem, nextCategory):
    """Build the sports filter menu from the EuroSportPlayer menubar JSON.

    Non-collection items from 'included' are stored in the local caches
    (self.esp*); the 'auto-taxonomy-container' collection provides the
    menu entries, each resolved to a name, icon and route url.
    """
    printDBG("EuroSportPlayer.listSportFilters [%s]" % cItem)
    try:
        sts, data = self.getPage(self.MENUBAR_URL)
        if not sts:
            return
        data = json_loads(data)
        menuData = {}
        for item in data['included']:
            if item['type'] == 'collection':
                if item['attributes']['alias'] == 'auto-taxonomy-container':
                    printDBG(item['attributes']['title'])
                    menuData = item['relationships']['items']
                    printDBG("-----------------------")
                    printDBG(json_dumps(menuData))
                    printDBG("-----------------------")
            else:
                # non-collection records feed the id-indexed caches used below
                self.addItemInDB(item)
        for item in menuData['data']:
            #printDBG(json_dumps(self.espCollectionItems[item['id']]))
            # follow collectionItem -> taxonomyNode -> images/routes references
            node_id = self.espCollectionItems[item['id']]['relationships']['taxonomyNode']['data']['id']
            node = self.espTaxonomyNodes[node_id]
            #printDBG(json_dumps(node))
            iconData = self.espImages[node['relationships']['images']['data'][0]['id']]
            #printDBG(json_dumps(iconData))
            route_id = node['relationships']['routes']['data'][0]['id']
            routeData = self.espRoutes[route_id]
            #printDBG(json_dumps(routeData))
            title = node['attributes']['name']
            icon = iconData['attributes']['src']
            url = self.getFullPath(routeData['attributes']['url'], 'route')
            params = {'good_for_fav':False, 'category':nextCategory, 'title':title, 'icon':icon, 'url': url}
            printDBG(str(params))
            self.addDir(params)
        printDBG("-------------------------------------------")
    except Exception:
        printExc()
def getLinksForVideo(self, cItem):
    """Collect video links from a Darshow title page, one per server tab.

    The (Arabic) tab names select the handling: ViP and download tabs
    are skipped, the trailer tab yields a direct file url, the
    "other servers" tab lists plain anchors, and any remaining tab is
    read from its iframe. The default server is inserted first.
    """
    printDBG("DarshowCom.getLinksForVideo [%s]" % cItem)
    urlTab = []
    sts, data = self.cm.getPage(cItem['url'])
    if not sts:
        return []
    data = self.cm.ph.getDataBeetwenMarkers(data, '<div class="tabs-wr">', '<div class="fullin">')[1]
    # get tabs names
    tmp = self.cm.ph.getDataBeetwenMarkers( data, '<div class="tabs-wr">', '<div class="box visible clearfix">')[1]
    tmp = self.cm.ph.getAllItemsBeetwenMarkers(tmp, '<li', '</li>')
    tabsNames = []
    for item in tmp:
        tabsNames.append(self.cleanHtmlStr(item))
    # content panels; with a single panel fall back to splitting the raw data
    tmp = self.cm.ph.getAllItemsBeetwenMarkers(data, 'clearfix">', '</div>', False)
    if len(tmp) == 1:
        data = [data.split('clearfix">')[-1]]
    else:
        data = tmp
    if 'trailer' not in cItem and len(data) != len(tabsNames):
        printDBG('>>>>>>>>>>>>>>>>>>>ERROR<<<<<<<<<<<<<<<<<<<<')
        printDBG( '>>>>>>>>>>>>>>>>>>> Something is wrong len(data)[%d] != len(tabsNames)[%d] !!!' % (len(data), len(tabsNames)))
    for idx in range(len(data)):
        item = data[idx]
        printDBG(item)
        if idx < len(tabsNames):
            tabName = tabsNames[idx]
        else:
            tabName = 'ERROR'
        url = ''
        if 'ViP' in tabName:
            # vip links are not supported
            continue
        elif 'روابط التحميل' in tabName:
            # download links not supported
            continue
        elif 'إعلان الفيلم' in tabName:
            # trailer: direct file url from the player config
            url = self.cm.ph.getSearchGroups( item, '''file:[^"^']*?["'](http[^'^"]+?)["']''')[0]
            title = _('[Trailer]') + ' ' + tabName
        elif 'باقي السيرفرات' in tabName:
            # "other servers": plain anchors, appended directly here
            servers = re.compile( '''<a[^>]+?href=['"]([^'^"]+?)['"][^>]*?>([^<]+?)<''' ).findall(item)
            for server in servers:
                url = self._getFullUrl(server[0])
                title = tabName + ' ' + self.cleanHtmlStr(server[1])
                if url.startswith('http'):
                    urlTab.append({ 'name': title, 'url': strwithmeta(url, {'Referer': cItem['url']}), 'need_resolve': 1 })
            url = ''  # already handled; skip the common path below
        elif 'iframe' in item:
            url = self.cm.ph.getSearchGroups( item, '''<iframe[^>]+?src=['"]([^"^']+?)['"]''', 1, True)[0]
            title = tabName
            url = self._getFullUrl(url)
        if url.startswith('http'):
            if title == 'ERROR':
                title = self.up.getHostName(url, nameOnly=True)
            params = { 'name': title, 'url': strwithmeta(url, {'Referer': cItem['url']}), 'need_resolve': 1 }
            if 'الإفتراضي' in title:
                # default server: insert as first
                urlTab.insert(0, params)
            else:
                urlTab.append(params)
    return urlTab
def listItems(self, cItem, nextCategory='explore_item'):
    """List one page of Darshow items, with an optional Next-page entry.

    Search pages are paged via post_data['search_start']; the next-page
    url is accepted only when the navigation block really points at
    page + 1.
    """
    printDBG("DarshowCom.listItems")
    page = cItem.get('page', 1)
    post_data = cItem.get('post_data', None)
    url = cItem['url']
    if page > 1:
        if post_data != None:
            post_data['search_start'] = page
    sts, data = self.cm.getPage(url, {}, post_data)
    if not sts:
        return
    # pagination block marker differs between page layouts
    mp = '<div class="navigation">'
    if mp not in data:
        mp = 'next-page'
    nextPageUrl = self.cm.ph.getDataBeetwenMarkers(data, mp, '</div>', False)[1]
    printDBG( '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
    printDBG(nextPageUrl)
    printDBG( '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
    if ('/{0}/'.format(page + 1)) in nextPageUrl:
        nextPageUrl = self.cm.ph.getSearchGroups( nextPageUrl, '''href=['"]([^"^']+?)['"]''')[0]
    else:
        nextPageUrl = '#'  # sentinel: no next page
    m1 = '<div class="shortmail">'
    data = self.cm.ph.getDataBeetwenMarkers(data, m1, 'container-content')[1]
    data = self.cm.ph.getAllItemsBeetwenMarkers(data, m1, '</span></span>')
    for item in data:
        # title fallbacks: title span, rtl span, then the title attribute
        title = self.cleanHtmlStr( self.cm.ph.getDataBeetwenMarkers(item, 'class="title-shorts">', '<', False)[1])
        if title == '':
            title = self.cleanHtmlStr( self.cm.ph.getDataBeetwenMarkers(item, 'dir="rtl">', '<', False)[1])
        if title == '':
            title = self.cm.ph.getSearchGroups( item, '''title=['"]([^"^']+?)['"]''')[0]
        if 'serie_title' in cItem:
            title = cItem['serie_title'] + ' - ' + title
        icon = self.cm.ph.getSearchGroups(item, '''src=['"]([^"^']+?)['"]''')[0]
        if icon.startswith('['):
            icon = ''  # placeholder attribute, not a real url
        else:
            icon = self._getFullUrl(icon)
        url = self._getFullUrl( self.cm.ph.getSearchGroups(item, '''href=['"]([^"^']+?)['"]''')[0])
        if url.startswith('http'):
            params = {}
            params.update({ 'good_for_fav': False, 'title': self.cleanHtmlStr(title), 'url': url, 'icon': icon, 'desc': self.cleanHtmlStr(item) })
            if nextCategory != 'video':
                params['category'] = nextCategory
                params['good_for_fav'] = True
                self.addDir(params)
            else:
                self.addVideo(params)
    if nextPageUrl != '#':
        params = dict(cItem)
        params.update({ 'good_for_fav': False, 'title': _('Next page'), 'url': nextPageUrl, 'page': page + 1 })
        self.addDir(params)
def getLinksForVideo(self, cItem):
    """Resolve an EuroSportPlayer item into playable HLS links.

    Opens the video page (refreshing cookies), optionally resolves the
    route json, then reads the playback json and expands the HLS master
    playlist into individual variants.
    """
    printDBG("EuroSportPlayer.getLinksForVideo [%s]" % cItem)
    self.checkLogin()
    linksTab = []
    try:
        printDBG(str(cItem))
        video_id = cItem['video_id']
        # open video page
        video_page_url = cItem['url']
        sts, data = self.getPage( video_page_url, {'header' : {'User-Agent': self.USER_AGENT, 'Referer' : video_page_url}, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIE_FILE })
        if not sts:
            return []
        # open route json page
        route_id = cItem.get('route_id', '')
        if route_id:
            route = self.espRoutes[route_id]
            printDBG(json_dumps(route))
            #{"attributes": {"url": "/videos/eurosport/world-championship-239400", "canonical": true}, "type": "route", "id": "292e72a63ebcccb480984a84f3497b7702623ab6fe6e7d7d29b1dce79ed3da35"}
            route_url = self.getFullPath(route['attributes']['url'], 'route') + "?include=default"
            sts, data = self.getPage(route_url)
            #if sts:
                #printDBG('--------------------------------')
                #printDBG(data)
        # open video playback json page
        playback_info_url = self.PLAYBACK_URL.replace('{%video_id%}', video_id)
        sts, data = self.getPage(playback_info_url, {'header' : {'User-Agent': self.USER_AGENT, 'Referer' : video_page_url}, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIE_FILE })
        if not sts:
            return []
        printDBG('--------------------------------')
        printDBG(data)
        j = json_loads(data)
        s = j['data']['attributes']['streaming']
        if 'hls' in s:
            link_url = strwithmeta(s['hls']['url'], {'User-Agent': self.USER_AGENT, 'Referer' : video_page_url})
            linksTab.append({'name':'auto hls', 'url': link_url})
            # expand the master playlist into per-bitrate variants
            linksTab.extend(getDirectM3U8Playlist(link_url, checkExt=False, variantCheck=True, checkContent=True, sortWithMaxBitrate=99999999))
        #if 'dash' in s:
        #    link_url = strwithmeta(s['dash']['url'], {'User-Agent': self.USER_AGENT, 'Referer' : video_page_url})
        #    linksTab.append({'name':'dash', 'url': link_url})
        #if 'mss' in s:
        #    link_url = strwithmeta(s['dash']['url'], {'User-Agent': self.USER_AGENT, 'Referer' : video_page_url})
        #    linksTab.append({'name':'mss', 'url': link_url})
    except Exception:
        printExc()
    return linksTab
def listSearchResult(self, cItem, searchPattern, searchType):
    """Forward a search query to the generic search-items listing."""
    printDBG("EuroSportPlayer.listSearchResult cItem[%s], searchPattern[%s] searchType[%s]" % (cItem, searchPattern, searchType))
    # copy the incoming item and tag it for the search-results category
    searchParams = dict(cItem)
    searchParams['category'] = 'list_search_items'
    searchParams['f_query'] = searchPattern
    self.listSearchItems(searchParams)
def tryTologin(self):
    """Log into Eurosport Player when credentials changed or no session exists.

    Re-authenticates only when self.loggedIn is unset or the configured
    login/password differ from the cached ones; otherwise returns the cached
    state. Solves the reCAPTCHA, posts the credentials as JSON and stores the
    session cookies in self.COOKIE_FILE.

    Returns bool -- True when logged in, False otherwise.
    """
    printDBG('EuroSportPlayer.tryTologin start')
    if None == self.loggedIn or self.login != config.plugins.iptvplayer.eurosportplayer_login.value or\
       self.password != config.plugins.iptvplayer.eurosportplayer_password.value:
        self.login = config.plugins.iptvplayer.eurosportplayer_login.value
        self.password = config.plugins.iptvplayer.eurosportplayer_password.value
        # credentials changed: drop the stale session cookies
        rm(self.COOKIE_FILE)
        self.loggedIn = False
        self.loginMessage = ''
        if '' == self.login.strip() or '' == self.password.strip():
            msg = _('The host %s requires subscription.\nPlease fill your login and password in the host configuration - available under blue button.') % self.getMainUrl()
            GetIPTVNotify().push(msg, 'info', 10)
            return False
        try:
            # get token
            tokenUrl = self.TOKEN_URL
            sts, data = self.getPage(tokenUrl)
            printDBG(data)
            # get config (also with catpcha site-key)
            sts, data = self.getPage(self.CONFIG_URL)
            printDBG(data)
            # solve captcha to login
            (token, errorMsgTab) = CaptchaHelper().processCaptcha(self.recaptcha_sitekey, self.LOGIN_URL)
            if not token:
                printDBG(str(errorMsgTab))
                # FIX: was a bare "return" (None); all other exits return a bool
                return False
            printDBG('Captcha token :%s' % token)
            # try to login
            header = {'User-Agent': self.USER_AGENT,
                      'Referer': self.LOGIN_URL,
                      'x-disco-client': 'WEB:x86_64:WEB_AUTH:1.1.0',
                      'x-disco-recaptcha-token': token,
                      'content-type': 'application/json'}
            postData = {'credentials': {'username': self.login, 'password': self.password}}
            url = "https://eu3-prod-direct.eurosportplayer.com/login"
            httpParams = {'header': header, 'with_metadata': True, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIE_FILE, 'raw_post_data': True}
            sts, data = self.getPage(url, httpParams, post_data=json_dumps(postData))
            # good login: {"data": {"attributes": {"lastLoginTime": ..., "realm": "eurosport", "token": "..."},
            #              "id": "token-...", "type": "token"}, "meta": {...}}
            # wrong password: {"errors": [{"status": "401", "code": "unauthorized", "id": ..., "detail": ""}]}
            if not sts and '401' in str(data):
                msg = _('Login failed. Invalid email or password.')
                GetIPTVNotify().push(msg, 'error', 10)
                return False
            else:
                data = json_loads(data)
                printDBG(str(data))
                self.loggedIn = True
        except Exception:
            printExc()
    printDBG('EuroSportPlayer.tryTologin end loggedIn[%s]' % self.loggedIn)
    return self.loggedIn
def checkLogin(self):
    """Probe the account endpoint and trigger tryTologin when the session
    is missing, anonymous, or belongs to a different username than the one
    configured in settings."""
    printDBG('EuroSportPlayer.checkLogin start')
    sts, data = self.getPage(self.USER_URL)
    if not sts:
        # account endpoint unreachable -> attempt a fresh login
        self.tryTologin()
        return
    attrs = json_loads(data)['data']['attributes']
    if attrs['anonymous']:
        printDBG("------------------------EUROSPORT------------------------------------")
        printDBG("connected as anonymous: login needed")
        printDBG("---------------------------------------------------------------------")
        self.tryTologin()
        return
    printDBG("------------------------EUROSPORT------------------------------------")
    printDBG("Ok, connected as username: %s " % attrs['username'])
    printDBG("Last Login %s" % attrs['lastLoginTime'])
    printDBG("---------------------------------------------------------------------")
    if config.plugins.iptvplayer.eurosportplayer_login.value != attrs['username']:
        # session belongs to someone else -> warn and re-login
        GetIPTVNotify().push(_("Username in settings is different from %s" % attrs['username']) + "\n" + _("Login needed"), 'error', 10)
        self.tryTologin()
def getArticleContent(self, cItem):
    """Scrape the item's page and build the article view data.

    cItem -- list-item dict; 'prev_url' is preferred over 'url' as source page.
    Returns a one-element list with {'title', 'text', 'images', 'other_info'}
    (empty list when the page cannot be fetched).
    """
    printDBG("getArticleContent [%s]" % cItem)
    retTab = []
    otherInfo = {}
    url = cItem.get('prev_url', '')
    if url == '':
        url = cItem.get('url', '')
    sts, data = self.getPage(url)
    if not sts:
        return retTab
    # title / poster / description scraped from page markup
    title = self.cleanHtmlStr(self.cm.ph.getSearchGroups(data, '''<meta[^>]+?itemprop="name"[^>]+?content="([^"]+?)"''')[0])
    icon = self.cm.ph.getDataBeetwenMarkers(data, '<div id="poster"', '</div>')[1]
    icon = self.getFullIconUrl(self.cm.ph.getSearchGroups(icon, '''<img[^>]+?src=['"]([^"^']+?\.jpe?g[^"^']*?)["']''')[0])
    desc = self.cleanHtmlStr(self.cm.ph.getDataBeetwenReMarkers(data, re.compile('<div[^>]+?class="wp-content"[^>]*?>'), re.compile('</div>'))[1])
    # map site field labels to other_info keys
    # ('Firt air date' matches the site's own spelling — presumably intentional; verify against the page)
    mapDesc = {'Original title': 'alternate_title', 'IMDb Rating': 'imdb_rating', 'TMDb Rating': 'tmdb_rating', 'Status': 'status', 'Firt air date': 'first_air_date', 'Last air date': 'last_air_date', 'Seasons': 'seasons', 'Episodes': 'episodes'}
    tmp = self.cm.ph.getAllItemsBeetwenMarkers(data, '<div class="custom_fields">', '</div>')
    for item in tmp:
        # each field is "<label><span class="valor"><value>"
        item = item.split('<span class="valor">')
        if len(item) < 2:
            continue
        marker = self.cleanHtmlStr(item[0])
        key = mapDesc.get(marker, '')
        if key == '':
            continue
        value = self.cleanHtmlStr(item[1])
        if value != '':
            otherInfo[key] = value
    # cast / director / creator sections
    mapDesc = {'Director': 'directors', 'Cast': 'cast', 'Creator': 'creators'}
    tmp = self.cm.ph.getDataBeetwenReMarkers(data, re.compile('<div id="cast"[^>]+?>'), re.compile('fixidtab'))[1]
    tmp = self.cm.ph.rgetAllItemsBeetwenMarkers(tmp, '</div>', '<h2>')
    for item in tmp:
        marker = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(item, '<h2', '</h2>')[1])
        key = mapDesc.get(marker, '')
        if key == '':
            continue
        item = self.cm.ph.getAllItemsBeetwenMarkers(item, '<div class="name">', '</div>')
        value = []
        for t in item:
            t = self.cleanHtmlStr(t)
            if t != '':
                value.append(t)
        if len(value):
            otherInfo[key] = ', '.join(value)
    # genres
    key = 'genres'
    tmp = self.cm.ph.getDataBeetwenMarkers(data, '<div class="sgeneros">', '</div>')[1]
    tmp = self.cm.ph.getAllItemsBeetwenMarkers(tmp, '<a', '</a>')
    value = []
    for t in tmp:
        t = self.cleanHtmlStr(t)
        if t != '':
            value.append(t)
    if len(value):
        otherInfo[key] = ', '.join(value)
    # single-value extras: rating, quality, country, duration
    tmp = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(data, '<div class="starstruck-rating">', '</div>')[1])
    if tmp != '':
        otherInfo['rating'] = tmp
    tmp = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(data, '<span class="qualityx">', '</span>')[1])
    if tmp != '':
        otherInfo['quality'] = tmp
    tmp = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(data, '<span class="country">', '</span>')[1])
    if tmp != '':
        otherInfo['country'] = tmp
    tmp = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(data, '<span class="runtime">', '</span>')[1])
    if tmp != '':
        otherInfo['duration'] = tmp
    # fall back to the values carried by the list item itself
    if title == '':
        title = cItem['title']
    if desc == '':
        desc = cItem.get('desc', '')
    if icon == '':
        icon = cItem.get('icon', self.DEFAULT_ICON_URL)
    return [{'title': self.cleanHtmlStr(title), 'text': self.cleanHtmlStr(desc), 'images': [{'title': '', 'url': self.getFullUrl(icon)}], 'other_info': otherInfo}]
def listSearchResult(self, cItem, searchPattern, searchType):
    """Build the group-search URL for the query and list the matches."""
    printDBG("listSearchResult cItem[%s], searchPattern[%s] searchType[%s]" % (cItem, searchPattern, searchType))
    queryItem = dict(cItem)
    # URL-encode the user's query into the site search endpoint
    encodedQuery = urllib.quote_plus(searchPattern)
    queryItem['url'] = self.getFullUrl('/groups/group/search?ie=UTF-8&sa=Search&q=' + encodedQuery)
    self.listItems(queryItem, 'explore_item')
def addVideoFromData(self, videoData, OnlyLive = False, label_format = None, future = False):
    """Build a list-entry params dict from one Discovery-API 'video' object.

    videoData    -- decoded API object with 'id', 'attributes' and
                    'relationships' keys (see abbreviated example below).
    OnlyLive     -- when True, skip items whose videoType is not 'LIVE'.
    label_format -- 'schedule' selects the time/sport/channel title layout;
                    any other truthy value (or None) uses the date layout.
    future       -- when True, also include items scheduled in the future.
    Returns the params dict for the entry, or {} when the item is filtered out.
    """
    # printDBG(json_dumps(videoData))
    # abbreviated example of videoData:
    # {"relationships": {"txSports": {"data": [{"type": "taxonomyNode", "id": "..."}]},
    #                    "show": {...}, "primaryChannel": {"data": {"type": "channel", "id": "95"}},
    #                    "routes": {"data": [{"type": "route", "id": "..."}]},
    #                    "images": {"data": [{"type": "image", "id": "..."}]}, ...},
    #  "attributes": {"scheduleStart": "2019-10-27T08:50:00Z", "videoDuration": 6060000,
    #                 "description": "...", "path": "eurosport/...", "videoType": "STANDALONE",
    #                 "name": "...", "secondaryTitle": "...", ...},
    #  "type": "video", "id": "250797"}
    params = {}
    video_id = videoData['id']
    item_data = videoData['attributes']
    printDBG(json_dumps(item_data))
    # prefer broadcastType for the label; fall back to videoType
    if 'broadcastType' in item_data:
        #printDBG(" %s, %s , %s" % (item_data['name'], item_data['videoType'], item_data['broadcastType'] ))
        bt = item_data['broadcastType']
    else:
        #printDBG(" %s, %s , %s" % (item_data['name'], item_data['videoType'], '' ))
        bt = item_data['videoType']
    if (not OnlyLive) or (item_data['videoType'] == 'LIVE'):
        if 'scheduleStart' in item_data:
            start = item_data['scheduleStart']
        else:
            start = item_data['earliestPlayableStart']
        #printDBG("start: %s" % start)
        scheduleDate = self._gmt2local(start)
        #printDBG("local time: %s" % str(scheduleDate))
        # only items already started (or explicitly requested future ones)
        if scheduleDate < datetime.now() or future:
            txtDate = scheduleDate.strftime("%d/%m/%Y")
            txtTime = scheduleDate.strftime("%H:%M")
            # "routes": {"data": [{"type": "route", "id": "..."}]}
            if 'routes' in videoData['relationships']:
                route_id = videoData['relationships']['routes']['data'][0]['id']
            else:
                route_id = ''
            if label_format:
                if label_format == 'schedule':
                    # resolve sport and channel names from the cached taxonomy maps
                    if 'txSports' in videoData['relationships']:
                        sport_node_id = videoData['relationships']['txSports']['data'][0]['id']
                        sport = self.espTaxonomyNodes[sport_node_id]
                        #printDBG(json_dumps(sport))
                        txtSport = sport['attributes']['name']
                    else:
                        txtSport = ''
                    if 'primaryChannel' in videoData['relationships']:
                        channel_id = videoData['relationships']['primaryChannel']['data']['id']
                        channel = self.espChannels[channel_id]
                        #printDBG(json_dumps(channel))
                        txtChannel = channel['attributes']['name']
                    else:
                        txtChannel = ''
                    if bt == 'LIVE':
                        title = " %s %s - %s [%s]" % (txtTime, txtSport.upper(), item_data['name'], bt)
                    else:
                        title = " %s %s - %s - %s" % (txtTime, txtSport.upper(), item_data['name'], txtChannel)
                # elif: other label_format cases could go here
                else:
                    title = item_data['name'] + " [%s] - (%s)" % (bt, txtDate)
            else:
                title = item_data['name'] + " [%s] - (%s)" % (bt, txtDate)
            # assemble the description: id, duration, secondary title, synopsis
            desc = "video id: %s\n" % video_id
            if 'videoDuration' in item_data:
                desc = desc + _("Duration") + ": %s" % str(timedelta(seconds = int(item_data['videoDuration'] / 1000))) + "\n"
            if 'secondaryTitle' in item_data:
                desc = desc + item_data['secondaryTitle'] + "\n"
            desc = desc + item_data.get('description', '')
            icon_id = videoData['relationships']['images']['data'][0]['id']
            icon = self.espImages[icon_id]['attributes']['src']
            url = self.getFullPath(item_data['path'], 'video')
            params = {'title': title, 'desc': desc, 'url': url, 'icon': icon, 'video_id': video_id, 'schedule_date': scheduleDate, 'route_id': route_id}
            printDBG(str(params))
    return params
def __init__(self):
    """Initialize the TvGryPL host with its search-history storage key."""
    printDBG("TvGryPL.__init__")
    hostInitParams = {'history': 'TvGryPL.tv'}
    CBaseHostClass.__init__(self, hostInitParams)
def getLinksForVideo(self, cItem):
    """Return link dicts for the item: resolve directly when the URL points
    to a supported hoster, otherwise serve the cached links."""
    printDBG("getLinksForVideo [%s]" % cItem)
    itemUrl = cItem.get('url', '')
    if self.up.checkHostSupport(itemUrl) == 1:
        # known hoster: let urlparser resolve the direct link
        return self.up.getVideoLinkExt(cItem['url'])
    # fall back to links collected earlier during listing
    return self.cacheLinks.get(cItem['url'], [])
def saveWebFile(self, file_path, url, addParams=None, post_data=None):
    """Download *url* to *file_path* in chunks, optionally validating the
    response content type and the first bytes of the payload.

    file_path -- destination path on disk.
    url       -- source URL (may carry extra params via url-with-meta).
    addParams -- optional request-parameter dict; a caller-supplied dict is
                 still updated in place, as before.
    post_data -- optional POST payload forwarded to getPage.
    Returns {'sts': bool success flag, 'fsize': bytes written}.
    """
    # FIX: the old signature used a mutable default (addParams={}) that this
    # body mutates, leaking header/return_data state across calls sharing the
    # default. A None sentinel keeps explicit-dict behaviour unchanged.
    if addParams is None:
        addParams = {}
    bRet = False
    downDataSize = 0
    dictRet = {}
    try:
        outParams, postData = self.getParamsFromUrlWithMeta(url)
        addParams.update(outParams)
        if 'header' not in addParams and 'host' not in addParams:
            host = 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.18) Gecko/20110621 Mandriva Linux/1.9.2.18-0.1mdv2010.2 (2010.2) Firefox/3.6.18'
            header = {'User-Agent': host, 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}
            addParams['header'] = header
        # ask getPage for the raw response handler instead of the body
        addParams['return_data'] = False
        sts, downHandler = self.getPage(url, addParams, post_data)
        # NOTE(review): Content-Length is read only when 'ignore_content_length'
        # is set — the flag name suggests the opposite; behaviour kept as-is,
        # but worth confirming against callers.
        if addParams.get('ignore_content_length', False):
            meta = downHandler.info()
            contentLength = int(meta.getheaders("Content-Length")[0])
        else:
            contentLength = None
        checkFromFirstBytes = addParams.get('check_first_bytes', [])
        OK = True
        if 'maintype' in addParams and addParams['maintype'] != downHandler.headers.maintype:
            printDBG("common.getFile wrong maintype! requested[%r], retrieved[%r]" % (addParams['maintype'], downHandler.headers.maintype))
            # wrong maintype is fatal only when no first-bytes check can rescue it
            if 0 == len(checkFromFirstBytes):
                downHandler.close()
                OK = False
        if OK and 'subtypes' in addParams:
            OK = False
            for item in addParams['subtypes']:
                if item == downHandler.headers.subtype:
                    OK = True
                    break
        if OK or len(checkFromFirstBytes):
            blockSize = addParams.get('block_size', 8192)
            fileHandler = None
            while True:
                chunk = downHandler.read(blockSize)
                if len(checkFromFirstBytes):
                    # validate the payload's first bytes once, then disable the check
                    OK = False
                    for item in checkFromFirstBytes:
                        if chunk.startswith(item):
                            OK = True
                            break
                    if not OK:
                        break
                    else:
                        checkFromFirstBytes = []
                if not chunk:
                    break
                downDataSize += len(chunk)
                if len(chunk):
                    # open the output file lazily, only once data actually arrives
                    if fileHandler == None:
                        fileHandler = file(file_path, "wb")
                    fileHandler.write(chunk)
            if fileHandler != None:
                fileHandler.close()
            downHandler.close()
            if None != contentLength:
                if contentLength == downDataSize:
                    bRet = True
            elif downDataSize > 0:
                bRet = True
    except Exception:
        printExc("common.getFile download file exception")
    dictRet.update({'sts': bRet, 'fsize': downDataSize})
    return dictRet
def exploreItem(self, cItem, nextCategory=''):
    """Explore one group page: extract trailer/hoster links for a single
    movie, or enumerate seasons/episodes (or a collection) for a series.

    cItem        -- list-item dict with at least 'url' and 'title'.
    nextCategory -- category assigned to season sub-directories.
    Side effects: resets self.cacheLinks (and self.cacheSeasons for series)
    and adds videos/dirs via self.addVideo / self.addDir.
    """
    printDBG("exploreItem")
    self.cacheLinks = {}
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    trailer = None
    mainDesc = []
    linksTab = []
    # the module body holds the description and the hoster links
    tmp = self.cm.ph.getDataBeetwenReMarkers(data, re.compile('<div[^>]+?class="xg_module_body nopad"[^>]*?>'), re.compile('<div[^>]+?like[^>]+?>'))[1]
    items = self.cm.ph.getAllItemsBeetwenMarkers(tmp, '<a', '</a>')
    for item in items:
        url = self.cm.ph.getSearchGroups(item, '''href=['"]([^"^']+?)['"]''')[0]
        name = self.cleanHtmlStr(item)
        # normalise short youtube links so the hoster check recognises them
        url = url.replace('youtu.be/', 'youtube.com/watch?v=')
        if 1 == self.up.checkHostSupport(url):
            if 'youtube' in url or 'trailer' in name.lower():
                trailer = {'name': name, 'url': url, 'need_resolve': 1}
            else:
                linksTab.append({'name': '%s - %s' % (name, self.up.getHostName(url)), 'url': url, 'need_resolve': 1})
    # strip anchors, then collect paragraph texts as the description
    items = re.sub('''<a[^>]+?>[^>]*?</a>''', "", tmp)
    items = self.cm.ph.getAllItemsBeetwenMarkers(items, '<p', '</p>')
    for item in items:
        item = self.cleanHtmlStr(item)
        if item != '':
            mainDesc.append(item)
    mainDesc.append(self.cleanHtmlStr(tmp.split('<span id="groups121">', 1)[-1]))
    mainDesc = '[/br]'.join(mainDesc)
    # trailer
    if trailer != None:
        title = '%s - %s' % (cItem['title'], _('TRAILER'))
        params = dict(cItem)
        params.update({'good_for_fav': False, 'title': title, 'url': trailer['url'], 'desc': trailer['name']})
        self.addVideo(params)
    if len(linksTab):
        # single movie: cache its links keyed by the page URL
        self.cacheLinks[cItem['url']] = linksTab
        params = dict(cItem)
        params.update({'good_for_fav': False, 'desc': mainDesc})
        self.addVideo(params)
    else:
        # no direct links: this page is a series (seasons) or a collection
        self.cacheSeasons = {}
        data = self.cm.ph.getDataBeetwenMarkers(data, 'html_module module_text', '<div class="xg_module">')[1].split('<p>', 1)[-1]
        # split the page on "Season ..." headers; odd indexes are the titles
        tmp = re.compile('''>(Season[^<]*?)<''', re.IGNORECASE).split(data)
        if len(tmp) > 1:
            printDBG(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> SEASON %s" % len(tmp[0]))
            for idx in range(2, len(tmp), 2):
                sTitle = self.cleanHtmlStr(tmp[idx-1])
                sNum = self.cm.ph.getSearchGroups(sTitle, '[^0-9]([0-9]+)')[0]
                episodesList = []
                episodesLinks = {}
                sItem = self.cm.ph.getAllItemsBeetwenMarkers(tmp[idx], '<a', '</a>')
                for item in sItem:
                    url = self.getFullUrl(self.cm.ph.getSearchGroups(item, '''href=['"]([^'^"]+?)['"]''')[0])
                    if 1 != self.up.checkHostSupport(url):
                        continue
                    title = self.cleanHtmlStr(item)
                    if title not in episodesList:
                        episodesList.append(title)
                        episodesLinks[title] = []
                    episodesLinks[title].append({'name': self.up.getHostName(url), 'url': url, 'need_resolve': 1})
                if len(episodesList):
                    params = dict(cItem)
                    params.update({'good_for_fav': False, 'category': nextCategory, 'series_title': cItem['title'], 's_num': sNum, 'title': sTitle, 'e_list': episodesList, 'e_links': episodesLinks, 'desc': mainDesc})
                    self.addDir(params)
            return
        # collection layout: poster images alternating with link blocks
        tmp = re.split('''(<img[^>]+?\.jpg[^>]+?>)''', data)
        # NOTE(review): "len(data) > 1" tests the raw HTML string length, which
        # is almost always true — this presumably was meant to be len(tmp) > 1.
        if len(data) > 1: # collection
            for idx in range(1, len(tmp), 1):
                printDBG("++++++++++++++++++++++++++++++++++++++++")
                item = tmp[idx]
                title = self.cleanHtmlStr(self.cm.ph.getDataBeetwenReMarkers(item, re.compile('<span[^>]+?style="text-decoration: underline;"[^>]*?>'), re.compile('</span>'))[1])
                if title == '':
                    title = self.cleanHtmlStr(self.cm.ph.rgetDataBeetwenMarkers2(item, '</a>', '<a')[1])
                if title == '':
                    title = self.cleanHtmlStr(item)
                # the poster for this entry is the image block just before it
                icon = self.getFullIconUrl(self.cm.ph.getSearchGroups(tmp[idx-1], '''src=['"]([^'^"]+?)['"]''')[0])
                if icon == '':
                    icon = cItem.get('icon', '')
                linksTab = []
                item = self.cm.ph.getAllItemsBeetwenMarkers(item, '<a', '</a>')
                for it in item:
                    url = self.cm.ph.getSearchGroups(it, '''href=['"]([^"^']+?)['"]''')[0]
                    printDBG(">>> [%s]" % url)
                    # keep only the last absolute http(s) URL embedded in the href
                    url = re.split('''(https?://)''', url)
                    if len(url) > 1:
                        url = url[-2] + url[-1]
                    else:
                        continue
                    name = self.cleanHtmlStr(it)
                    if 0 == len(linksTab) and 'gamato' in url and '/group/' in url:
                        # internal group link: add as a sub-directory and stop
                        url = self.getFullUrl('/group/' + url.split('/group/')[-1])
                        params = dict(cItem)
                        params.update({'good_for_fav': True, 'url': url, 'title': title, 'icon': icon, 'desc': mainDesc})
                        self.addDir(params)
                        break
                    elif 1 == self.up.checkHostSupport(url):
                        linksTab.append({'name': self.up.getHostName(url), 'url': url, 'need_resolve': 1})
                if len(linksTab):
                    # synthesise a unique cache key per collection entry
                    url = cItem['url'] + '&title=' + title
                    self.cacheLinks[url] = linksTab
                    params = dict(cItem)
                    params.update({'good_for_fav': False, 'url': url, 'title': title, 'icon': icon, 'desc': mainDesc})
                    self.addVideo(params)
# --- tail fragment of the page-fetch method (its header is not visible in
# this chunk): normalise the response charset to UTF-8 and attach metadata ---
if 'content-type' in metadata:
    encoding = self.ph.getSearchGroups(metadata['content-type'], '''charset=([A-Za-z0-9\-]+)''', 1, True)[0].strip().upper()
    if encoding == '' and params.get('search_charset', False):
        # no charset in the header: fall back to the HTML meta tag
        encoding = self.ph.getSearchGroups(out_data, '''(<meta[^>]+?Content-Type[^>]+?>)''', ignoreCase=True)[0]
        encoding = self.ph.getSearchGroups(encoding, '''charset=([A-Za-z0-9\-]+)''', 1, True)[0].strip().upper()
    if encoding not in ['', 'UTF-8']:
        printDBG(">> encoding[%s]" % encoding)
        try:
            # transcode to UTF-8; original charset recorded in the metadata
            out_data = out_data.decode(encoding).encode('UTF-8')
        except Exception:
            printExc()
        metadata['orig_charset'] = encoding
if params.get('with_metadata', False) and params.get('return_data', False):
    # wrap the body so callers can reach url/status/content-type via .meta
    out_data = strwithmeta(out_data, metadata)
return out_data

def urlEncodeNonAscii(self, b):
    # percent-encode every character in the 0x80-0xFF range, leaving ASCII intact
    return re.sub('[\x80-\xFF]', lambda c: '%%%02x' % ord(c.group(0)), b)
def getURLRequestData(self, params={}, post_data=None):
    """Core HTTP fetch (Python 2 urllib2 based).

    params    -- request options: 'url' (required), 'header', 'host',
                 'timeout', cookie options ('use_cookie', 'load_cookie',
                 'save_cookie', 'cookiefile', 'cookie_items'),
                 'no_redirection', 'ssl_protocol', 'http_proxy',
                 'proxy_gateway', 'raw_post_data', 'multipart_post_data',
                 'return_data', 'ignore_http_code_ranges', 'with_metadata'.
    post_data -- optional POST payload (dict unless raw/multipart flags set).
    Returns the response body (or the open response object when
    'return_data' is False).
    NOTE: in this chunk the function's tail (charset normalisation and the
    final return) appears separately; this span ends at the gzip handling.
    """
    def urlOpen(req, customOpeners, timeout):
        # open via a custom opener chain when any handlers were registered
        if len(customOpeners) > 0:
            opener = urllib2.build_opener(*customOpeners)
            if timeout != None:
                response = opener.open(req, timeout=timeout)
            else:
                response = opener.open(req)
        else:
            if timeout != None:
                response = urllib2.urlopen(req, timeout=timeout)
            else:
                response = urllib2.urlopen(req)
        return response
    # blocking I/O on the GUI thread is only warned about for now
    if IsMainThread():
        msg1 = _('It is not allowed to call getURLRequestData from main thread.')
        msg2 = _('You should never perform block I/O operations in the __init__.')
        msg3 = _('In next release exception will be thrown instead of this message!')
        GetIPTVNotify().push('%s\n\n%s\n\n%s' % (msg1, msg2, msg3), 'error', 40)
    if not self.useMozillaCookieJar:
        cj = cookielib.LWPCookieJar()
    else:
        cj = cookielib.MozillaCookieJar()
    response = None
    req = None
    out_data = None
    opener = None
    metadata = None
    timeout = params.get('timeout', None)
    if 'host' in params:
        host = params['host']
    else:
        host = self.HOST
    # header precedence: explicit params > instance default > UA-only
    if 'header' in params:
        headers = params['header']
    elif None != self.HEADER:
        headers = self.HEADER
    else:
        headers = {'User-Agent': host}
    if 'User-Agent' not in headers:
        headers['User-Agent'] = host
    metadata = {}
    printDBG('pCommon - getURLRequestData() -> params: ' + str(params))
    printDBG('pCommon - getURLRequestData() -> headers: ' + str(headers))
    customOpeners = []
    #cookie support
    if 'use_cookie' not in params and 'cookiefile' in params and ('load_cookie' in params or 'save_cookie' in params):
        params['use_cookie'] = True
    if params.get('use_cookie', False):
        if params.get('load_cookie', False):
            try:
                cj.load(params['cookiefile'], ignore_discard=True)
            except IOError:
                # missing cookie file is expected on first use
                printDBG('Cookie file [%s] not exists' % params['cookiefile'])
            except Exception:
                printExc()
        try:
            # inject caller-supplied session cookies into the jar
            for cookieKey in params.get('cookie_items', {}).keys():
                printDBG("cookie_item[%s=%s]" % (cookieKey, params['cookie_items'][cookieKey]))
                cookieItem = cookielib.Cookie(version=0, name=cookieKey, value=params['cookie_items'][cookieKey], port=None, port_specified=False, domain='', domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
                cj.set_cookie(cookieItem)
        except Exception:
            printExc()
        customOpeners.append(urllib2.HTTPCookieProcessor(cj))
    if params.get('no_redirection', False):
        customOpeners.append(NoRedirection())
    # debug
    #customOpeners.append(urllib2.HTTPSHandler(debuglevel=1))
    #customOpeners.append(urllib2.HTTPHandler(debuglevel=1))
    # optionally disable TLS certificate validation (user setting)
    if not IsHttpsCertValidationEnabled():
        try:
            if params.get('ssl_protocol', None) != None:
                ctx = ssl._create_unverified_context(params['ssl_protocol'])
            else:
                ctx = ssl._create_unverified_context()
            customOpeners.append(urllib2.HTTPSHandler(context=ctx))
        except Exception:
            pass
    elif params.get('ssl_protocol', None) != None:
        ctx = ssl.SSLContext(params['ssl_protocol'])
        customOpeners.append(urllib2.HTTPSHandler(context=ctx))
    #proxy support
    if self.useProxy:
        http_proxy = self.proxyURL
    else:
        http_proxy = ''
    #proxy from parameters (if available) overwrite default one
    if 'http_proxy' in params:
        http_proxy = params['http_proxy']
    if '' != http_proxy:
        printDBG('getURLRequestData USE PROXY')
        customOpeners.append(urllib2.ProxyHandler({"http": http_proxy}))
        customOpeners.append(urllib2.ProxyHandler({"https": http_proxy}))
    pageUrl = params['url']
    proxy_gateway = params.get('proxy_gateway', '')
    if proxy_gateway != '':
        # route the request through a gateway URL template
        pageUrl = proxy_gateway.format(urllib.quote_plus(pageUrl, ''))
    printDBG("pageUrl: [%s]" % pageUrl)
    if None != post_data:
        printDBG('pCommon - getURLRequestData() -> post data: ' + str(post_data))
        if params.get('raw_post_data', False):
            dataPost = post_data
        elif params.get('multipart_post_data', False):
            customOpeners.append(MultipartPostHandler())
            dataPost = post_data
        else:
            dataPost = urllib.urlencode(post_data)
        req = urllib2.Request(pageUrl, dataPost, headers)
    else:
        req = urllib2.Request(pageUrl, None, headers)
    if not params.get('return_data', False):
        # caller wants the raw response object (e.g. for streaming downloads)
        out_data = urlOpen(req, customOpeners, timeout)
    else:
        gzip_encoding = False
        try:
            response = urlOpen(req, customOpeners, timeout)
            if response.info().get('Content-Encoding') == 'gzip':
                gzip_encoding = True
            try:
                metadata['url'] = response.geturl()
                metadata['status_code'] = response.getcode()
                if 'Content-Type' in response.info():
                    metadata['content-type'] = response.info()['Content-Type']
            except Exception:
                pass
            data = response.read()
            response.close()
        except urllib2.HTTPError, e:
            # some HTTP error codes still carry a usable body
            ignoreCodeRanges = params.get('ignore_http_code_ranges', [(404, 404), (500, 500)])
            ignoreCode = False
            metadata['status_code'] = e.code
            for ignoreCodeRange in ignoreCodeRanges:
                if e.code >= ignoreCodeRange[0] and e.code <= ignoreCodeRange[1]:
                    ignoreCode = True
                    break
            if ignoreCode:
                printDBG('!!!!!!!! %s: getURLRequestData - handled' % e.code)
                if e.fp.info().get('Content-Encoding', '') == 'gzip':
                    gzip_encoding = True
                try:
                    metadata['url'] = e.fp.geturl()
                    if 'Content-Type' in e.fp.info():
                        metadata['content-type'] = e.fp.info()['Content-Type']
                except Exception:
                    pass
                data = e.fp.read()
                #e.msg
                #e.headers
            elif e.code == 503:
                # persist any anti-bot cookie before propagating the error
                if params.get('use_cookie', False):
                    new_cookie = e.fp.info().get('Set-Cookie', '')
                    printDBG("> new_cookie[%s]" % new_cookie)
                    cj.save(params['cookiefile'], ignore_discard=True)
                raise e
            else:
                # redirects: save cookies set by the redirect response, then re-raise
                if e.code in [300, 302, 303, 307] and params.get('use_cookie', False) and params.get('save_cookie', False):
                    new_cookie = e.fp.info().get('Set-Cookie', '')
                    printDBG("> new_cookie[%s]" % new_cookie)
                    #for cookieKey in params.get('cookie_items', {}).keys():
                    #    cj.clear('', '/', cookieKey)
                    cj.save(params['cookiefile'], ignore_discard=True)
                raise e
        try:
            if gzip_encoding:
                printDBG('Content-Encoding == gzip')
                buf = StringIO(data)
                f = gzip.GzipFile(fileobj=buf)
                out_data = f.read()
            else:
                out_data = data
        except Exception as e:
            printExc()
            msg1 = _("Critical Error – Content-Encoding gzip cannot be handled!")
            msg2 = _("Last error:\n%s" % str(e))
            GetIPTVNotify().push('%s\n\n%s' % (msg1, msg2), 'error', 20)
            # fall back to the raw (possibly still compressed) payload
            out_data = data
def listMainMenu(self, cItem):
    """Build the KinogoCC main menu from the site's navigation markup.

    Scrapes the top menu table into directory entries, the "mini" blocks into
    grouped sub-menus, then appends the static search entries.
    """
    printDBG("KinogoCC.listMainMenu")
    sts, data = self.getPage(self.getMainUrl())
    if not sts:
        return
    # follow any redirect: remember the URL the page was actually served from
    self.setMainUrl(data.meta['url'])
    tmp = self.cm.ph.getDataBeetwenNodes(data, ('<table', '>', 'menu'), ('</table', '>'), False)[1]
    tmp = self.cm.ph.getAllItemsBeetwenMarkers(tmp, '<a', '</a>')
    for item in tmp:
        url = self.getFullUrl(self.cm.ph.getSearchGroups(item, '''href=['"]([^'^"]+?)['"]''')[0])
        # only category links (trailing slash) become menu entries
        if not url.endswith('/'):
            continue
        title = self.cleanHtmlStr(item)
        params = dict(cItem)
        params.update({'good_for_fav': True, 'category': 'list_items', 'title': title, 'url': url})
        self.addDir(params)
    # "mini" blocks: each is a titled group of links shown as a sub-menu
    data = self.cm.ph.getAllItemsBeetwenNodes(data, ('<div', '>', '"mini"'), ('</div', '>'), False)
    for item in data:
        sTitle = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(item, '<i', '</i>')[1])
        printDBG("> sTitle[%s]" % sTitle)
        subItems = []
        item = self.cm.ph.getAllItemsBeetwenMarkers(item, '<a', '</a>')
        for it in item:
            url = self.getFullUrl(self.cm.ph.getSearchGroups(it, '''href=['"]([^'^"]+?)['"]''')[0])
            title = self.cleanHtmlStr(it)
            printDBG("\t> title[%s]" % title)
            params = dict(cItem)
            params.update({'good_for_fav': True, 'category': 'list_items', 'title': title, 'url': url})
            subItems.append(params)
        if len(subItems):
            params = dict(cItem)
            params.update({'good_for_fav': True, 'category': 'sub_items', 'title': sTitle, 'sub_items': subItems})
            self.addDir(params)
    # static entries: search and search history
    MAIN_CAT_TAB = [{'category': 'search', 'title': _('Search'), 'search_item': True},
                    {'category': 'search_history', 'title': _('Search history')}, ]
    self.listsTab(MAIN_CAT_TAB, cItem)
def getPageCFProtection(self, baseUrl, params={}, post_data=None):
    """Fetch a page that may be behind Cloudflare protection.

    Retries up to 5 times, solving either the reCAPTCHA form (via
    UnCaptchaReCaptcha) or the JavaScript challenge (executed through
    iptv_js_execute with a small base64-embedded DOM shim).

    baseUrl   -- target URL.
    params    -- getPage parameter dict; 'cloudflare_params' may provide
                 'User-Agent', 'cookie_file', 'full_url_handle',
                 'full_url_handle2'. NOTE: this dict is mutated in place
                 (cookie/header options are added).
    post_data -- optional POST payload forwarded to getPage.
    Returns (sts, data) from the final getPage call.
    """
    cfParams = params.get('cloudflare_params', {})
    def _getFullUrlEmpty(url):
        # identity fallback when the caller provides no URL-resolver
        return url
    _getFullUrl = cfParams.get('full_url_handle', _getFullUrlEmpty)
    _getFullUrl2 = cfParams.get('full_url_handle2', _getFullUrlEmpty)
    url = baseUrl
    header = {'Referer': url, 'User-Agent': cfParams.get('User-Agent', ''), 'Accept-Encoding': 'text'}
    header.update(params.get('header', {}))
    params.update({'with_metadata': True, 'use_cookie': True, 'save_cookie': True, 'load_cookie': True, 'cookiefile': cfParams.get('cookie_file', ''), 'header': header})
    sts, data = self.getPage(url, params, post_data)
    current = 0
    while current < 5:
        #if True:
        # a failed fetch with an attached error response may be a CF challenge
        if not sts and None != data:
            start_time = time.time()
            current += 1
            doRefresh = False
            try:
                domain = self.getBaseUrl(data.fp.geturl())
                verData = data.fp.read()
                if data.fp.info().get('Content-Encoding', '') == 'gzip':
                    verData = DecodeGzipped(verData)
                printDBG("------------------")
                printDBG(verData)
                printDBG("------------------")
                # not a Cloudflare page at all -> give up the retry loop
                if 'sitekey' not in verData and 'challenge' not in verData:
                    break
                printDBG(">>")
                printDBG(verData)
                printDBG("<<")
                sitekey = self.ph.getSearchGroups(verData, 'data-sitekey="([^"]+?)"')[0]
                id = self.ph.getSearchGroups(verData, 'data-ray="([^"]+?)"')[0]
                if sitekey != '':
                    # --- reCAPTCHA variant of the challenge ---
                    from Plugins.Extensions.IPTVPlayer.libs.recaptcha_v2 import UnCaptchaReCaptcha
                    # google captcha
                    recaptcha = UnCaptchaReCaptcha(lang=GetDefaultLang())
                    recaptcha.HTTP_HEADER['Referer'] = baseUrl
                    if '' != cfParams.get('User-Agent', ''):
                        recaptcha.HTTP_HEADER['User-Agent'] = cfParams['User-Agent']
                    token = recaptcha.processCaptcha(sitekey)
                    if token == '':
                        return False, None
                    sts, tmp = self.ph.getDataBeetwenMarkers(verData, '<form', '</form>', caseSensitive=False)
                    if not sts:
                        return False, None
                    url = self.ph.getSearchGroups(tmp, 'action="([^"]+?)"')[0]
                    if url != '':
                        url = _getFullUrl(url)
                    else:
                        url = data.fp.geturl()
                    actionType = self.ph.getSearchGroups(tmp, 'method="([^"]+?)"', 1, True)[0].lower()
                    # collect the hidden form inputs and add the captcha token
                    post_data2 = dict(re.findall(r'<input[^>]*name="([^"]*)"[^>]*value="([^"]*)"[^>]*>', tmp))
                    #post_data2['id'] = id
                    if '' != token:
                        post_data2['g-recaptcha-response'] = token
                    else:
                        continue
                    params2 = dict(params)
                    params2['header'] = dict(params['header'])
                    params2['header']['Referer'] = baseUrl
                    if actionType == 'get':
                        # GET form: move the fields into the query string
                        if '?' in url:
                            url += '&'
                        else:
                            url += '?'
                        url += urllib.urlencode(post_data2)
                        post_data2 = None
                    sts, data = self.getPage(url, params2, post_data2)
                    printDBG("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
                    printDBG(sts)
                    printDBG("------------------------------------------------------------------")
                    printDBG(data)
                    printDBG("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
                else:
                    # --- JavaScript (jschl) variant of the challenge ---
                    dat = self.ph.getAllItemsBeetwenNodes(verData, ('<script', '>'), ('</script', '>'), False)
                    for item in dat:
                        if 'setTimeout' in item and 'submit()' in item:
                            dat = item
                            break
                    decoded = ''
                    # base64-encoded minimal DOM shim (document/element/setTimeout)
                    # needed by the challenge script when run outside a browser
                    jscode = base64.b64decode(
                        '''ZnVuY3Rpb24gc2V0VGltZW91dCh0LGUpe2lwdHZfcmV0LnRpbWVvdXQ9ZSx0KCl9dmFyIGlwdHZfcmV0PXt9LGlwdHZfZnVuPW51bGwsZG9jdW1lbnQ9e30sd2luZG93PXRoaXMsZWxlbWVudD1mdW5jdGlvbih0KXt0aGlzLl9uYW1lPXQsdGhpcy5fc3JjPSIiLHRoaXMuX2lubmVySFRNTD0iIix0aGlzLl9wYXJlbnRFbGVtZW50PSIiLHRoaXMuc2hvdz1mdW5jdGlvbigpe30sdGhpcy5hdHRyPWZ1bmN0aW9uKHQsZSl7cmV0dXJuInNyYyI9PXQmJiIjdmlkZW8iPT10aGlzLl9uYW1lJiZpcHR2X3NyY2VzLnB1c2goZSksdGhpc30sdGhpcy5maXJzdENoaWxkPXtocmVmOmlwdHZfZG9tYWlufSx0aGlzLnN0eWxlPXtkaXNwbGF5OiIifSx0aGlzLnN1Ym1pdD1mdW5jdGlvbigpe3ByaW50KEpTT04uc3RyaW5naWZ5KGlwdHZfcmV0KSl9LE9iamVjdC5kZWZpbmVQcm9wZXJ0eSh0aGlzLCJzcmMiLHtnZXQ6ZnVuY3Rpb24oKXtyZXR1cm4gdGhpcy5fc3JjfSxzZXQ6ZnVuY3Rpb24odCl7dGhpcy5fc3JjPXR9fSksT2JqZWN0LmRlZmluZVByb3BlcnR5KHRoaXMsImlubmVySFRNTCIse2dldDpmdW5jdGlvbigpe3JldHVybiB0aGlzLl9pbm5lckhUTUx9LHNldDpmdW5jdGlvbih0KXt0aGlzLl9pbm5lckhUTUw9dH19KSxPYmplY3QuZGVmaW5lUHJvcGVydHkodGhpcywidmFsdWUiLHtnZXQ6ZnVuY3Rpb24oKXtyZXR1cm4iIn0sc2V0OmZ1bmN0aW9uKHQpe2lwdHZfcmV0LmFuc3dlcj10fX0pfSwkPWZ1bmN0aW9uKHQpe3JldHVybiBuZXcgZWxlbWVudCh0KX07ZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQ9ZnVuY3Rpb24odCl7cmV0dXJuIG5ldyBlbGVtZW50KHQpfSxkb2N1bWVudC5jcmVhdGVFbGVtZW50PWZ1bmN0aW9uKHQpe3JldHVybiBuZXcgZWxlbWVudCh0KX0sZG9jdW1lbnQuYXR0YWNoRXZlbnQ9ZnVuY3Rpb24oKXtpcHR2X2Z1bj1hcmd1bWVudHNbMV19Ow=='''
                    )
                    jscode = "var location = {hash:''}; var iptv_domain='%s';\n%s\n%s\niptv_fun();" % (domain, jscode, dat) #cfParams['domain']
                    printDBG("+ CODE +")
                    printDBG(jscode)
                    printDBG("++++++++")
                    ret = iptv_js_execute(jscode)
                    decoded = byteify(json.loads(ret['data'].strip()))
                    verData = self.ph.getDataBeetwenReMarkers(verData, re.compile('<form[^>]+?id="challenge-form"'), re.compile('</form>'), False)[1]
                    printDBG(">>")
                    printDBG(verData)
                    printDBG("<<")
                    verUrl = _getFullUrl(self.ph.getSearchGroups(verData, 'action="([^"]+?)"')[0])
                    get_data = dict(re.findall(r'<input[^>]*name="([^"]*)"[^>]*value="([^"]*)"[^>]*>', verData))
                    get_data['jschl_answer'] = decoded['answer']
                    verUrl += '?'
                    for key in get_data:
                        verUrl += '%s=%s&' % (key, get_data[key])
                    # NOTE(review): verUrl is immediately rebuilt below with a
                    # fixed parameter order, discarding the loop-built value.
                    verUrl = _getFullUrl(self.ph.getSearchGroups(verData, 'action="([^"]+?)"')[0]) + '?jschl_vc=%s&pass=%s&jschl_answer=%s' % (get_data['jschl_vc'], get_data['pass'], get_data['jschl_answer'])
                    verUrl = _getFullUrl2(verUrl)
                    params2 = dict(params)
                    params2['load_cookie'] = True
                    params2['save_cookie'] = True
                    params2['header'] = dict(params.get('header', {}))
                    params2['header'].update({'Referer': url, 'User-Agent': cfParams.get('User-Agent', ''), 'Accept-Encoding': 'text'})
                    printDBG("Time spent: [%s]" % (time.time() - start_time))
                    # honour the challenge's mandated delay before verification
                    if current == 1:
                        GetIPTVSleep().Sleep(1 + (decoded['timeout'] / 1000.0) - (time.time() - start_time))
                    else:
                        GetIPTVSleep().Sleep((decoded['timeout'] / 1000.0))
                    printDBG("Time spent: [%s]" % (time.time() - start_time))
                    printDBG("Timeout: [%s]" % decoded['timeout'])
                    sts, data = self.getPage(verUrl, params2, post_data)
            except Exception:
                printExc()
                break
        else:
            break
    return sts, data
def listItems(self, cItem, nextCategory):
    """List movie/series entries scraped from one catalogue page of kinogo.cc.

    cItem        - current navigation item; reads 'url', optional 'page' and
                   'post_data' (the latter is present for search requests).
    nextCategory - category assigned to each created directory entry.

    Side effects: populates the host list via self.addDir() and updates the
    main URL from the response metadata.
    """
    printDBG("KinogoCC.listItems")
    page = cItem.get('page', 1)
    post_data = cItem.get('post_data', None)
    sts, data = self.getPage(cItem['url'], post_data=post_data)
    if not sts:
        return
    # remember the (possibly redirected) final URL as the base for relative links
    self.setMainUrl(data.meta['url'])
    # pagination: a link whose text is the next page number inside 'bot-navigation'
    nextPage = self.cm.ph.getDataBeetwenNodes(
        data, ('<div', '>', 'bot-navigation'), ('</div', '>'))[1]
    nextPage = self.cm.ph.getSearchGroups(
        nextPage,
        '''<a[^>]+?href=['"]([^"^']+?)['"][^>]*?>\s*?{0}\s*?<'''.format(
            page + 1))[0]
    commentReObj = re.compile('<!--[\s\S]*?-->')
    brReObj = re.compile('<[\s/]*?br\s[\s/]*>', re.I)
    # narrow to the listing area, then split into one chunk per movie
    data = self.cm.ph.getDataBeetwenNodes(data,
                                          ('<div', '>', 'shortstorytitle'),
                                          ('<div', '>', 'pomoshnik'),
                                          False)[1]
    data = re.compile('<div[^>]+?shortstorytitle[^>]+?>').split(data)
    for item in data:
        rating = self.cleanHtmlStr(
            self.cm.ph.getDataBeetwenNodes(item,
                                           ('<', '>', 'current-rating'),
                                           ('</', '>'), False)[1])
        date = self.cleanHtmlStr(
            self.cm.ph.getDataBeetwenNodes(item, ('<', '>', 'dateicon'),
                                           ('</', '>'), False)[1])
        url = self.cm.ph.getDataBeetwenNodes(item, ('<h', '>', 'zagolovki'),
                                             ('</h', '>'), False)[1]
        title = self.cleanHtmlStr(url)
        url = self.getFullUrl(
            self.cm.ph.getSearchGroups(url,
                                       '''href=['"]([^"^']+?)['"]''')[0])
        item = self.cm.ph.getDataBeetwenNodes(item,
                                              ('<div', '>', 'shortimg'),
                                              ('<div', '>', 'icons'),
                                              False)[1]
        icon = self.getFullIconUrl(
            self.cm.ph.getSearchGroups(item,
                                       '''src=['"]([^"^']+?)['"]''')[0])
        descTab = []
        # strip HTML comments, then split the remainder on <br> tags
        item = commentReObj.sub("", item)
        item = brReObj.split(item)
        for idx in range(len(item)):
            t = self.cleanHtmlStr(item[idx])
            if t == '':
                continue
            # first non-empty fragment stays at index 0; every later one is
            # inserted at index 1, reversing their order — combined with the
            # final descTab[::-1] below this yields: fragments in original
            # order followed by the date/rating line's placement at the top
            if len(descTab) == 0:
                descTab.append(t)
            else:
                descTab.insert(1, t)
        descTab.append('Дата: %s, %s/5' % (date, rating))
        params = {
            'good_for_fav': True,
            'category': nextCategory,
            'url': url,
            'title': title,
            'desc': '[/br]'.join(descTab[::-1]),
            'icon': icon
        }
        self.addDir(params)
    if nextPage != '':
        params = dict(cItem)
        if nextPage != '#':
            # regular pagination link
            params.update({
                'title': _('Next page'),
                'url': self.getFullUrl(nextPage),
                'page': page + 1
            })
            self.addDir(params)
        elif post_data != None:
            # search pagination: '#' marker means the next page is requested
            # by POST with adjusted offsets
            # NOTE(review): dict(cItem) is a shallow copy, so this mutates the
            # 'post_data' dict shared with cItem — presumably intentional here,
            # but worth confirming.
            params['post_data'].update({
                'search_start': page + 1,
                'result_from': (page) * 30 + 1
            })
            params.update({'title': _('Next page'), 'page': page + 1})
            self.addDir(params)
def getPage(self, baseUrl, addParams={}, post_data=None):
    """Fetch *baseUrl*, transparently working around Cloudflare protection.

    First tries a plain self.cm.getPage(). If the response looks like a
    Cloudflare JS challenge ('!![]+!![]' marker) or is a 503, it retries via
    the "Vstream" cRequestHandler method, importing the solved cookies into
    this host's cookie jar. If that raises, it falls back to e2iplayer's
    built-in getPageCFProtection().

    addParams  - request parameters; defaults to a copy of self.defaultParams.
                 (The {} default is never mutated: it is rebound before use.)
    post_data  - optional dict of POST fields.
    Returns (sts, data) like self.cm.getPage().
    """
    if addParams == {}:
        addParams = dict(self.defaultParams)
    sts, data = self.cm.getPage(baseUrl, addParams, post_data)
    if not data:
        data = ''
    # FIX: a plain '' fallback has no .meta attribute; the original crashed
    # with AttributeError on failed fetches.
    meta = getattr(data, 'meta', {})
    printDBG('ddddaaattttaaaa' + str(meta))
    printDBG('ddddaaattttaaaa' + data)
    if ('!![]+!![]' in data) or (meta.get('status_code', 0) == 503):
        # FIX: the original had the try/except commented out ("if True: #try:"),
        # leaving a dead `else:` branch that referenced an undefined name `e`.
        try:
            # start from a clean cookie jar so stale cf cookies don't interfere
            if os.path.exists(self.COOKIE_FILE):
                os.remove(self.COOKIE_FILE)
                printDBG('cookie removed')
            printDBG('Start CLoudflare Vstream methode')
            oRequestHandler = cRequestHandler(baseUrl)
            if post_data:
                # serialize the POST dict as application/x-www-form-urlencoded
                post_data_vstream = ''
                for key in post_data:
                    if post_data_vstream == '':
                        post_data_vstream = key + '=' + post_data[key]
                    else:
                        post_data_vstream = post_data_vstream + '&' + key + '=' + post_data[key]
                oRequestHandler.setRequestType(
                    cRequestHandler.REQUEST_TYPE_POST)
                oRequestHandler.addParametersLine(post_data_vstream)
            data = oRequestHandler.request()
            sts = True
            # import the cookies solved by the Vstream handler into our jar
            printDBG('cook_vstream_file=' +
                     self.up.getDomain(baseUrl).replace('.', '_'))
            cook = GestionCookie().Readcookie(
                self.up.getDomain(baseUrl).replace('.', '_'))
            printDBG('cook_vstream=' + cook)
            if ';' in cook:
                cook_tab = cook.split(';')
            else:
                # FIX: iterating the bare string walked it character by
                # character, so a single cookie was silently dropped
                cook_tab = [cook]
            cj = self.cm.getCookie(self.COOKIE_FILE)
            for item in cook_tab:
                if '=' in item:
                    printDBG('item=' + item)
                    # FIX: split only on the first '=' so values containing
                    # '=' (e.g. base64) survive intact
                    cookieKey, cookieValue = item.split('=', 1)
                    cookieItem = cookielib.Cookie(
                        version=0,
                        name=cookieKey,
                        value=cookieValue,
                        port=None,
                        port_specified=False,
                        domain='.' + self.cm.getBaseUrl(baseUrl, True),
                        domain_specified=True,
                        domain_initial_dot=True,
                        path='/',
                        path_specified=True,
                        secure=False,
                        expires=time.time() + 3600 * 48,
                        discard=True,
                        comment=None,
                        comment_url=None,
                        rest={'HttpOnly': None},
                        rfc2109=False)
                    cj.set_cookie(cookieItem)
            cj.save(self.COOKIE_FILE, ignore_discard=True)
        except Exception as e:
            printDBG('ERREUR:' + str(e))
            # fall back to e2iplayer's built-in Cloudflare solver
            printDBG('Start CLoudflare E2iplayer methode')
            addParams['cloudflare_params'] = {
                'domain': self.up.getDomain(baseUrl),
                'cookie_file': self.COOKIE_FILE,
                'User-Agent': self.USER_AGENT
            }
            sts, data = self.cm.getPageCFProtection(
                baseUrl, addParams, post_data)
    return sts, data
def isAgeGateAllowed():
    """Return the user's Age-Gate bypass preference from the plugin config."""
    allowed = config.plugins.iptvplayer.ytAgeGate.value
    printDBG("ALLOW Age-Gate bypass: >> %s" % allowed)
    return allowed
def exploreItem(self, cItem, nextCategory):
    """Extract playable links (trailer, direct files, playlists, iframes)
    from one kinogo.cc title page and add them via self.addVideo().

    cItem        - navigation item; reads 'url' and 'title'.
    nextCategory - unused here; kept for signature compatibility with callers.
    """
    # FIX: the debug label was copy-pasted as "KinogoCC.listItems"
    printDBG("KinogoCC.exploreItem")
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    self.setMainUrl(data.meta['url'])
    data = self.cm.ph.getDataBeetwenNodes(data, ('<div', '>', 'section'),
                                          ('<', '>', 'social'))[1]
    printDBG(data)
    # tab captions ("watch", "trailer", ...); pad with defaults when missing
    titles = self.cm.ph.getDataBeetwenNodes(data, ('<ul', '>', 'tabs'),
                                            ('</ul', '>'))[1]
    titles = self.cm.ph.getAllItemsBeetwenMarkers(titles, '<li', '</li>')
    if len(titles) == 0:
        titles.append(_('watch'))
    if len(titles) < 2:
        titles.append(_('trailer'))
    # trailer
    iTrailer = self.cm.ph.getSearchGroups(
        data,
        '''['"](https?://[^'^"]*?youtube[^'^"]*?watch[^'^"]*?)['"]''')[0]
    if iTrailer != '':
        params = dict(cItem)
        params.update({
            'good_for_fav': False,
            'url': iTrailer,
            'title': '%s - %s' % (cItem['title'],
                                  self.cleanHtmlStr(titles[-1]))
        })
        self.addVideo(params)
    # watch online: legacy flash player embeds its config base64-encoded
    tmp = self.cm.ph.getDataBeetwenMarkers(data, 'Base64.decode(', ')',
                                           False)[1].strip()
    if tmp != '':
        try:
            # tmp[1:-1] strips the surrounding quote characters
            data = base64.b64decode(tmp[1:-1])
            data = self.cm.ph.getSearchGroups(
                data, '''(<param[^>]+?flashvars[^>]+?>)''',
                ignoreCase=True)[0]
            data = self.cm.ph.getSearchGroups(
                data, '''value=['"]([^'^"]+?)['"]''')[0]
            data = data.split('&')
            fileMarker = 'file='
            playlistMarker = 'pl='
            for item in data:
                item = item.strip()
                if item.startswith(fileMarker):
                    # direct video file
                    url = item[len(fileMarker):]
                    printDBG(">> url[%s]" % url)
                    tmp = url.lower().split('?', 1)[0]
                    if self.cm.isValidUrl(url) and \
                       tmp.split('.')[-1] in ['flv', 'mp4']:
                        params = dict(cItem)
                        params.update({'good_for_fav': False, 'url': url})
                        self.addVideo(params)
                if item.startswith(playlistMarker):
                    # .txt playlist with JSON-ish {comment, file} entries
                    url = item[len(playlistMarker):]
                    printDBG(">> url[%s]" % url)
                    tmp = url.lower().split('?', 1)[0]
                    if self.cm.isValidUrl(url) and tmp.endswith('.txt'):
                        urlParams = dict(self.defaultParams)
                        urlParams['convert_charset'] = False
                        sts, tmp = self.getPage(url, urlParams)
                        if not sts:
                            continue
                        printDBG(">>\n%s\n<<" % tmp)
                        tmp = tmp.split('},')
                        for item in tmp:
                            title = self.cleanHtmlStr(
                                self.cm.ph.getSearchGroups(
                                    item,
                                    '''['"]comment['"]\s*?:\s*?['"]([^'^"]+?)['"]'''
                                )[0])
                            url = self.cm.ph.getSearchGroups(
                                item,
                                '''['"]file['"]\s*?:\s*?['"](https?://[^'^"]+?)['"]'''
                            )[0]
                            if url == '':
                                continue
                            params = dict(cItem)
                            params.update({
                                'good_for_fav': False,
                                'title': '%s %s' % (cItem['title'], title),
                                'url': url
                            })
                            self.addVideo(params)
        except Exception:
            printExc()
    else:
        # modern layout: iframes to external hostings + inline file urls
        urlsTab = []
        iframes = self.cm.ph.getAllItemsBeetwenMarkers(
            data, '<iframe', '</iframe>')
        for iframe in iframes:
            url = self.cm.ph.getSearchGroups(
                iframe, '''src=["']([^"^']+?)["']''')[0]
            if self.cm.isValidUrl(url):
                params = self.up.getVideoLinkExt(url)
                printDBG(str(params))
                urlsTab.extend(params)
        data = re.compile(
            '''['"]?file['"]?\s*?:\s*?['"](https?://[^'^"]+?(?:\.flv|\.mp4)(?:\?[^'^"]*?)?)['"]''',
            re.I).findall(data)
        for item in data:
            name = item.split('?', 1)[0].split('.')[-1]
            params = {
                'name': name,
                'url': strwithmeta(item, {'Referer': self.getMainUrl()}),
                'need_resolve': 0
            }
            # prefer flv entries at the front of the list
            if name == 'flv':
                urlsTab.insert(0, params)
            else:
                urlsTab.append(params)
        if len(urlsTab):
            params = dict(cItem)
            params.update({'good_for_fav': False, 'urls_tab': urlsTab})
            self.addVideo(params)
def getSearchResult(self, pattern, searchType, page, nextPageCategory,
                    sortBy=''):
    """Search YouTube and return a list of video item dicts.

    pattern/searchType/sortBy/page are interpolated into the legacy
    /results query URL. nextPageCategory is accepted for interface
    compatibility but not used by the current implementation.
    Returns [] on any failure.
    """
    printDBG(
        'YouTubeParser.getSearchResult pattern[%s], searchType[%s], page[%s]'
        % (pattern, searchType, page))
    currList = []
    try:
        url = 'http://www.youtube.com/results?search_query=%s&filters=%s&search_sort=%s&page=%s' % (
            pattern, searchType, sortBy, page)
        sts, data = self.cm.getPage(url, self.http_params)
        if sts:
            if data:
                data2 = self.cm.ph.getAllItemsBeetwenMarkers(
                    data, '"videoRenderer"', '}},')
                for item in data2:
                    tmp = item.replace('}},', '}}}').replace('\u0026', '&')
                    icon = self.cm.ph.getSearchGroups(
                        item, '''"url"\s*:\s*"([^"]+?)"''')[0]
                    url = 'http://www.youtube.com/watch?v=%s' % self.cm.ph.getSearchGroups(
                        item, '''"videoId"\s*:\s*"([^"]+?)"''')[0]
                    # FIX: the original rebound `data` here, clobbering the
                    # page HTML so the continuation parsing below always
                    # failed (re.search on a dict).
                    itemData = json_loads('{' + tmp + '}')
                    title = itemData['videoRenderer']['title'][
                        'accessibility']['accessibilityData']['label']
                    # duration not extracted from this renderer
                    timeStr = ''
                    desc = title
                    params = {
                        'type': 'video',
                        'category': 'video',
                        'title': title,
                        # NOTE(review): replace('&', '&') is a no-op;
                        # presumably this once unescaped '\\u0026' — kept as-is.
                        'url': url,
                        'icon': icon.replace('&', '&'),
                        'time': timeStr,
                        'desc': desc
                    }
                    currList.append(params)
                data2 = None
                # nextPage
                nextPage = ''  # FIX: could be read before assignment below
                try:
                    match = re.search('"continuation":"([^"]+?)"', data)
                    xsrf_token = data.split("XSRF_TOKEN\":\"")[1].split(
                        "\"")[0]
                    ctoken = data.split(
                        "\"nextContinuationData\":{\"continuation\":\""
                    )[1].split("\"")[0]
                    itct = data.split(
                        "\"{}\",\"clickTrackingParams\":\"".format(
                            ctoken))[1].split("\"")[0]
                    self.postdata = {"session_token": xsrf_token}
                    if not match:
                        nextPage = ""
                    else:
                        nextPage = 'https://www.youtube.com/comment_service_ajax?action_get_comments=1&continuation=%s&pbj=1&ctoken=%s&itct=%s' % (
                            ctoken, ctoken, itct)
                except Exception:
                    printExc()
                try:
                    if '' != nextPage:
                        # FIX: the original did dict(cItem) but no `cItem`
                        # exists in this scope — the NameError was swallowed
                        # and the next-page entry was never appended.
                        item = {}
                        item.update({
                            'title': _("Next page"),
                            'page': str(int(page) + 1),
                            'url': nextPage
                        })
                        currList.append(item)
                except Exception:
                    printExc()
    except Exception:
        printExc()
        return []
    return currList
def getDirectLinks(self,
                   url,
                   formats='flv, mp4',
                   dash=True,
                   dashSepareteList=False,
                   allowVP9=None,
                   allowAgeGate=None):
    """Resolve a YouTube URL to playable stream entries.

    Returns a list of link dicts (or (links, dashLinks) when
    dashSepareteList is True). Live-channel '/live' URLs are first resolved
    to a concrete video id. DASH audio/video pairs are merged via the
    'merge://' pseudo-protocol; HLS is attempted as a fallback.
    """
    printDBG("YouTubeParser.getDirectLinks")
    linksList = []
    try:
        if self.cm.isValidUrl(url) and '/channel/' in url and url.endswith(
                '/live'):
            # resolve a live channel page to its current video id
            sts, data = self.cm.getPage(url)
            if sts:
                videoId = self.cm.ph.getSearchGroups(
                    data,
                    '''<meta[^>]+?itemprop=['"]videoId['"][^>]+?content=['"]([^'^"]+?)['"]'''
                )[0]
                if videoId == '':
                    videoId = self.cm.ph.getSearchGroups(
                        data,
                        '''['"]REDIRECT_TO_VIDEO['"]\s*\,\s*['"]([^'^"]+?)['"]'''
                    )[0]
                if videoId == '':
                    videoId = ph.search(data, 'video_id=(.*?)"')[0]
                if videoId != '':
                    url = 'https://www.youtube.com/watch?v=' + videoId
        linksList = YoutubeIE()._real_extract(url,
                                              allowVP9=allowVP9,
                                              allowAgeGate=allowAgeGate)
    except Exception:
        printExc()
        if dashSepareteList:
            return [], []
        else:
            return []
    reNum = re.compile('([0-9]+)')
    retHLSList = []
    retList = []
    dashList = []
    # filter dash
    dashAudioLists = []
    dashVideoLists = []
    if dash:
        # separate audio and video links
        for item in linksList:
            if 'mp4a' == item['ext']:
                dashAudioLists.append(item)
            elif item['ext'] in ('mp4v', 'webmv'):
                dashVideoLists.append(item)
            elif 'mpd' == item['ext']:
                tmpList = getMPDLinksWithMeta(item['url'], checkExt=False)
                printDBG(tmpList)
                for idx in range(len(tmpList)):
                    tmpList[idx]['format'] = "%sx%s" % (
                        tmpList[idx].get('height', 0),
                        tmpList[idx].get('width', 0))
                    tmpList[idx]['ext'] = "mpd"
                    tmpList[idx]['dash'] = True
                dashList.extend(tmpList)

        # sort by quality -> format
        def _key(x):
            # FIX: the original computed these ints but never returned them,
            # so every key was None and the quality sort was a no-op.
            if x['format'].startswith('>'):
                return int(x['format'][1:-1])
            return int(ph.search(x['format'], reNum)[0])

        dashAudioLists = sorted(dashAudioLists, key=_key, reverse=True)
        dashVideoLists = sorted(dashVideoLists, key=_key, reverse=True)
    for item in linksList:
        printDBG(">>>>>>>>>>>>>>>>>>>>>")
        printDBG(item)
        printDBG("<<<<<<<<<<<<<<<<<<<<<")
        if -1 < formats.find(item['ext']):
            if 'yes' == item['m3u8']:
                format = re.search('([0-9]+?)p$', item['format'])
                if format != None:
                    item['format'] = format.group(1) + "x"
                    item['ext'] = item['ext'] + "_M3U8"
                    item['url'] = decorateUrl(item['url'],
                                              {"iptv_proto": "m3u8"})
                    retHLSList.append(item)
            else:
                format = re.search('([0-9]+?x[0-9]+?$)', item['format'])
                if format != None:
                    item['format'] = format.group(1)
                    item['url'] = decorateUrl(item['url'])
                    retList.append(item)
    if len(dashAudioLists):
        # use best audio
        for item in dashVideoLists:
            item = dict(item)
            item["url"] = decorateUrl(
                "merge://audio_url|video_url", {
                    'audio_url': dashAudioLists[0]['url'],
                    'video_url': item['url']
                })
            dashList.append(item)
    # try to get hls format with alternative method
    if 0 == len(retList):
        try:
            video_id = YoutubeIE()._extract_id(url)
            url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
            sts, data = self.cm.getPage(
                url, {
                    'header': {
                        'User-agent':
                        'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10'
                    }
                })
            if sts:
                data = data.replace('\\"', '"').replace('\\\\\\/', '/')
                hlsUrl = self.cm.ph.getSearchGroups(
                    data, '''"hlsvp"\s*:\s*"(https?://[^"]+?)"''')[0]
                hlsUrl = json_loads('"%s"' % hlsUrl)
                if self.cm.isValidUrl(hlsUrl):
                    hlsList = getDirectM3U8Playlist(hlsUrl)
                    if len(hlsList):
                        dashList = []
                        for item in hlsList:
                            # FIX: 'with'/'heigth' look like typos of
                            # 'width'/'height'; prefer the correct keys but
                            # keep the old ones as a fallback.
                            item['format'] = "%sx%s" % (
                                item.get('width', item.get('with', 0)),
                                item.get('height', item.get('heigth', 0)))
                            item['ext'] = "m3u8"
                            item['m3u8'] = True
                            retList.append(item)
        except Exception:
            printExc()
        if 0 == len(retList):
            retList = retHLSList
    if dash:
        try:
            sts, data = self.cm.getPage(
                url, {
                    'header': {
                        'User-agent':
                        'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
                    }
                })
            data = data.replace('\\"', '"').replace('\\\\\\/',
                                                    '/').replace('\\/', '/')
            dashUrl = self.cm.ph.getSearchGroups(
                data, '''"dashmpd"\s*:\s*"(https?://[^"]+?)"''')[0]
            dashUrl = json_loads('"%s"' % dashUrl)
            if '?' not in dashUrl:
                dashUrl += '?mpd_version=5'
            else:
                dashUrl += '&mpd_version=5'
            printDBG("DASH URL >> [%s]" % dashUrl)
            if self.cm.isValidUrl(dashUrl):
                dashList = getMPDLinksWithMeta(dashUrl, checkExt=False)
                printDBG(dashList)
                for idx in range(len(dashList)):
                    dashList[idx]['format'] = "%sx%s" % (
                        dashList[idx].get('height', 0),
                        dashList[idx].get('width', 0))
                    dashList[idx]['ext'] = "mpd"
                    dashList[idx]['dash'] = True
        except Exception:
            printExc()
    # HLS live streams: start playback 30 segments from the live edge
    for idx in range(len(retList)):
        if retList[idx].get('m3u8', False):
            retList[idx]['url'] = strwithmeta(
                retList[idx]['url'], {'iptv_m3u8_live_start_index': -30})
    if dashSepareteList:
        return retList, dashList
    else:
        retList.extend(dashList)
        return retList
def getVideoLinks(self, videoUrl):
    """Resolve *videoUrl* to a list of playable link dicts for LosMovies.

    Marks the requested link as used ('*' prefix) in self.cacheLinks,
    delegates to the generic hosting resolver when possible, otherwise
    scrapes the player page for 'sources'/<source> entries and attaches any
    subtitle tracks found as strwithmeta metadata.
    """
    printDBG("LosMovies.getVideoLinks [%s]" % videoUrl)
    urlTab = []
    # mark requested link as used one
    if len(self.cacheLinks.keys()):
        for key in self.cacheLinks:
            for idx in range(len(self.cacheLinks[key])):
                if videoUrl in self.cacheLinks[key][idx]['url']:
                    if not self.cacheLinks[key][idx]['name'].startswith(
                            '*'):
                        self.cacheLinks[key][idx][
                            'name'] = '*' + self.cacheLinks[key][idx][
                                'name']
                    break
    if not self.cm.isValidUrl(videoUrl):
        return []
    # try the generic external-hosting resolver first
    tab = self.up.getVideoLinkExt(videoUrl)
    if len(tab):
        return tab
    sts, data = self.getPage(videoUrl, self.defaultParams)
    if not sts:
        return []
    printDBG(data)
    # anti-bot interstitial: solve it and refetch with the computed cookie
    if 'onlinemovietv' in self.up.getDomain(
            videoUrl) and 'You are being redirected' in data:
        cookie = self.unSecure(data)
        if cookie != None:
            params = dict(self.defaultParams)
            params['cookie_items'] = cookie
            sts, data = self.getPage(videoUrl, params)
            if not sts:
                return []
    #printDBG(data)
    subTracks = []
    # two page layouts: a JS 'sources' array ("file": url) or plain HTML
    # <source src=...> tags — pick attribute name and separator accordingly
    tmp = self.cm.ph.getDataBeetwenMarkers(data, 'sources', ']')[1]
    if tmp != '':
        tmp = tmp.split('}')
        urlAttrName = 'file'
        sp = ':'
    else:
        tmp = self.cm.ph.getAllItemsBeetwenMarkers(data,
                                                   '<source',
                                                   '>',
                                                   withMarkers=True)
        urlAttrName = 'src'
        sp = '='
    for item in tmp:
        url = self.cm.ph.getSearchGroups(
            item.replace('\\/', '/'),
            r'''['"]?{0}['"]?\s*{1}\s*['"](https?://[^"^']+)['"]'''.format(
                urlAttrName, sp))[0]
        if not self.cm.isValidUrl(url):
            continue
        name = self.cm.ph.getSearchGroups(
            item,
            r'''['"]?label['"]?\s*{0}\s*['"]?([^"^'^,]+)[,'"]'''.format(
                sp))[0]
        printDBG('---------------------------')
        printDBG('url: ' + url)
        printDBG('name: ' + name)
        printDBG('+++++++++++++++++++++++++++')
        printDBG(item)
        if 'mp4' in item:
            urlTab.append({'name': name, 'url': url})
        elif 'captions' in item:
            # subtitle track; format taken from the URL extension
            format = url[-3:]
            if format in ['srt', 'vtt']:
                subTracks.append({
                    'title': name,
                    'url': self.getFullIconUrl(url),
                    'lang': name,
                    'format': format
                })
    printDBG(subTracks)
    # attach subtitles to every returned link via url metadata
    if len(subTracks):
        for idx in range(len(urlTab)):
            urlTab[idx]['url'] = strwithmeta(
                urlTab[idx]['url'], {'external_sub_tracks': subTracks})
    return urlTab
def isVP9Allowed():
    """VP9 is allowed only when DASH is allowed AND the user enabled it."""
    allowed = config.plugins.iptvplayer.ytVP9.value
    printDBG("1. ALLOW VP9: >> %s" % allowed)
    allowed = YouTubeParser.isDashAllowed() and allowed
    printDBG("2. ALLOW VP9: >> %s" % allowed)
    return allowed
def exploreItem(self, cItem):
    """Extract trailer/movie/episode links from one xrysoi.se title page.

    Handles three layouts: '-collection' pages (several titles with their
    own link groups), series pages ('>Season'/'>Σεζόν' sections with
    per-episode links) and plain movie pages. Adds entries via
    self.addVideo().
    """
    printDBG("XrysoiSE.exploreItem")
    sts, data = self.cm.getPage(cItem['url'])
    if not sts:
        return
    desc = self.cleanHtmlStr(
        self.cm.ph.getSearchGroups(
            data,
            '<meta[^>]*?property="og:description"[^>]*?content="([^"]+?)"')
        [0])
    title = self.cleanHtmlStr(
        self.cm.ph.getSearchGroups(
            data,
            '<meta[^>]*?property="og:title"[^>]*?content="([^"]+?)"')[0])
    if '' == title:
        title = cItem['title']
    # trailer link extraction
    trailerMarker = '/trailer'
    sts, trailer = self.cm.ph.getDataBeetwenMarkers(
        data, trailerMarker, '</iframe>', False, False)
    if sts:
        trailer = self.cm.ph.getSearchGroups(trailer,
                                             '<iframe[^>]+?src="([^"]+?)"',
                                             1,
                                             ignoreCase=True)[0]
        if trailer.startswith('//'):
            trailer = 'http:' + trailer
        if trailer.startswith('http'):
            params = dict(cItem)
            params['title'] = 'TRAILER'
            params['mode'] = 'trailer'
            params['links'] = [{
                'name': 'TRAILER',
                'url': trailer,
                'need_resolve': 1
            }]
            params['desc'] = desc
            self.addVideo(params)
    # locate the start marker of the links section
    ms1 = '<b>ΠΕΡΙΛΗΨΗ</b>'
    if ms1 in data:
        m1 = ms1
    elif trailerMarker in data:
        m1 = trailerMarker
    else:
        m1 = '<!-- END TAG -->'
    sts, linksData = self.cm.ph.getDataBeetwenMarkers(
        data, m1, '<center>', False, False)
    if not sts:
        return
    mode = cItem.get('mode', 'unknown')
    # find all links for this season
    eLinks = {}
    episodes = []
    if '-collection' in cItem['url']:
        mode = 'collect_item'
        # try several separators used by different collection layouts
        spTab = [
            re.compile('<b>'),
            re.compile(
                '<div[\s]+class="separator"[\s]+style="text-align\:[\s]+center;">'
            ),
            re.compile('<div[\s]+style="text-align\:[\s]+center;">')
        ]
        for sp in spTab:
            if None != sp.search(linksData):
                break
        collectionItems = sp.split(linksData)
        if len(collectionItems) > 0:
            del collectionItems[0]
        linksData = ''
        for item in collectionItems:
            itemTitle = item.find('<')
            if itemTitle < 0:
                continue
            itemTitle = self.cleanHtmlStr(item[:itemTitle])
            linksData = re.compile(
                '<a[^>]*?href="([^"]+?)"[^>]*?>').findall(item)
            links = []
            for itemUrl in linksData:
                if 1 != self.up.checkHostSupport(itemUrl):
                    continue
                links.append({
                    'name': self.up.getHostName(itemUrl),
                    'url': itemUrl,
                    'need_resolve': 1
                })
            if len(links):
                params = dict(cItem)
                params.update({
                    'title': itemTitle,
                    'mode': mode,
                    'links': links,
                    'desc': desc
                })
                self.addVideo(params)
    elif '>Season' in linksData or '>Σεζόν' in linksData:
        if '>Season' in linksData:
            seasonMarker = '>Season'
        else:
            seasonMarker = '>Σεζόν'
        mode = 'episode'
        seasons = linksData.split(seasonMarker)
        if len(seasons) > 0:
            del seasons[0]
        for item in seasons:
            seasonID = item.find('<')
            if seasonID < 0:
                continue
            # keep one extra char so the '[0-9]+?[^0-9]' regex can match
            seasonID = item[:seasonID + 1]
            seasonID = self.cm.ph.getSearchGroups(seasonID,
                                                  '([0-9]+?)[^0-9]')[0]
            if '' == seasonID:
                continue
            episodesData = re.compile(
                '<a[^>]*?href="([^"]+?)"[^>]*?>([^<]+?)</a>').findall(item)
            for eItem in episodesData:
                eUrl = eItem[0]
                eID = eItem[1].strip()
                if eUrl.startswith('//'):
                    # FIX: original appended 'http' to the END of the URL
                    # ("//host/pathhttp"); prepend the scheme instead, the
                    # same way the trailer URL is fixed above.
                    eUrl = 'http:' + eUrl
                if 1 != self.up.checkHostSupport(eUrl):
                    continue
                linksID = '-s{0}e{1}'.format(seasonID, eID)
                if linksID not in eLinks:
                    eLinks[linksID] = []
                    episodes.append({
                        'linksID': linksID,
                        'episode': eID,
                        'season': seasonID
                    })
                eLinks[linksID].append({
                    'name': self.up.getHostName(eUrl),
                    'url': eUrl,
                    'need_resolve': 1
                })
        for item in episodes:
            linksID = item['linksID']
            if len(eLinks[linksID]):
                params = dict(cItem)
                params.update({
                    'title': title + linksID,
                    'mode': mode,
                    'episode': item['episode'],
                    'season': item['season'],
                    'links': eLinks[linksID],
                    'desc': desc
                })
                self.addVideo(params)
    else:
        # plain movie page
        links = self.getLinksForMovie(linksData)
        if len(links):
            params = dict(cItem)
            params['mode'] = 'movie'
            params['links'] = links
            params['desc'] = desc
            self.addVideo(params)
def parseListBase(self, data, type='video'):
    """Build item dicts from pre-split YouTube HTML fragments.

    data - list of HTML snippets, one per listing entry.
    type - one of urlPatterns' keys; selects the URL extraction regex,
           the URL prefix and the resulting item 'type'.
    Returns a list of param dicts (type/category/title/url/icon/time/desc).
    """
    printDBG("parseListBase----------------")
    # per-type: [item type, url-extraction regex, url prefix]
    urlPatterns = {
        'video': ['video', 'href="[ ]*?(/watch\?v=[^"]+?)"', ''],
        'channel': ['category', 'href="(/[^"]+?)"', ''],
        'playlist': ['category', 'list=([^"]+?)"', '/playlist?list='],
        'movie':
        ['video', 'data-context-item-id="([^"]+?)"', '/watch?v='],
        'live': ['video', 'href="(/watch\?v=[^"]+?)"', ''],
        'tray': ['video', 'data-video-id="([^"]+?)"', '/watch?v='],
    }
    currList = []
    for i in range(len(data)):
        #printDBG("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
        # get requaired params
        url = urlPatterns[type][2] + self.getAttributes(
            urlPatterns[type][1], data[i])
        # get title: try each known layout in turn until one matches
        title = ''  #self.getAttributes('title="([^"]+?)"', data[i])
        if '' == title:
            title = self.getAttributes(
                'data-context-item-title="([^"]+?)"', data[i])
        if '' == title:
            title = self.getAttributes('data-video-title="([^"]+?)"',
                                       data[i])
        if '' == title:
            sts, title = CParsingHelper.getDataBeetwenMarkers(
                data[i], '<h3 class="yt-lockup-title">', '</h3>', False)
        if '' == title:
            sts, title = CParsingHelper.getDataBeetwenReMarkers(
                data[i], re.compile('<span [^>]*?class="title[^>]*?>'),
                re.compile('</span>'), False)
        if '' == title:
            sts, title = CParsingHelper.getDataBeetwenReMarkers(
                data[i], re.compile('class="pl-video-title-link[^>]*?>'),
                re.compile('<'), False)
        if '' == title:
            # last resort: find the opening lockup-title tag and pair it
            # with its computed closing tag
            titleMarker = self.cm.ph.getSearchGroups(
                data[i], '(<[^">]+?"yt-lockup-title[^"]*?"[^>]*?>)')[0]
            if '' != titleMarker:
                tidx = titleMarker.find(' ')
                if tidx > 0:
                    tmarker = titleMarker[1:tidx]
                    title = self.cm.ph.getDataBeetwenMarkers(
                        data[i], titleMarker, '</%s>' % tmarker)[1]
                    if '' != title:
                        title = CParsingHelper.cleanHtmlStr(title)
        if i == 0:
            printDBG(data[i])
        # thumbnail: prefer explicit data-thumb, ignore .gif placeholders
        img = self.getAttributes('data-thumb="([^"]+?\.jpg[^"]*?)"',
                                 data[i])
        if '' == img:
            img = self.getAttributes('src="([^"]+?\.jpg[^"]*?)"', data[i])
        if '' == img:
            img = self.getAttributes('<img[^>]+?data\-thumb="([^"]+?)"',
                                     data[i])
        if '' == img:
            img = self.getAttributes('<img[^>]+?src="([^"]+?)"', data[i])
        if '.gif' in img:
            img = ''
        # duration label, several layouts
        time = self.getAttributes('data-context-item-time="([^"]+?)"',
                                  data[i])
        if '' == time:
            time = self.getAttributes('class="video-time">([^<]+?)</span>',
                                      data[i])
        if '' == time:
            sts, time = CParsingHelper.getDataBeetwenReMarkers(
                data[i], re.compile('pl-video-time"[^>]*?>'),
                re.compile('<'), False)
        if '' == time:
            sts, time = CParsingHelper.getDataBeetwenReMarkers(
                data[i], re.compile('timestamp"[^>]*?>'), re.compile('<'),
                False)
        time = time.strip()
        # desc
        descTab = []
        desc = self.cm.ph.getDataBeetwenMarkers(
            data[i], '<div class="yt-lockup-meta', '</div>')[1]
        if desc != '':
            descTab.append(desc)
        desc = self.cm.ph.getDataBeetwenMarkers(
            data[i], '<span class="formatted-video-count', '</span>')[1]
        if desc != '':
            descTab.append(desc)
        desc = self.cm.ph.getDataBeetwenReMarkers(
            data[i], re.compile('class="video-description[^>]+?>'),
            re.compile('</p>'), False)[1]
        if '' == desc:
            desc = self.cm.ph.getDataBeetwenReMarkers(
                data[i], re.compile('class="yt-lockup-description[^>]+?>'),
                re.compile('</div>'), False)[1]
        if desc != '':
            descTab.append(desc)
        newDescTab = []
        for desc in descTab:
            desc = CParsingHelper.cleanHtmlStr(desc)
            if desc != '':
                newDescTab.append(desc)
        # drop any ';...' suffix; for videos also drop extra query params
        urlTmp = url.split(';')
        if len(urlTmp) > 0:
            url = urlTmp[0]
        if type == 'video':
            url = url.split('&')[0]
        #printDBG("#####################################")
        #printDBG('url [%s] ' % url)
        #printDBG('title [%s] ' % title)
        #printDBG('img [%s] ' % img)
        #printDBG('time [%s] ' % time)
        #printDBG('desc [%s] ' % desc)
        if title != '' and url != '' and img != '':
            # normalize url and icon to absolute http(s) URLs
            correctUrlTab = [url, img]
            # NOTE(review): this inner loop shadows the outer index `i`;
            # harmless here only because it is the last use of `i` in the
            # body (the outer range iterator is unaffected).
            for i in range(len(correctUrlTab)):
                if not correctUrlTab[i].startswith(
                        'http:') and not correctUrlTab[i].startswith(
                            'https:'):
                    if correctUrlTab[i].startswith("//"):
                        correctUrlTab[i] = 'http:' + correctUrlTab[i]
                    else:
                        correctUrlTab[
                            i] = 'http://www.youtube.com' + correctUrlTab[i]
            #else:
            #    if correctUrlTab[i].startswith('https:'):
            #        correctUrlTab[i] = "http:" + correctUrlTab[i][6:]
            title = CParsingHelper.cleanHtmlStr(title)
            params = {
                'type': urlPatterns[type][0],
                'category': type,
                'title': title,
                'url': correctUrlTab[0],
                # NOTE(review): replace('&', '&') is a no-op; presumably it
                # once unescaped '&amp;' — left untouched.
                'icon': correctUrlTab[1].replace('&', '&'),
                'time': time,
                'desc': '[/br]'.join(newDescTab)
            }
            currList.append(params)
    return currList
def getMainCategory(self):
    """Scrape the main category tree from spryciarze.pl.

    Fills self.currList with dicts: one 'main' entry per category
    (url, name, item count 'ilosc', empty 'subCatList') plus a trailing
    'search' entry. Returns None.
    """
    printDBG('Spryciarze.getMainCategory')
    self.catTree = []
    self.currList = []
    sts, data = self.cm.getPage(self.MAIN_CATEGORIES_URL)
    if not sts:
        return
    # clear punks: cut off the right-hand column, categories are before it
    printDBG('Before clear')
    pos = data.find('<div class="content_prawo">')
    if pos > -1:
        data = data[:pos]
    printDBG('After clear')
    catTab = data.split('<div class="box_kategorie_item_head">')
    # free memory
    data = ''
    # first split chunk is the page header, not a category
    if len(catTab) > 0:
        del catTab[0]
    printDBG('catTab len %d' % len(catTab))
    for i in range(len(catTab)):
        subTab = catTab[i].split('<div class="box_kategorie_item_lista">')
        # Free memory
        catTab[i] = ''
        if 2 == len(subTab):
            # Get Main category data: icon class, url, name, item count
            pattern = '<div class="box_kategorie_item_head_ico (.+?)"></div>.+?<a href="(.+?)" class="box_kategorie_item_head_tytul">(.+?)</a>.+?<div class="box_kategorie_item_head_ilosc">\(([0-9]+?)\)</div>.+?<div class="box_kategorie_item_head_bottom">'
            match = re.compile(pattern, re.DOTALL).findall(subTab[0])
            if len(match) == 1:
                catItem = {
                    'type': 'main',
                    'url': match[0][1],
                    'name': match[0][2],
                    'ilosc': match[0][3],  # item count shown next to name
                    'subCatList': []
                }
                self.currList.append(catItem)
            else:
                printDBG('getMainCategory ignore wrong data for category')
        else:
            printDBG(
                'getMainCategory ignore wrong data for category: 2 != len(subTab)'
            )
    # always append the search pseudo-category at the end
    catItem = {'type': 'search', 'name': 'Wyszukaj', 'subCatList': []}
    self.currList.append(catItem)
    return
def listItems(self, cItem, nextCategory=None):
    """List one catalogue/search page of losmovies and add items.

    cItem        - navigation item; reads 'url', 'category', optional
                   'page' and 'letter'.
    nextCategory - category assigned to directory (non-video) entries.
    """
    printDBG("LosMovies.listItems")
    url = cItem['url']
    page = cItem.get('page', 1)
    letter = cItem.get('letter', '')
    getParams = []
    if page > 1:
        getParams.append("page=%d" % page)
    if letter != '':
        getParams.append("letter=%s" % letter)
    getParams = '&'.join(getParams)
    # FIX: the original appended '?'/'&' even with an empty query string,
    # producing malformed URLs like "http://host/path?" on page 1.
    if getParams != '':
        if '?' in url:
            url += '&' + getParams
        else:
            url += '?' + getParams
    sts, data = self.getPage(url)
    if not sts:
        return
    # pagination: next page exists if a "page=<page+1>" link is present
    nextPage = self.cm.ph.getDataBeetwenMarkers(data, 'pagination',
                                                '</div>', False)[1]
    if '' != self.cm.ph.getSearchGroups(nextPage,
                                        'page=(%s)[^0-9]' % (page + 1))[0]:
        nextPage = True
    else:
        nextPage = False
    # listing pages use 'movie' anchors, genre pages use 'rubric'
    if cItem['category'] in ['list_items', 'search', 'search_next_page']:
        marker = 'movie'
    else:
        marker = 'rubric'
    data = self.cm.ph.getAllItemsBeetwenMarkers(data,
                                                '<div id="' + marker,
                                                '</h4>',
                                                withMarkers=True)
    for item in data:
        url = self.getFullUrl(
            self.cm.ph.getSearchGroups(item, 'href="([^"]+?)"')[0])
        if not self.cm.isValidUrl(url):
            continue
        icon = self.getFullUrl(
            self.cm.ph.getSearchGroups(item, 'src="([^"]+?)"')[0])
        desc = self.cleanHtmlStr(item)
        # title: <h4> text, falling back to title= / alt= attributes
        title = self.cleanHtmlStr(
            self.cm.ph.getDataBeetwenMarkers(item, '<h4', '</h4>')[1])
        if title == '':
            title = self.cleanHtmlStr(
                self.cm.ph.getSearchGroups(item, 'title="([^"]+?)"')[0])
        if title == '':
            title = self.cleanHtmlStr(
                self.cm.ph.getSearchGroups(item, 'alt="([^"]+?)"')[0])
        params = {
            'good_for_fav': True,
            'title': title,
            'url': url,
            'desc': desc,
            'info_url': url,
            'icon': icon
        }
        if 'class="movieTV"' not in item and '/movie-list/' not in item:
            self.addVideo(params)
        else:
            # TV series / lists become directories
            params['category'] = nextCategory
            params2 = dict(cItem)
            params2.update(params)
            self.addDir(params2)
    if nextPage and len(self.currList) > 0:
        params = dict(cItem)
        params.update({'title': _('Next page'), 'page': page + 1})
        self.addDir(params)