def __init__(self):
    # Initialize the ako.am host: base-class bookkeeping (history DB name,
    # cookie file name), browser-like default headers, AJAX headers and the
    # default request parameters reused by getPage calls.
    CBaseHostClass.__init__(self, {
        'history': 'ako.am',
        'cookie': 'ako.am.cookie'
    })
    self.MAIN_URL = 'https://ar.akoam.net/'
    # Reuse the user agent provided by the common manager so all requests look alike.
    self.USER_AGENT = self.cm.getDefaultHeader()['User-Agent']
    self.HTTP_HEADER = {
        'User-Agent': self.USER_AGENT,
        'DNT': '1',
        'Accept': 'text/html',
        'Accept-Encoding': 'gzip, deflate',
        'Referer': self.getMainUrl(),
        'Origin': self.getMainUrl()
    }
    # AJAX requests additionally mark themselves as XMLHttpRequest and accept JSON.
    self.AJAX_HEADER = MergeDicts(self.HTTP_HEADER, {
        'X-Requested-With': 'XMLHttpRequest',
        'Accept-Encoding': 'gzip, deflate',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept': 'application/json, text/javascript, */*; q=0.01'
    })
    self.DEFAULT_ICON_URL = self.getFullIconUrl('/scripts/site/img/main_logo.png')
    # Cache of resolved video links, filled during listing.
    self.cacheLinks = {}
    # Default parameters for self.getPage: persistent cookie file + response metadata.
    self.defaultParams = {
        'header': self.HTTP_HEADER,
        'with_metadata': True,
        'use_cookie': True,
        'load_cookie': True,
        'save_cookie': True,
        'cookiefile': self.COOKIE_FILE
    }
def listMain(self, cItem, nextCategory):
    """Build the main menu: navbar categories plus static anime/search entries."""
    printDBG('Fenixsite >>>>>>>>>>>> listMain >> %s' % cItem)
    sts, pageData = self.getPage(self.getMainUrl())
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    navHtml = ph.find(pageData, ('<ul', '>', 'navbar'), '</ul>')[1]
    for entry in ph.findall(navHtml, ('<li', '>'), '</li>'):
        link = ph.getattr(entry, 'href')
        # only the foreign movies/series sections are taken from the navbar
        if '/strani_filmovi/' not in link and '/strane_serije/' not in link:
            continue
        self.addDir(MergeDicts(cItem, {
            'category': nextCategory,
            'url': self.getFullUrl(link),
            'title': ph.clean_html(entry),
            'desc': self.HOST_DESC
        }))
    staticTab = [
        {'category': nextCategory, 'title': 'Anime', 'url': self.getFullUrl('/load/anime_serije/95'), 'desc': self.HOST_DESC},
        {'category': 'search', 'title': _('Search'), 'search_item': True, 'desc': self.HOST_DESC},
        {'category': 'search_history', 'title': _('Search history'), 'desc': self.HOST_DESC},
    ]
    self.listsTab(staticTab, cItem)
def listSortMoviesSeries(self, cItem, nextCategory1, nextCategory2):
    """List the sort options (abc/date/imdb_rating) offered for movies or series."""
    printDBG("HDFull.listSort")
    sts, html = self.getPage(cItem['url'])
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    # a '#' entry inside the filter title means this listing exposes an A-Z menu
    filterTitle = self.cm.ph.getDataBeetwenMarkers(html, 'filter-title', '</div>', False)[1]
    hasABCMenu = '>#<' in filterTitle
    linksHtml = self.cm.ph.getDataBeetwenNodes(html, ('<div', '>', 'row-links-wrapper'), ('</div', '>'), False)[1]
    for anchor in self.cm.ph.getAllItemsBeetwenMarkers(linksHtml, '<a', '</a>'):
        link = self.getFullUrl(self.cm.ph.getSearchGroups(anchor, '''href=['"]([^'^"]+?)['"]''')[0])
        sortKind = link.rsplit('/')[-1]
        if hasABCMenu and sortKind == 'abc':
            targetCategory = nextCategory2
            fixNextPage = False
        elif sortKind in ['abc', 'date', 'imdb_rating']:
            targetCategory = nextCategory1
            fixNextPage = hasABCMenu
        else:
            printDBG("SKIP >> [%s] [%s] item[%s]" % (sortKind, link, anchor))
            continue
        self.addDir(MergeDicts(cItem, {
            'url': link,
            'title': self.cleanHtmlStr(anchor),
            'category': targetCategory,
            'fix_next_page': fixNextPage
        }))
def listItems(self, cItem):
    # List the video entries of one category page plus a 'Next page' item.
    printDBG("Fenixsite.listItems")
    page = cItem.get('page', 1)
    url = cItem['url']
    # Only the first page URL is composed here; later pages reuse the absolute
    # URL extracted from the pager below.
    if page == 1:
        url += '-%s-%s' % (page, cItem['f_sort'])
    sts, data = self.getPage(url)
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    # Category description appended after each item's own description.
    mainDesc = self.cleanHtmlStr(ph.find(data, ('<div', '>', 'shortstory-news'), '</div>', flags=0)[1].split('</h1>', 1)[-1])
    # Locate the pager and the anchor that switches to page+1.
    tmp = ph.find(data, ('<span', '>', 'pagesBlock'), '</td>')[1]
    tmp = ph.search(tmp, '''<a([^>]+?spages\(\s*?['"]%s['"][^>]*?)>''' % (page + 1))[0]
    nextPage = self.getFullUrl(ph.getattr(tmp, 'href'))
    # NOTE(review): '[^\1]' inside a character class does NOT back-reference
    # group 1 - it literally excludes the \x01 character. Probably harmless
    # here, but worth confirming against real pages.
    reIcon = re.compile(r'''<img[^>]+?src=(['"])([^>]*?\.(?:jpe?g|png|gif)(?:\?[^\1]*?)?)(?:\1)''', re.I)
    data = ph.findall(data, ('<div', '>', 'entry'), '</ul>')
    for item in data:
        url = self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1])
        icon = self.getFullIconUrl(ph.search(item, reIcon)[1])
        title = self.cleanHtmlStr(ph.find(item, ('<h', '>'), '</h', flags=0)[1])
        desc = []
        # NOTE(review): the 'comments' marker is searched twice - the third
        # entry was likely meant to target a different icon; confirm.
        tmp = [ph.find(item, ('<i', '>', 'eye'), '</span', flags=0)[1],
           ph.find(item, ('<i', '>', 'comments'), '</span', flags=0)[1],
           ph.find(item, ('<i', '>', 'comments'), '</span', flags=0)[1]]
        for t in tmp:
            t = self.cleanHtmlStr(t)
            if t:
                desc.append(t)
        tmp = ph.find(item, ('<ul', '>', 'title'))[1]
        desc.append(ph.getattr(tmp, 'title').replace('/', ' (') + ')')
        self.addVideo({'good_for_fav': True, 'title': title, 'url': url, 'icon': icon, 'desc': ' | '.join(desc) + '[/br]' + mainDesc})
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'url': nextPage, 'page': page + 1}))
def _listLists(self, cItem, nextCategory, data):
    """Return (without adding) directory params for every user list found in *data*."""
    printDBG("VidCorn._listLists")
    results = []
    blocks = self.cm.ph.getAllItemsBeetwenNodes(data, ('<div', '>', 'data-list'), ('<div', '>', 'list-content'))
    for block in blocks:
        listId = self.cm.ph.getSearchGroups(block, '''data\-list=['"]([^"^']+?)['"]''')[0]
        icon = self.getFullIconUrl(self.cm.ph.getSearchGroups(block, '''<img[^>]+?src=['"]([^"^']+?)['"]''')[0])
        title = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(block, '<h2', '</h2>')[1])
        url = self.getFullUrl(self.cm.ph.getSearchGroups(block, '''href=['"]([^"^']+?)["']''', 1, True)[0])
        # every non-empty <span> becomes one description line
        descTab = []
        for span in self.cm.ph.getAllItemsBeetwenNodes(block, ('<span', '>'), ('</span', '>'), False):
            cleaned = self.cleanHtmlStr(span)
            if cleaned:
                descTab.append(cleaned)
        results.append(MergeDicts(cItem, {
            'good_for_fav': True,
            'category': nextCategory,
            'title': title,
            'url': url,
            'list_id': listId,
            'f_type': 'listas',
            'icon': icon,
            'desc': '[/br]'.join(descTab)
        }))
    return results
def listSearchResult(self, cItem, searchPattern, searchType):
    # Perform the site search and group results into sub-directories, one per
    # header taken from the page's search menu (e.g. movies / series / lists).
    self.tryTologin()
    url = self.getFullUrl('/buscar/') + urllib.quote_plus(searchPattern)
    sts, data = self.getPage(url)
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    headersTitles = []
    headerData = self.cm.ph.getDataBeetwenNodes(data, ('<ul', '>', 'search-menu'), ('</ul', '>'), False)[1]
    headerData = self.cm.ph.getAllItemsBeetwenMarkers(headerData, '<li', '</li>')
    for item in headerData:
        headersTitles.append(self.cleanHtmlStr(item))
    data = self.cm.ph.getDataBeetwenNodes(data, ('<div', '>', 'search-item'), ('<div', '>', 'dialog'), False)[1]
    # Split into per-section chunks; chunk 0 is the text before the first
    # 'search-item' div and normally yields no items.
    data = re.compile('<div[^>]+?search\-item[^>]+?>').split(data)
    for idx in range(len(data)):
        itemData = data[idx]
        # NOTE(review): chunk 1 is assumed to be the user-lists section; the
        # mapping between chunks and headersTitles relies on the page layout
        # and would raise IndexError if the menu had fewer entries - confirm.
        if idx == 1:
            subItem = self._listLists(cItem, 'list_list_items', itemData)
        else:
            subItem = self._listItems(cItem, itemData)
        if len(subItem):
            params = MergeDicts(cItem, {
                'good_for_fav': False,
                'category': 'sub_items',
                'title': headersTitles[idx],
                'sub_items': subItem
            })
            self.addDir(params)
def _addTrailer(self, cItem, title, data):
    """Read the trailer request parameters from *data*, query the trailer AJAX
    endpoint and add the trailer video when the response is valid."""
    printDBG("CartoonHD._addTrailer")
    ajaxParams = dict(self.defaultParams)
    ajaxParams['header'] = {
        'Referer': self.cm.meta['url'],
        'User-Agent': self.cm.HOST,
        'X-Requested-With': 'XMLHttpRequest',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
    }
    anchor = self.cm.ph.getDataBeetwenNodes(data, ('<a', '>', 'watch-trailer'), ('</a', '>'))[1]
    # every data-* attribute of the anchor becomes one POST field
    postData = dict(re.compile('''\sdata\-([^=]+?)=['"]([^'^"]+?)['"]''').findall(anchor))
    sts, response = self.cm.getPage(self.getFullUrl('/ajax/trailer.php'), ajaxParams, postData)
    if not sts:
        return
    try:
        response = json_loads(response)
        if not response.get('valid'):
            return
        trailerUrl = self.getFullUrl(self.cm.ph.getSearchGroups(response['trailer'], '''<iframe[^>]+?src=['"]([^"^']+?)['"]''', 1, True)[0])
        if trailerUrl:
            self.addVideo(MergeDicts(cItem, {'title': title, 'url': trailerUrl, 'prev_url': cItem['url']}))
    except Exception:
        printExc()
def initApi(self):
    """Perform the anonymous login handshake once and cache the session data."""
    if self.initData:
        return  # already initialised
    url = self.API_BASE_URL + '/idm/anonymous/login/v1.0'
    httpParams = MergeDicts(self.defaultParams, {'raw_post_data': True, 'collect_all_headers': True})
    cid = str(uuid.uuid4())
    payload = '{"cid":"%s","platform":"pc","appName":"web/mediasetplay-web/2e96f80"}' % cid
    sts, data = self.getPage(url, httpParams, post_data=payload)
    if not sts:
        return
    printDBG(data)
    printDBG(self.cm.meta)
    try:
        # gateway tokens come back as response headers and must be echoed
        # on every subsequent API request
        tokenHeaders = {'t-apigw': self.cm.meta['t-apigw'], 't-cts': self.cm.meta['t-cts']}
        parsed = json_loads(data)
        if parsed['isOk']:
            response = parsed['response']
            self.initData.update({'traceCid': response['traceCid'], 'cwId': response['cwId'], 'cid': cid})
            self.HTTP_HEADER.update(tokenHeaders)
    except Exception:
        printExc()
    if not self.initData:
        self.sessionEx.waitForFinishOpen(MessageBox, _("API initialization failed!"), type=MessageBox.TYPE_ERROR, timeout=20)
def showPlaylist(self, cItem):
    """List all videos from the Dplay playlist identified by cItem['id'].

    Fixes: compare with None via identity (``is None``) instead of ``==``,
    and tolerate playlist entries without images so one malformed item does
    not abort the whole listing with an IndexError/KeyError.
    """
    printDBG("Dplay show playlist")
    list_id = cItem["id"]
    h = self.getHeader()
    if h is None or h == "":
        printDBG('Dplay wrong initialization')
        return
    sts, data = self.getPage(self.PLAYLIST_URL.format(list_id), {'header': h})
    if not sts:
        return
    response = json_loads(data)
    for video in response["Data"]["Items"]:
        # some items may ship without images - fall back to an empty icon
        images = video.get('Images') or []
        icon = images[0]['Src'] if images else ''
        title = video["Name"]
        desc = video["Description"]
        video_id = video["Id"]
        videoUrl = video["PlaybackInfoUrl"]
        printDBG("add video '%s' with playback info url '%s'" % (title, videoUrl))
        self.addVideo(MergeDicts(cItem, {
            'title': title,
            'name': title,
            'desc': desc,
            'video_id': video_id,
            'url': videoUrl,
            'icon': icon,
            'category': 'video'
        }))
def getList(self, cItem):
    """Return channel items scraped from the iframe embedded on the main page."""
    printDBG("Wiz1NetApi.getChannelsList")
    channelsTab = []
    sts, data = self.getPage(self.MAIN_URL, self.http_params)
    if not sts:
        return []
    printDBG(data)
    frame_url = re.findall("<iframe.*src=\"(.*?)\"", data)
    if not frame_url:
        printDBG("Iframe source not found!")
        return channelsTab
    sts, data = self.getPage(self.getFullUrl(frame_url[0]), self.http_params)
    if not sts:
        return []
    # one common description taken from the iframe header
    desc = ph.clean_html(ph.find(data, ('<h4', '>'), ('<br', '>'), flags=0)[1])
    for chunk in ph.rfindall(data, '</a>', ('<br', '>'), flags=0):
        channelsTab.append(MergeDicts(cItem, {
            'type': 'video',
            'title': ph.clean_html(chunk),
            'url': self.getFullUrl(ph.search(chunk, ph.A)[1]),
            'desc': desc
        }))
    return channelsTab
def listPopular(self, cItem):
    """Add the three 'popular' sub-directories delivered by one API response."""
    printDBG("DixMax.listPopular")
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    sections = (('series', 'Series mas populares'),
                ('movie', 'Peliculas mas populares'),
                ('latest', 'Ultimas fichas agregadas'))
    try:
        parsed = json_loads(data)
        for key, sectionTitle in sections:
            subItems = self._listItems(cItem, 'explore_item', parsed['result'][key])
            if not subItems:
                continue
            self.addDir(MergeDicts(cItem, {
                'title': sectionTitle,
                'category': 'sub_items',
                'sub_items': subItems
            }))
    except Exception:
        printExc()
def tryTologin(self):
    """Log in when credentials changed or no session exists yet.

    Returns the login state (True on success, False otherwise).
    Fix: the cookie file was removed twice in a row - the duplicated
    rm(self.COOKIE_FILE) call is dropped.
    """
    printDBG('tryTologin start')
    if self.loggedIn is None or self.login != config.plugins.iptvplayer.internetowa_login.value or\
       self.password != config.plugins.iptvplayer.internetowa_password.value:
        self.login = config.plugins.iptvplayer.internetowa_login.value
        self.password = config.plugins.iptvplayer.internetowa_password.value
        # force a fresh session for the (possibly new) credentials
        rm(self.COOKIE_FILE)
        sts, data = self.cm.getPage(self.getFullUrl('/logowanie/'), self.http_params)
        if sts:
            self.setMainUrl(self.cm.meta['url'])
        self.loggedIn = False
        if '' == self.login.strip() or '' == self.password.strip():
            return False
        if sts:
            params = dict(self.http_params)
            params['header'] = MergeDicts(self.HTTP_HEADER, {'Referer': self.getFullUrl('/logowanie/')})
            post_data = {'email': self.login, 'password': self.password}
            sts, data = self.cm.getPage(self.getFullUrl('/logowanie/'), params, post_data)
        # the logout link is only present when authentication succeeded
        if sts and '/wyloguj' in data:
            printDBG('tryTologin OK')
            self.loggedIn = True
        else:
            msgTab = [_('Login failed.')]
            if sts:
                msgTab.append(self.cleanHtmlStr(self.cm.ph.getDataBeetwenNodes(data, ('<div', '>', 'errorBox'), ('</div', '>'), False)[1]))
            self.sessionEx.waitForFinishOpen(MessageBox, '\n'.join(msgTab), type=MessageBox.TYPE_ERROR, timeout=10)
            printDBG('tryTologin failed')
    return self.loggedIn
def listNewsFeed(self, cItem):
    """List the AJAX 'beats' news feed, paginated via the returned 'start' offset."""
    printDBG("VUMEDI.listNewsFeed [%s]" % cItem)
    page = cItem.get('page', 0)
    feedUrl = self.getFullUrl('/beats/{0}/?is_long=true'.format(page))
    httpParams = dict(self.defaultParams)
    httpParams['header'] = MergeDicts(self.AJAX_HEADER, {'Referer': cItem['url']})
    sts, data = self.getPage(feedUrl, httpParams)
    if not sts:
        return
    try:
        payload = byteify(json.loads(data))
        nextPage = payload.get('start', -1)
        self.listVideoItems(cItem, payload['beats'])
        # the server returns the offset of the next chunk; -1/smaller means no more data
        if nextPage > page:
            nextParams = dict(cItem)
            nextParams.update({'good_for_fav': False, 'title': _("Next page"), 'page': nextPage})
            self.addDir(nextParams)
    except Exception:
        printExc()
def listItems(self, cItem):
    """List video articles of the current page and add a 'Next page' entry.

    Fix: removed the dead ``params = dict(cItem)`` assignment that was
    immediately overwritten by a fresh literal dict.
    """
    printDBG("BajeczkiOrg.listItems")
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    nextPage = ph.find(data, ('<a', '>', 'next page-'))[1]
    nextPage = self.getFullUrl(ph.getattr(nextPage, 'href'), self.cm.meta['url'])
    descObj = re.compile('''<span[^>]+?>''')
    data = ph.find(data, '<main', '</main>', flags=0)[1]
    printDBG(data)
    # each article starts with a div/article tag carrying the 'hentry' class;
    # chunk 0 is the prefix before the first article and is skipped
    data = re.compile('<(?:div|article)[^>]+?hentry[^>]+?>').split(data)
    for idx in range(1, len(data), 1):
        item = data[idx]
        url = self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1])
        if url == '':
            continue
        # icons are lazy-loaded, the real source sits in data-src
        icon = self.getFullIconUrl(self.cm.ph.getSearchGroups(item, '''data-src=['"]([^'^"]+?)['"]''', ignoreCase=True)[0])
        item = item.split('</h2>', 1)
        title = ph.clean_html(item[0])
        desc = []
        for t in descObj.split(item[-1]):
            t = ph.clean_html(t)
            if t != '':
                desc.append(t)
        params = {'good_for_fav': True, 'title': title, 'url': url, 'icon': icon, 'desc': '[/br]'.join(desc)}
        self.addVideo(params)
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'url': nextPage}))
def listRadioItems(self, cItem):
    """Add one audio entry per radio station from the JSON station list."""
    printDBG('MP3COInfo.listRadioItems %s' % cItem)
    sts, data = self.getPage(cItem['url'], self.getDefaultParams(True))
    if not sts:
        return
    cUrl = self.cm.meta['url']
    self.setMainUrl(cUrl)
    try:
        for station in json_loads(data)['stations']:
            self.addAudio(MergeDicts(cItem, {
                'good_for_fav': True,
                'url': self.getFullUrl(station['url']),
                'title': ph.clean_html(station['name']),
                'icon': self.getFullIconUrl(station.get('img', ''))
            }))
    except Exception:
        printExc()
def listEvad(self, cItem):
    # List the seasons ("evad" = season in Hungarian) of a series; each season
    # becomes a directory pointing at '<series url>/seasons/<number>'.
    try:
        if len(cItem) > 0:
            sts, data = self.cm.getPage(cItem['url'], self.defaultParams)
            if not sts:
                return
            if len(data) == 0:
                return
            data = self.cm.ph.getDataBeetwenMarkers(data, '<div class="details-block seasons">', '</div>', False)[1]
            if len(data) == 0:
                return
            data = self.cm.ph.getAllItemsBeetwenMarkers(data, '<button onclick=', '</button>')
            if len(data) == 0:
                return
            for item in data:
                # NOTE(review): the extracted href is only validated, never used
                # afterwards - and <button> elements rarely carry an href, so
                # this check may skip every season; confirm against the site.
                url = self.cm.ph.getSearchGroups(item, '''href=['"]([^"^']+?)['"]''')[0]
                if not self.cm.isValidUrl(url):
                    continue
                # the season number follows the icon inside the button
                tmp_evad = self.cm.ph.getDataBeetwenMarkers(item, '</i>', '</button>', False)[1]
                m = re.search(r'\d+', tmp_evad)
                if m is not None:
                    evad = m.group(0)
                    # NOTE(review): \d+ can never match an empty string, so
                    # this guard is dead code.
                    if evad == '':
                        continue
                    else:
                        params = MergeDicts(cItem, {'category': 'list_evad_item', 'url': cItem['url'] + '/seasons/' + str(evad), 'eretitle': cItem['title'], 'title': str(evad) + '. évad'})
                        self.addDir(params)
    except Exception:
        printExc()
def listItems(self, cItem, nextCategory):
    """List the items of the current page and append a 'Next page' directory."""
    printDBG("HDFull.listItems")
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    page = cItem.get('page', 1)
    if cItem.get('fix_next_page'):
        # build the next-page URL from the current one: strip one trailing
        # slash, drop the old page segment (pages > 1), then append page+1
        nextPage = self.cm.meta['url']
        if nextPage.endswith('/'):
            nextPage = nextPage[:-1]
        if page != 1:
            nextPage = nextPage.rsplit('/', 1)[0]
        nextPage += '/%d' % (page + 1)
    else:
        # otherwise take the pager link labelled with the next page number
        pager = self.cm.ph.getDataBeetwenMarkers(data, 'filter-title', '</div>', False)[1]
        nextPage = self.getFullUrl(self.cm.ph.getSearchGroups(pager, '''<a[^>]+?href=['"]([^'^"]+?)['"][^>]*?>\s*?%s\s*?<''' % (page + 1))[0])
    self.currList.extend(self._listItems(cItem, nextCategory, data))
    if nextPage and len(self.currList):
        self.addDir(MergeDicts(cItem, {'url': nextPage, 'title': _('Next page'), 'page': page + 1}))
def listMain(self, cItem):
    """Build the main menu from the site navigation plus static search entries."""
    printDBG("VidCorn.listMain")
    sts, data = self.getPage(self.getMainUrl())
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    navHtml = self.cm.ph.getDataBeetwenNodes(data, ('<ul', '>', 'navegacion'), ('</ul', '>'), False)[1]
    for anchor in self.cm.ph.getAllItemsBeetwenMarkers(navHtml, '<a', '</a>'):
        if 'dropdown' in anchor:
            continue  # skip sub-menu toggles
        link = self.cm.ph.getSearchGroups(anchor, '''\shref=['"]([^'^"]+?)['"]''')[0]
        section = link.rsplit('/', 1)[-1]
        # only these four sections are supported as top-level categories
        if section not in ['series', 'peliculas', 'listas', 'gente']:
            continue
        self.addDir(MergeDicts(cItem, {
            'category': section,
            'f_type': section,
            'title': self.cleanHtmlStr(anchor),
            'url': self.getFullUrl(link)
        }))
    staticTab = [
        {'category': 'search', 'title': _('Search'), 'search_item': True},
        {'category': 'search_history', 'title': _('Search history'), }
    ]
    self.listsTab(staticTab, cItem)
def listItems(self, cItem, nextCategory):
    """List one page of items; filters come from cItem keys prefixed with 'f_'."""
    printDBG('DixMax.listItems %s' % cItem)
    page = cItem.get('page', 1)
    pageUrl = cItem['url']
    if pageUrl.endswith('/'):
        pageUrl = pageUrl[:-1]
    pageUrl += '/page/%s' % page
    # every 'f_*' key except 'f_idx' becomes a query-string filter
    query = {}
    for key, val in cItem.iteritems():
        if key.startswith('f_') and key != 'f_idx':
            query[key[2:]] = val
    pageUrl += '?' + urllib.urlencode(query)
    sts, data = self.getPage(pageUrl)
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    chunks = ph.find(data, ('<div', '>', 'card'), '</section>')[1].split('paginator', 1)
    # a link to page+1 inside the paginator chunk signals another page exists
    nextPage = ph.search(chunks[-1], '/page/(%s)[^0-9]' % (page + 1))[0] if len(chunks) == 2 else ''
    self.currList = self._listItems2(cItem, nextCategory, chunks[0])
    if nextPage:
        self.addDir(MergeDicts(cItem, {'title': _('Next page'), 'page': page + 1}))
def exploreItem(self, cItem):
    """Add the playable video entry for *cItem* with a trimmed description.

    Fix: unexpected errors are now logged via printExc() - consistent with
    the other methods in this file - instead of being swallowed silently.
    """
    try:
        if len(cItem) > 0:
            sts, data = self.cm.getPage(cItem['url'], self.defaultParams)
            if not sts:
                return
            if len(data) == 0:
                return
            # the page must contain the player title config, otherwise it is not playable
            jtmb = self.cm.ph.getDataBeetwenMarkers(data, "vars.title = {", "};", False)[1].strip()
            if len(jtmb) == 0:
                return
            leiras = cItem['leiras']
            if leiras != '':
                # cap the description at 1000 characters and add an ellipsis
                leiras = re.sub(r'^(.{1000}).*$', '\g<1>...', leiras.replace('\n', '').strip())
            params = MergeDicts(cItem, {
                'good_for_fav': True,
                'title': cItem['title'],
                'url': cItem['url'],
                'desc': leiras,
                'icon': cItem['icon']
            })
            self.addVideo(params)
    except Exception:
        printExc()
        return
def listSearchResult(self, cItem, searchPattern, searchType):
    """Search via the site's DLE search form and list results as explore items.

    Fix: removed the unused local ``url`` built from the
    '/api/private/get/search' endpoint - it was never requested.
    """
    sts, data = self.getPage(self.getMainUrl())
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    # the search form requires the per-session DLE login hash from the main page
    value = ph.search(data, '''var\s*?dle_login_hash\s*?=\s*?['"]([^'^"]+?)['"]''')[0]
    post_data = {
        'query': searchPattern,
        'user_hash': value,
        'do': 'search',
        'subaction': 'search',
        'story': searchPattern
    }
    self.listItems(MergeDicts(cItem, {
        'url': self.getFullUrl('/index.php?do=search'),
        'post_data': post_data
    }), 'explore_item')
def getItems(self, cItem, nextCategory, data):
    # Build (without adding) item params from teaser <article> blocks.
    # ph.findall with START_S returns [start_tag, content, start_tag, ...],
    # hence the step-2 iteration; data[idx-1] is the opening tag, inspected
    # for 'class-clip' to tell video clips from sub-directories.
    retList = []
    sTitle = ph.clean_html(ph.find(data, ('<span', '>', 'format-header_title'), '</span>', flags=0)[1])
    data = ph.findall(data, ('<article', '>', 'teaser'), '</article>', flags=ph.START_S)
    for idx in range(1, len(data), 2):
        item = data[idx]
        url = ph.search(item, ph.A)[1]
        icon = self.getFullIconUrl(ph.getattr(item, 'data-src'))
        desc = ph.clean_html(ph.find(item, ('<div', '>', 'caption'), '</div>', flags=0)[1])
        title = ph.clean_html(ph.find(item, ('<h5', '>', 'title'), '</h5>', flags=0)[1])
        if title == '':
            # fall back to a title derived from the URL slug (Python 2 str round-trip)
            title = url.rsplit('/', 1)[-1].replace('-', ' ').decode('utf-8').title().encode('utf-8')
        desc = [desc] if desc else []
        desc.append(ph.clean_html(ph.find(item, ('<p', '>'), '</p>', flags=0)[1]))
        if sTitle:
            title = '%s: %s' % (sTitle, title)
        params = MergeDicts(cItem, {
            'good_for_fav': True,
            'title': title,
            'url': self.getFullUrl(url),
            'icon': icon,
            'desc': '[/br]'.join(desc)
        })
        if 'class-clip' in data[idx - 1]:  # and '-clip' in url:
            params.update({'type': 'video'})
        else:
            params.update({'category': nextCategory})
        retList.append(params)
    return retList
def handleService(self, index, refresh=0, searchPattern='', searchType=''):
    # Central dispatcher: maps the current item's category to the matching
    # listing method and rebuilds self.currList.
    printDBG('handleService start')
    CBaseHostClass.handleService(self, index, refresh, searchPattern, searchType)
    name = self.currItem.get("name", '')
    category = self.currItem.get("category", '')
    printDBG("handleService: ||| name[%s], category[%s] " % (name, category))
    self.currList = []
    #MAIN MENU
    if name == None:
        self.listMain({'name': 'category', 'type': 'category'})
    elif category == 'browse':
        self.listDirectories(self.currItem)
    elif category == 'sub_items':
        self.listSubItems(self.currItem)
    elif category == 'dir_channels':
        self.listDirChannels(self.currItem, 'list_channel')
    elif category == 'list_channel':
        self.listChannel(self.currItem)
    elif category == 'videos_types':
        self.listsTab(self.VIDEOS_TYPES_TAB, MergeDicts(self.currItem, {'category': 'videos_sort'}))
    elif category == 'videos_sort':
        self.listsTab(self.VIDEOS_SORT_TAB, MergeDicts(self.currItem, {'category': 'list_videos'}))
    elif category == 'list_videos':
        self.listVideos(self.currItem)
    elif category == 'clips_filters':
        self.listsTab(self.CLIPS_FILTERS_TAB, MergeDicts(self.currItem, {'category': 'list_clips'}))
    elif category == 'list_clips':
        self.listClips(self.currItem)
    # game browsing mirrors the channel flow with game-specific categories
    elif category == 'dir_games':
        self.listDirGames(self.currItem, 'browse_game')
    elif category == 'browse_game':
        self.listsTab(self.GAME_CAT_TAB, self.currItem)
    elif category == 'game_lang':
        self.listsTab(self.langItems, MergeDicts(self.currItem, {'category': self.currItem['next_category']}))
    elif category == 'game_channels':
        self.listGameChannels(self.currItem, 'list_channel')
    elif category == 'game_videos_types':
        self.listsTab(self.VIDEOS_TYPES_TAB, MergeDicts(self.currItem, {'category': 'game_videos_sort'}))
    elif category == 'game_videos_sort':
        self.listsTab(self.VIDEOS_SORT_TAB, MergeDicts(self.currItem, {'category': 'game_list_videos'}))
    elif category == 'game_list_videos':
        self.listGameVideos(self.currItem)
    elif category == 'game_clips_filters':
        self.listsTab(self.CLIPS_FILTERS_TAB, MergeDicts(self.currItem, {'category': 'game_list_clips'}))
    elif category == 'game_list_clips':
        self.listGameClips(self.currItem)
    elif category == 'v5_channels':
        self.listV5Channels(self.currItem)
    elif category == 'v5_games':
        self.listV5Games(self.currItem)
    elif category == 'v5_streams':
        self.listV5Streams(self.currItem)
    #SEARCH
    elif category in ["search", "search_next_page"]:
        cItem = dict(self.currItem)
        cItem.update({'search_item': False, 'name': 'category'})
        self.listSearchResult(cItem, searchPattern, searchType)
    #HISTORIA SEARCH
    elif category == "search_history":
        self.listsHistory({'name': 'history', 'category': 'search'}, 'desc', _("Type: "))
    else:
        # NOTE(review): printExc() logs the *last* exception, not the unknown
        # category itself - presumably the codebase's convention; confirm.
        printExc()
    CBaseHostClass.endHandleService(self, index, refresh)
def listChannel(self, cItem):
    """List a Twitch channel: live stream (when online), its videos and clips.

    Fix: the five persisted GraphQL payloads contained the literal string
    '"******"' where the channel login must be interpolated, so the
    ``% login`` formatting raised TypeError ("not all arguments converted");
    the '%s' placeholders are restored.
    """
    printDBG("Twitch.listChannel %s" % cItem['user_login'])
    login = cItem['user_login']
    # persisted GraphQL queries: operation name + sha256 of the query text
    post_data = []
    post_data.append('{"operationName":"ChannelShell","variables":{"login":"%s"},"extensions":{"persistedQuery":{"version":1,"sha256Hash":"d6b850262351d0a1e01369809ca87ef837c45e148301053a8f6a9dc440d3c806"}}}' % login)
    post_data.append('{"operationName":"ChannelPage_ChannelHeader","variables":{"login":"%s"},"extensions":{"persistedQuery":{"version":1,"sha256Hash":"32f05e9f36086c6e6930e3f3d0d515eea61cc3263bf7f92870f97c9aae024593"}}}' % login)
    post_data.append('{"operationName":"ChannelPage_StreamType_User","variables":{"channelLogin":"%s"},"extensions":{"persistedQuery":{"version":1,"sha256Hash":"43b152e4f17090ece0b50a5bc41e4690c7a6992ad3ed876d88bf7292be2d2cba"}}}' % login)
    post_data.append('{"operationName":"ChannelPage__ChannelViewersCount","variables":{"login":"%s"},"extensions":{"persistedQuery":{"version":1,"sha256Hash":"3b5b233b59cc71f5ab273c74a30c46485fa52901d98d7850d024ad0669270184"}}}' % login)
    post_data.append('{"operationName":"StreamMetadata","variables":{"channelLogin":"%s"},"extensions":{"persistedQuery":{"version":1,"sha256Hash":"1c719a40e481453e5c48d9bb585d971b8b372f8ebb105b17076722264dfa5b3e"}}}' % login)
    url = self.getFullUrl('/gql', self.API2_URL)
    sts, data = self.getPage(url, MergeDicts(self.defaultParams, {'raw_post_data': True}), '[%s]' % ','.join(post_data))
    if not sts:
        return
    printDBG("Twitch.listChannel %s" % data)
    icon = ''
    try:
        data = json.loads(data)
        try:
            # responses come back in request order:
            # [shell, header, streamType, viewers, metadata]
            if data[2]['data']['user']['stream']['type'] == 'live':
                descTab = []
                viewers = str(data[3]['data']['user']['stream']['viewersCount'])
                descTab.append(_('%s viewers') % viewers)
                title = jstr(data[4]['data']['user']['lastBroadcast'], 'title')
                item = data[4]['data']['user']['stream']
                if item.get('game'):
                    descTab.append('%s: %s' % (jstr(item['game'], '__typename'), jstr(item['game'], 'name')))
                    icon = self.getFullIconUrl(jstr(item['game'], 'boxArtURL'), self.cm.meta['url'])
                else:
                    icon = ''
                params = {'good_for_fav': False, 'title': title, 'game_id': str(item['id']), 'video_type': 'live', 'channel_id': login, 'icon': icon, 'desc': '[/br]'.join(descTab)}
                self.addVideo(params)
        except Exception:
            # channel is offline or the response schema changed - not fatal
            printExc()
        item = data[1]['data']['user']
        icon = self.getFullIconUrl(jstr(item, 'profileImageURL'), self.cm.meta['url'])
        videosCount = int(item['videos']['totalCount'])
        if videosCount:
            params = dict(cItem)
            params.update({'good_for_fav': False, 'category': 'videos_types', 'title': _('Videos %s') % videosCount, 'icon': icon, 'desc': ''})
            self.addDir(params)
    except Exception:
        printExc()
    params = MergeDicts(cItem, {'good_for_fav': False, 'category': 'clips_filters', 'title': _('Clips'), 'icon': icon, 'desc': ''})
    self.addDir(params)
def tryTologin(self):
    # Log in to filmix.co when credentials changed or no session exists.
    # A hash of the credentials is persisted next to the cookie file so an
    # existing session is reused as long as the credentials did not change.
    # Returns the login state (True/False/None).
    printDBG('tryTologin start')
    self.selectDomain()
    if None == self.loggedIn or self.login != config.plugins.iptvplayer.filmixco_login.value or\
       self.password != config.plugins.iptvplayer.filmixco_password.value:
        loginCookie = GetCookieDir('filmix.co.login')
        self.login = config.plugins.iptvplayer.filmixco_login.value
        self.password = config.plugins.iptvplayer.filmixco_password.value
        sts, data = self.getPage(self.getMainUrl())
        if sts:
            self.setMainUrl(self.cm.meta['url'])
        freshSession = False
        if sts and 'action=logout' in data:
            # already logged in - compare the stored credential hash to detect
            # changed credentials
            printDBG("Check hash")
            hash = hexlify(md5('%s@***@%s' % (self.login, self.password)).digest())
            prevHash = ReadTextFile(loginCookie)[1].strip()
            printDBG("$hash[%s] $prevHash[%s]" % (hash, prevHash))
            if hash == prevHash:
                self.loggedIn = True
                return
            else:
                freshSession = True
        rm(loginCookie)
        rm(self.COOKIE_FILE)
        if freshSession:
            sts, data = self.getPage(self.getMainUrl(), MergeDicts(self.defaultParams, {'use_new_session': True}))
        self.loggedIn = False
        if '' == self.login.strip() or '' == self.password.strip():
            return False
        msgTab = [_('Login failed.')]
        if sts:
            actionUrl = self.getFullUrl('/engine/ajax/user_auth.php')
            # NOTE(review): the 'login' form value looks masked ('******') -
            # the real value (typically 'submit') should be confirmed.
            post_data = {'login_name': self.login, 'login_password': self.password, 'login_not_save': '1', 'login': '******'}
            httpParams = dict(self.defaultParams)
            httpParams['header'] = MergeDicts(httpParams['header'], {'Referer': self.cm.meta['url'], 'Accept': '*/*', 'X-Requested-With': 'XMLHttpRequest', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'})
            sts, data = self.getPage(actionUrl, httpParams, post_data)
            printDBG(data)
            if sts:
                msgTab.append(ph.clean_html(data))
                sts, data = self.getPage(self.getMainUrl())
        # a logout link on the main page confirms the session is authenticated
        if sts and 'action=logout' in data:
            printDBG('tryTologin OK')
            self.loggedIn = True
        else:
            printDBG(data)
            self.sessionEx.waitForFinishOpen(MessageBox, '\n'.join(msgTab), type=MessageBox.TYPE_ERROR, timeout=10)
            printDBG('tryTologin failed')
        if self.loggedIn:
            # persist the credential hash so the next start can reuse the session
            hash = hexlify(md5('%s@***@%s' % (self.login, self.password)).digest())
            WriteTextFile(loginCookie, hash)
    return self.loggedIn
def tryTologin(self):
    # Log in to s.to when credentials changed or no session exists; handles an
    # optional captcha challenge with up to 3 attempts. A credential hash is
    # persisted so an existing session can be reused. Returns the login state.
    printDBG('tryTologin start')
    if None == self.loggedIn or self.login != config.plugins.iptvplayer.serienstreamto_login.value or\
       self.password != config.plugins.iptvplayer.serienstreamto_password.value:
        loginCookie = GetCookieDir('s.to.login')
        self.login = config.plugins.iptvplayer.serienstreamto_login.value
        self.password = config.plugins.iptvplayer.serienstreamto_password.value
        sts, data = self.cm.getPage(self.getMainUrl(), self.defaultParams)
        if sts:
            self.setMainUrl(self.cm.meta['url'])
        freshSession = False
        if sts and '/home/logout' in data:
            # already logged in - compare the stored credential hash to detect
            # changed credentials
            printDBG("Check hash")
            hash = hexlify(md5('%s@***@%s' % (self.login, self.password)).digest())
            prevHash = ReadTextFile(loginCookie)[1].strip()
            printDBG("$hash[%s] $prevHash[%s]" % (hash, prevHash))
            if hash == prevHash:
                self.loggedIn = True
                return
            else:
                freshSession = True
        rm(loginCookie)
        rm(self.COOKIE_FILE)
        if freshSession:
            sts, data = self.cm.getPage(self.getMainUrl(), MergeDicts(self.defaultParams, {'use_new_session': True}))
        self.loggedIn = False
        if '' == self.login.strip() or '' == self.password.strip():
            return False
        actionUrl = self.getFullUrl('/login')
        # NOTE(review): the 'autoLogin' value looks masked ('******') - the
        # real form value (e.g. '1'/'on') should be confirmed against the site.
        post_data = {'email': self.login, 'password': self.password, 'autoLogin': '******'}
        tries = 0
        while tries < 3:
            tries += 1
            errorMsg = ''
            httpParams = dict(self.defaultParams)
            httpParams['header'] = dict(httpParams['header'])
            httpParams['header']['Referer'] = actionUrl
            sts, data = self.getPage(actionUrl, httpParams, post_data)
            printDBG("+++++++++++++++++++++++++++++++++++++++++++++++++++++++")
            printDBG(data)
            printDBG("+++++++++++++++++++++++++++++++++++++++++++++++++++++++")
            if sts and not data.strip():
                # empty response - re-fetch the login page to read the session state
                sts, data = self.getPage(actionUrl)
            if sts and '/home/logout' in data:
                printDBG('tryTologin OK')
                self.loggedIn = True
                break
            elif sts:
                errorMsg = ph.clean_html(ph.find(data, ('<div', '>', 'messageAlert'), '</div>', flags=0)[1])
                tmp1 = ph.find(data, ('<div', '>', 'formCaptcha'), '</div>', flags=0)[1]
                imgUrl = self.getFullUrl(ph.search(tmp1, ph.IMAGE_SRC_URI_RE)[1], self.cm.meta['url'])
                tmp2 = ph.find(data, ('<input', '>', 'captcha'), flags=0)[1]
                if imgUrl:
                    # download the captcha image and ask the user to solve it
                    captchaLabel = _('Captcha')
                    captchaTitle = errorMsg
                    sendLabel = _('Send')
                    header = dict(httpParams['header'])
                    header['Accept'] = 'image/png,image/*;q=0.8,*/*;q=0.5'
                    params = dict(self.defaultParams)
                    params.update({'maintype': 'image', 'subtypes': ['jpeg', 'png'], 'check_first_bytes': ['\xFF\xD8', '\xFF\xD9', '\x89\x50\x4E\x47'], 'header': header})
                    filePath = GetTmpDir('.iptvplayer_captcha.jpg')
                    rm(filePath)
                    # NOTE(review): replace('&', '&') is a no-op - this was
                    # presumably replace('&amp;', '&') before the source got
                    # HTML-entity-decoded; confirm and restore.
                    ret = self.cm.saveWebFile(filePath, imgUrl.replace('&', '&'), params)
                    if not ret.get('sts'):
                        SetIPTVPlayerLastHostError(_('Fail to get "%s".') % imgUrl)
                        return
                    params = deepcopy(IPTVMultipleInputBox.DEF_PARAMS)
                    params['accep_label'] = sendLabel
                    params['title'] = captchaLabel
                    params['status_text'] = captchaTitle
                    params['status_text_hight'] = 200
                    params['with_accept_button'] = True
                    params['list'] = []
                    item = deepcopy(IPTVMultipleInputBox.DEF_INPUT_PARAMS)
                    item['label_size'] = (660, 110)
                    item['input_size'] = (680, 25)
                    item['icon_path'] = filePath
                    item['title'] = _('Answer')
                    item['input']['text'] = ''
                    params['list'].append(item)
                    #params['vk_params'] = {'invert_letters_case':True}
                    ret = 0
                    retArg = self.sessionEx.waitForFinishOpen(IPTVMultipleInputBox, params)
                    printDBG(retArg)
                    if retArg and len(retArg) and retArg[0]:
                        # retry the login with the solved captcha attached
                        printDBG(retArg[0])
                        post_data['captcha'] = retArg[0][0]
                        continue
                    else:
                        break
        if self.loggedIn:
            # persist the credential hash so the next start can reuse the session
            hash = hexlify(md5('%s@***@%s' % (self.login, self.password)).digest())
            WriteTextFile(loginCookie, hash)
        else:
            self.sessionEx.open(MessageBox, _('Login failed.') + '\n' + errorMsg, type=MessageBox.TYPE_ERROR, timeout=10)
    return self.loggedIn
def getLinksForVideo(self, cItem):
    """Resolve the playable stream URLs for a list item.

    Two page layouts are handled:
      * a <glomex-player> embed: its playlist is fetched from the glomex
        integration endpoint and the HLS variants are expanded;
      * a legacy getPlayer(...) script: the player/videoConfig endpoints are
        walked to obtain a tokenised ('?hdnea=') HLS URL.

    Returns a list of {'name', 'url', 'need_resolve'} dicts (possibly empty).
    """
    urlsTab = []
    rm(self.COOKIE_FILE)
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return []
    tmp = ph.find(data, ('<glomex-player', '>'))[1]
    if tmp:
        player_id = ph.getattr(tmp, 'data-player-id')
        playlist_id = ph.getattr(tmp, 'data-playlist-id')
        # FIX: the query string previously read '...%s¤t_url=' - the
        # '&curren' prefix of '&current_url' had been decoded as the HTML
        # entity for the currency sign (U+00A4), corrupting the URL.
        url = 'https://integration-cloudfront-eu-west-1.mes.glomex.cloud/?integration_id=%s&playlist_id=%s&current_url=' % (
            player_id, playlist_id)
        sts, data = self.getPage(url)
        if not sts:
            return []
        try:
            data = json_loads(data)['videos'][0]['source']
            if data.get('hls'):
                hlsUrl = self.cm.getFullUrl(data['hls'], self.cm.meta['url'])
                urlsTab = getDirectM3U8Playlist(
                    hlsUrl,
                    checkContent=True,
                    sortWithMaxBitrate=999999999,
                    mergeAltAudio=True)
                if len(urlsTab):
                    # Offer the master playlist as an extra "auto" entry.
                    urlsTab.append({
                        'name': 'Variable M3U8/HLS',
                        'url': hlsUrl,
                        'need_resolve': 0
                    })
            # progressive links seem do not work why?
            if False and data.get('progressive'):
                mp4Url = self.cm.getFullUrl(data['progressive'],
                                            self.cm.meta['url'])
                urlsTab.append({
                    'name': 'progressive mp4',
                    'url': mp4Url,
                    'need_resolve': 0
                })
        except Exception:
            printExc()
    else:
        # Legacy player: parameters are the arguments of getPlayer(...).
        urlParams = dict(self.defaultParams)
        urlParams['header'] = MergeDicts(urlParams['header'],
                                         {'Referer': self.cm.meta['url']})
        urlParams['raw_post_data'] = True
        urlParams['use_new_session'] = True
        playerData = ph.find(data, 'getPlayer(', ');', flags=0)[1].split(',')
        printDBG("playerData <<< %s" % playerData)
        if len(playerData) == 6:
            # First hit warms up the player session ...
            url = self.cm.getFullUrl(
                '/videoplayer/playerhls.php?play=%s&key=%d&identifier=web&v5partner=%s&autoplay=true&event' % (
                    playerData[1].strip(), int(
                        time.time() * 1000), playerData[3].strip()),
                self.cm.meta['url'])
            sts, data = self.getPage(url, urlParams)
            urlParams['header'] = MergeDicts(
                urlParams['header'], {'Referer': self.cm.meta['url']})
            # ... then the video configuration is requested.
            # playerData[5] is quoted in the page source, hence [1:-1].
            url = self.cm.getFullUrl(
                '/server/videoConfig.php?videoid=%s&partnerid=%s&language=%s&format=iphone' % (
                    playerData[1].strip(), playerData[3].strip(),
                    playerData[5].strip()[1:-1]), self.cm.meta['url'])
            sts, data = self.getPage(url, urlParams)
            try:
                url = json_loads(data)['video']['streamAccess']
                url = self.cm.getFullUrl(url, self.cm.meta['url'])
                # POST body '[""]' is what the site's own player sends.
                sts, data = self.getPage(url, urlParams, '[""]')
                try:
                    printDBG("++++")
                    printDBG(data)
                    printDBG("++++")
                    data = json_loads(data)['data']['stream-access']
                    for url in data:
                        sts, streamData = self.getPage(
                            self.cm.getFullUrl(url, self.cm.meta['url']),
                            urlParams)
                        if not sts:
                            continue
                        printDBG("?----?")
                        # NOTE(review): this logs `data`, not `streamData` -
                        # kept as-is (debug output only).
                        printDBG(data)
                        printDBG("?----?")
                        # Akamai token auth: append '?hdnea=<token>'.
                        token = ph.getattr(streamData, 'auth')
                        hlsUrl = self.cm.getFullUrl(
                            ph.getattr(streamData, 'url'),
                            self.cm.meta['url']) + '?hdnea=' + token
                        urlsTab = getDirectM3U8Playlist(
                            hlsUrl,
                            checkContent=True,
                            sortWithMaxBitrate=999999999,
                            mergeAltAudio=True)
                        break
                except Exception:
                    printExc()
            except Exception:
                printExc()
    return urlsTab
def showProgram(self, cItem, pagenum=0):
    """List the on-demand episodes of a single la7.it program.

    On the first page (pagenum == 0) three sections are scraped:
    the "last episode" box, the "last week" episodes, and the paged
    archive; subsequent pages only walk the archive.  Every episode is
    added as a video item; a 'Next page' item is appended while the
    archive still shows a pager-next button.
    """
    printDBG('La7 - start ondemand single program list')
    url = self.getFullUrl(cItem["url"] + "/rivedila7")
    sts, html = self.getPage(url)
    if not sts:
        return
    if pagenum == 0:
        # last episode: the highlighted "ultima replica" box, if present.
        try:
            replica = ph.findall(html, "<div class=\"contenitoreUltimaReplica",
                                 "<div class=\"clearfix\"></div>")[0]
            regex_icon = "<img src=\"(.*?)\""
            icon = re.findall(regex_icon, replica)[0]
            regex_url = "<a href=\"(.*?)\""
            url = re.findall(regex_url, replica)[0]
            # ph.find returns (found_flag, text); the flag is discarded.
            t, title = ph.find(replica, "<div class=\"title\">", "</div>",
                               flags=0)
            t, data = ph.find(replica, "<div class=\"dataPuntata\">",
                              "</div>", flags=0)
            t, desc = ph.find(
                replica,
                "<div class=\"views-field views-field-field-testo-lancio\"><p>",
                "</", flags=0)
            # Append the broadcast date to the title, then decode HTML
            # entities (Python 2: unescape yields unicode, re-encode utf-8).
            title = title + " (" + data + ")"
            title = HTMLParser.HTMLParser().unescape(title).encode('utf-8')
            desc = HTMLParser.HTMLParser().unescape(desc).encode('utf-8')
            self.addVideo(
                MergeDicts(
                    cItem, {
                        'category': 'epg_item',
                        'title': title,
                        'url': url,
                        'icon': icon,
                        'desc': desc
                    }))
        except:
            # Box missing or markup changed - not fatal, just log it.
            printDBG(
                "la7 - no last episode video box for program '{0}'".format(
                    cItem["title"]))
        # last week episodes: separate "/settimana" listing page.
        url = self.getFullUrl(cItem["url"] + "/rivedila7/settimana")
        sts, html = self.getPage(url)
        if sts:
            try:
                repliche = ph.findall(html, "<div class=\"itemPuntata",
                                      "</span></div>")
                for replica in repliche:
                    # Thumbnails here are lazy-loaded, hence data-src.
                    regex_icon = "<img.*data-src=\"(.*?)\""
                    icon = re.findall(regex_icon, replica)[0]
                    regex_url = "div class=\"title\"><a href=\"(.*?)\">(.*?)</a>"
                    url, title = re.findall(regex_url, replica)[0]
                    t, data = ph.find(replica, "<div class=\"dataPuntata\">",
                                      "</div>", flags=0)
                    t, desc = ph.find(
                        replica,
                        "<div class=\"views-field views-field-field-testo-lancio\">",
                        "</div>", flags=0)
                    title = title + " (" + data + ")"
                    title = HTMLParser.HTMLParser().unescape(title).encode(
                        'utf-8')
                    desc = HTMLParser.HTMLParser().unescape(desc).encode(
                        'utf-8')
                    self.addVideo(
                        MergeDicts(
                            cItem, {
                                'category': 'epg_item',
                                'title': title,
                                'url': url,
                                'icon': icon,
                                'desc': desc
                            }))
            except:
                printDBG(
                    "la7 - no last week episodes for program '{0}'".format(
                        cItem["title"]))
        else:
            printDBG(
                "la7 - error searching last week episodes for program '{0}'"
                .format(cItem["title"]))
    # older episodes: paged "/archivio" listing (same item markup as above).
    url = self.getFullUrl(cItem["url"] +
                          "/rivedila7/archivio?page={0}".format(pagenum))
    sts, html = self.getPage(url)
    if not sts:
        return
    repliche = ph.findall(html, "<div class=\"itemPuntata", "</span></div>")
    for replica in repliche:
        regex_icon = "<img.*data-src=\"(.*?)\""
        icon = re.findall(regex_icon, replica)[0]
        regex_url = "div class=\"title\"><a href=\"(.*?)\">(.*?)</a>"
        url, title = re.findall(regex_url, replica)[0]
        t, data = ph.find(replica, "<div class=\"dataPuntata\">", "</div>",
                          flags=0)
        t, desc = ph.find(
            replica,
            "<div class=\"views-field views-field-field-testo-lancio\">",
            "</div>", flags=0)
        title = title + " (" + data + ")"
        title = HTMLParser.HTMLParser().unescape(title).encode('utf-8')
        desc = HTMLParser.HTMLParser().unescape(desc).encode('utf-8')
        self.addVideo(
            MergeDicts(
                cItem, {
                    'category': 'epg_item',
                    'title': title,
                    'url': url,
                    'icon': icon,
                    'desc': desc
                }))
    # look for next button in page
    if html.find("<li class=\"pager-next\">") != -1:
        pagenum = pagenum + 1
        self.addMore(
            MergeDicts(
                cItem, {
                    'category': 'program_next',
                    'title': _('Next page'),
                    'page_number': pagenum
                }))
def getPage(self, baseUrl, addParams={}, post_data=None):
    """Fetch a page, transparently solving the site's JS DDoS challenge.

    Up to 4 attempts: on the first 'DDoS' hit the cookie file is simply
    dropped and the request retried; afterwards the challenge script is
    extracted from the page and executed with the bundled cinemaxx1/2
    helper scripts, the resulting cookies are injected into the cookie
    jar, the required delay is slept, and the request is retried.

    Returns the usual (sts, data) tuple; self.cm.meta['url'] is restored
    to the last successfully fetched URL.

    NOTE: the mutable default addParams={} is only compared and rebound,
    never mutated, so it is safe here.
    """
    tries = 0
    cUrl = ''
    while tries < 4:
        tries += 1
        if addParams == {}:
            addParams = dict(self.defaultParams)
        sts, data = self.cm.getPage(baseUrl, addParams, post_data)
        if not sts:
            return sts, data
        cUrl = self.cm.meta['url']
        if 'DDoS' in data:
            if tries == 1:
                # First hit: maybe a stale cookie - drop it and retry.
                rm(self.COOKIE_FILE)
                continue
            # Solve the JS challenge; keep a timestamp so the mandated
            # wait time can be reduced by the time we already spent.
            timestamp = time.time() * 1000
            jscode = ''
            tmp = ph.findall(data, ('<script', '>'), '</script>', flags=0)
            for item in tmp:
                if 'xhr.open' in item:
                    jscode = item
                    break
            # cinemaxx1.byte: precompiled helper run together with the
            # challenge script (semantics assumed from its usage here).
            js_params = [{'path': GetJSScriptFile('cinemaxx1.byte')}]
            js_params.append({'code': jscode})
            ret = js_execute_ext(js_params)
            if ret['sts'] and 0 == ret['code']:
                try:
                    # Output format: first line JSON, second line the
                    # sleep time in milliseconds.
                    tmp = ret['data'].split('\n', 1)
                    sleep_time = int(float(tmp[1]))
                    tmp = json_loads(tmp[0])
                    url = self.getFullUrl(tmp['1'], cUrl)
                    params = dict(addParams)
                    params['header'] = MergeDicts(self.HTTP_HEADER,
                                                  {'Referer': cUrl})
                    sts2, data2 = self.cm.getPage(url, params)
                    if not sts2:
                        break
                    # Second stage script computes the clearance cookies
                    # into the e2iobj object.
                    js_params = [{
                        'path': GetJSScriptFile('cinemaxx2.byte')
                    }]
                    js_params.append(
                        {'code': data2 + 'print(JSON.stringify(e2iobj));'})
                    ret = js_execute_ext(js_params)
                    if ret['sts'] and 0 == ret['code']:
                        # Inject the computed cookies into our jar so the
                        # retry passes the DDoS check. (Python 2: iteritems)
                        cj = self.cm.getCookie(self.COOKIE_FILE)
                        for item in json_loads(ret['data'])['cookies']:
                            for cookieKey, cookieValue in item.iteritems():
                                cookieItem = cookielib.Cookie(
                                    version=0,
                                    name=cookieKey,
                                    value=cookieValue,
                                    port=None,
                                    port_specified=False,
                                    domain='.' +
                                    self.cm.getBaseUrl(cUrl, True),
                                    domain_specified=True,
                                    domain_initial_dot=True,
                                    path='/',
                                    path_specified=True,
                                    secure=False,
                                    expires=time.time() + 3600 * 48,
                                    discard=True,
                                    comment=None,
                                    comment_url=None,
                                    rest={'HttpOnly': None},
                                    rfc2109=False)
                                cj.set_cookie(cookieItem)
                        cj.save(self.COOKIE_FILE, ignore_discard=True)
                        # Only sleep the remainder of the demanded delay.
                        sleep_time -= time.time() * 1000 - timestamp
                        if sleep_time > 0:
                            GetIPTVSleep().Sleep(
                                int(math.ceil(sleep_time / 1000.0)))
                        continue
                    else:
                        break
                except Exception:
                    printExc()
        else:
            # No challenge - page fetched successfully.
            break
    if sts and cUrl:
        # Restore the final URL for callers that read self.cm.meta['url'].
        self.cm.meta['url'] = cUrl
    return sts, data
def exploreItem(self, cItem, nextCategory):
    """Expand a movie/series page into trailer, seasons/episodes or a video.

    Builds the description from the article metadata, adds the trailer
    (if any), then inspects the main iframe: hdgo.cc-style playlists are
    expanded into season dirs or episode videos, a plain iframe becomes a
    single video, and a vk.show(...) JSON blob is unpacked into per-season
    sub-item dirs.
    """
    printDBG("Cinemaxx.exploreItem")
    self.cacheLinks = {}
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    cUrl = self.cm.meta['url']
    self.setMainUrl(cUrl)
    # Compose "meta1 | meta2 | ...[/br]<article text>" as description.
    desc = []
    descObj = self.getArticleContent(cItem, data)[0]
    icon = descObj['images'][0]['url']
    baseTitle = descObj['title']
    for item in descObj['other_info']['custom_items_list']:
        desc.append(item[1])
    desc = ' | '.join(desc) + '[/br]' + descObj['text']
    # Narrow to the article body between dle-content and fstory-info.
    data = ph.find(data, ('<div', '>', 'dle-content'),
                   ('<div', '>', 'fstory-info'),
                   flags=0)[1]
    # Trailer section (anchored by the '#trailer' marker), optional.
    trailer = ph.find(data, ('<', '>', '#trailer'), '</div>', flags=0)[1]
    title = self.cleanHtmlStr(trailer)
    trailer = self.getFullUrl(ph.search(trailer, ph.IFRAME_SRC_URI_RE)[1])
    if trailer:
        self.addVideo({
            'good_for_fav': True,
            'prev_url': cUrl,
            'title': '%s %s' % (title, baseTitle),
            'url': trailer,
            'icon': icon,
            'desc': desc
        })
    # Main player iframe inside the 'full-video' container.
    data = ph.find(data, ('<div', '>', 'full-video'), '</div>', flags=0)[1]
    url = self.getFullUrl(ph.search(data, ph.IFRAME_SRC_URI_RE)[1])
    if url:
        if ('/video/' in url and '/serials/' in url) or 'playlist' in url:
            # Series hosted via hdgo.cc helper: try seasons first.
            url = strwithmeta(url, {'Referer': cUrl})
            seasons = self.hdgocc.getSeasonsList(url)
            for item in seasons:
                self.addDir(
                    MergeDicts(
                        cItem, {
                            'good_for_fav': False,
                            'prev_url': cUrl,
                            'category': nextCategory,
                            'serie_title': baseTitle,
                            'title': 'Staffel %s' % item['title'],
                            'season_id': item['id'],
                            'url': item['url'],
                            'icon': icon,
                            'desc': desc
                        }))
            if 0 != len(seasons):
                return
            # No season list - maybe a flat single-season episode list.
            # (getEpiodesList: upstream helper name, typo and all.)
            seasonUrl = url
            episodes = self.hdgocc.getEpiodesList(seasonUrl, -1)
            for item in episodes:
                title = '{0} - {1} - s01e{2} '.format(
                    baseTitle, item['title'],
                    str(item['id']).zfill(2))
                self.addVideo({
                    'good_for_fav': False,
                    'type': 'video',
                    'prev_url': cUrl,
                    'title': title,
                    'url': item['url'],
                    'icon': icon,
                    'desc': desc
                })
            if 0 != len(episodes):
                return
        # Fallback: treat the iframe as a single playable video.
        self.addVideo({
            'good_for_fav': False,
            'prev_url': cUrl,
            'title': baseTitle,
            'url': url,
            'icon': icon,
            'desc': desc
        })
    else:
        # No iframe: parse the vk.show(<id>, <json>) playlist argument.
        data = ph.find(data, 'vk.show(', ');',
                       flags=0)[1].split(',', 1)[-1]
        # Run through JS engine since the blob need not be strict JSON.
        ret = js_execute('print(JSON.stringify(%s));' % data)
        if ret['sts'] and 0 == ret['code']:
            try:
                # data is a list of seasons, each a list of episode URLs.
                data = json_loads(ret['data'])
                for sNum, season in enumerate(data, 1):
                    subItems = []
                    for eNum, episode in enumerate(season, 1):
                        title = baseTitle + ' s%se%s' % (
                            str(sNum).zfill(2), str(eNum).zfill(2))
                        subItems.append({
                            'good_for_fav': False,
                            'type': 'video',
                            'prev_url': cUrl,
                            'title': title,
                            'url': episode,
                            'icon': icon,
                            'desc': desc
                        })
                    if subItems:
                        self.addDir(
                            MergeDicts(
                                cItem, {
                                    'good_for_fav': False,
                                    'prev_url': cUrl,
                                    'title': 'Staffel %s' %
                                    (str(sNum).zfill(2)),
                                    'category': 'sub_items',
                                    'sub_items': subItems
                                }))
            except Exception:
                printExc()