def getList(self, cItem):
    """List schedule channels: load /schedule, follow its iframe and
    return one 'video' entry per anchor found in the iframe body."""
    printDBG("Wiz1NetApi.getChannelsList")
    channelsTab = []
    sts, data = self.getPage(self.getFullUrl('/schedule'), self.http_params)
    if not sts:
        return []
    # remember the (possibly redirected) final URL and use it as Referer
    self.setMainUrl(self.cm.meta['url'])
    self.http_params['header']['Referer'] = self.cm.meta['url']
    # the real schedule lives inside an iframe
    url = self.getFullUrl(ph.search(data, ph.IFRAME)[1])
    sts, data = self.getPage(url, self.http_params)
    if not sts:
        return []
    # page-level description shared by every generated item
    desc = ph.clean_html(ph.find(data, ('<h4', '>'), ('<br', '>'), flags=0)[1])
    # split backwards: each entry ends with </a> and starts after a <br>
    data = ph.rfindall(data, '</a>', ('<br', '>'), flags=0)
    for item in data:
        title = ph.clean_html(item)
        url = self.getFullUrl(ph.search(item, ph.A)[1])
        channelsTab.append(MergeDicts(cItem, {'type': 'video', 'title': title, 'url': url, 'desc': desc}))
    return channelsTab
def listVodItems(self, cItem, nextCategory):
    """List EskaGo VOD items for one category page; movies ('/filmy')
    and other sections use different HTML layouts. Adds directory items
    plus a 'Next page' entry when pagination continues."""
    printDBG("EskaGo.listVodItems")
    page = cItem.get('page', 1)
    # listing is served by the ajax endpoint mirror of the vod path
    url = cItem['url'].replace('/vod/', '/ajax/vod/')
    sts, data = self.cm.getPage(url)
    if not sts:
        return
    # link whose text is the next page number inside the pagination div
    nextPage = ph.find(data, ('<div', '>', 'pagination'), '</div>', flags=0)[1]
    nextPage = self.cm.getFullUrl(ph.search(nextPage, r'''<a[^>]+?href=(['"])([^>]*?)(?:\1)[^>]*?>%s<''' % (page + 1))[1], self.cm.meta['url'])
    if '/filmy' in url:
        # movies: lazy-loaded posters via data-src
        reIcon = re.compile(r'''<img[^>]+?data\-src=(['"])([^>]*?\.(?:jpe?g|png)(?:\?[^\1]*?)?)(?:\1)''', re.I)
        data = ph.findall(data, ('<div', '>', 'tooltip'), '</li>')
        for item in data:
            url = ph.find(item, ('<div', '>', 'box-tv-slide'), '</div>', flags=0)[1]
            url = self.cm.getFullUrl(ph.search(url, ph.A_HREF_URI_RE)[1], self.cm.meta['url'])
            icon = self.cm.getFullUrl(ph.search(item, reIcon)[1], self.cm.meta['url'])
            title = self.cleanHtmlStr(ph.find(item, ('<h', '>'), ('</h', '>'), flags=0)[1])
            desc = []
            desc.append(self.cleanHtmlStr(ph.find(item, ('<span', '>', 'cat-date'), '</span>', flags=0)[1]))
            desc.append(self.cleanHtmlStr(ph.find(item, ('<span', '>', 'cat-time'), '</span>', flags=0)[1]))
            desc = ' | '.join(desc) + '[/br]' + self.cleanHtmlStr(ph.find(item, ('<p', '>', 'opis-view'), '</p>', flags=0)[1])
            self.addDir(MergeDicts(cItem, {'good_for_fav': True, 'category': nextCategory, 'title': title, 'url': url, 'icon': icon, 'desc': desc}))
    else:
        # series/programs: slider layout
        data = ph.findall(data, ('<div', '>', 'slider-section'), ('<div', '>', '_slide'), flags=0)
        for item in data:
            url = self.cm.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1], self.cm.meta['url'])
            icon = self.cm.getFullUrl(ph.search(item, ph.IMAGE_SRC_URI_RE)[1], self.cm.meta['url'])
            title = self.cleanHtmlStr(ph.find(item, ('<h', '>'), ('</h', '>'), flags=0)[1])
            desc = self.cleanHtmlStr(ph.find(item, '<p', '</p>')[1])
            self.addDir(MergeDicts(cItem, {'good_for_fav': True, 'category': nextCategory, 'title': title, 'url': url, 'icon': icon, 'desc': desc}))
    if nextPage:
        self.addDir(MergeDicts(cItem, {'title': _('Next page'), 'url': nextPage, 'page': page + 1}))
def getItems(self, cItem):
    """Return category items for one listing page.

    Fix: the accumulator was named ``list``, shadowing the builtin;
    renamed to ``itemsTab`` (return value and behavior unchanged).
    """
    printDBG("WagasWorldApi.getItems")
    itemsTab = []
    page = cItem.get('page', 0)
    url = cItem['url']
    if page > 0:
        # append page parameter, keeping any existing query string
        if '?' in url:
            url += '&'
        else:
            url += '?'
        url += 'page={0}'.format(page)
    sts, data = self.getPage(url, self.http_params)
    if not sts:
        return itemsTab
    # presence of a link to page+1 means there is another page
    nextPage = False
    if '&page={0}"'.format(page + 1) in data:
        nextPage = True
    data = ph.find(data, '<div class="view-content">', '</section>')[1]
    data = data.split('</span>')
    if len(data):
        del data[-1]  # last split chunk is trailing markup, not an item
    for item in data:
        title = ph.search(item, '>([^<]+?)</a>')[0]
        url = self.getFullUrl(ph.getattr(item, 'href'))
        icon = self.getFullIconUrl(ph.search(item, ph.IMG)[1])
        if '' != url and '' != title:
            itemsTab.append({'waga_cat': 'explore', 'type': 'waga_cat', 'title': ph.clean_html(title), 'icon': icon, 'url': url})
    if nextPage:
        itemsTab.append({'type': 'waga_cat', 'waga_cat': 'items', 'title': _('Next page'), 'url': cItem['url'], 'page': page + 1})
    return itemsTab
def listVodEpisodes(self, cItem, data=None):
    """Add one video entry per episode box; *data* may carry pre-fetched
    HTML, otherwise the series ajax endpoint is fetched."""
    printDBG("EskaGo.listVodEpisodes")
    if not data:
        # episodes are served by the ajax mirror of the serial path
        url = cItem['url'].replace('/serial/', '/ajax/serial/')
        sts, data = self.cm.getPage(url)
        if not sts:
            return
    sTitle = cItem['s_title']  # series title, prefixed to each episode
    data = ph.findall(data, ('<div', '>', 'box-movie-small'), '</div>', flags=0)
    for item in data:
        url = self.cm.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1], self.cm.meta['url'])
        icon = self.cm.getFullUrl(ph.search(item, ph.IMAGE_SRC_URI_RE)[1], self.cm.meta['url'])
        title = self.cleanHtmlStr(ph.find(item, ('<strong', '>'), '</strong>', flags=0)[1])
        self.addVideo({'good_for_fav': True, 'title': '%s %s' % (sTitle, title), 'url': url, 'icon': icon})
def exploreItem(self, cItem):
    """Resolve the item's iframe; season pages expand into one video per
    hoster span, other pages become a single video entry."""
    printDBG("KKisteAG.exploreItem")
    sts, mainData = self.getPage(cItem['url'])
    if not sts:
        return
    url = self.getFullUrl(ph.search(mainData, ph.IFRAME)[1])
    if not url:
        return
    if 'season=' in url:
        sts, data = self.getPage(url + '&referrer=link')
        if sts:
            # hoster list sits after </body> — assumes marker present;
            # NOTE(review): [1] would raise IndexError if it is missing
            data = data.split('</body>', 1)[1]
            data = ph.find(data, ('<span', '>', 'server'), '</div>')[1]
            data = ph.findall(data, ('<span', '>'), '</span>')
            for item in data:
                title = ph.clean_html(ph.getattr(item, 'title'))
                if not title:
                    title = ph.clean_html(item)
                url = self.getFullUrl(ph.search(item, ph.A)[1])
                self.addVideo(MergeDicts(cItem, {'title': '%s: %s' % (cItem['title'], title), 'url': url}))
    else:
        self.addVideo(MergeDicts(cItem, {'url': url}))
def listSearchItems(self, cItem):
    """List search results from the search_result table; paginated via
    the 'pagenav' block."""
    printDBG("Christusvincit.listSearchItems")
    page = cItem.get('page', 1)
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    data = ph.find(data, 'search_result', '</table>', flags=0)[1]
    # split results from the pagination footer (if any)
    data = re.compile('''<div[^>]+?pagenav[^>]*?>''').split(data, 1)
    if len(data) == 2:
        nextPage = ph.find(data[-1], ('<a', '>%s<' % (page + 1)))[1]
        nextPage = self.getFullUrl(ph.getattr(nextPage, 'href'))
    else:
        nextPage = ''
    # only anchors pointing at article pages count as results
    data = ph.findall(data[0], ('<a', '>', ph.check(ph.any, ('articles.php', 'readarticle.php'))), '</span>')
    for item in data:
        url = self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1])
        icon = self.getFullUrl(ph.search(item, self.reImgObj)[1])
        item = item.split('</a>', 1)
        title = self.cleanHtmlStr(item[0])    # anchor text
        desc = self.cleanHtmlStr(item[-1])    # trailing snippet
        self.addDir(MergeDicts(cItem, {'good_for_fav': True, 'category': 'explore_item', 'title': title, 'url': url, 'icon': icon, 'desc': desc}))
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'page': page + 1, 'url': nextPage}))
def getLinksForVideo(self, cItem):
    """Extract m3u8 playlist links; falls back to the loadPage() target
    when the first page carries no <mediaURL>."""
    printDBG("Redbull.getLinksForVideo %s" % cItem['url'])
    urlsTab = []
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return []
    #printDBG("Redbull.getLinksForVideo.data |%s|" % data)
    videoUrl = ph.search(data, '''<mediaURL>([^"]+?)<''')[0]
    if videoUrl:
        urlsTab.extend(getDirectM3U8Playlist(videoUrl, checkExt=True, variantCheck=True, checkContent=True, sortWithMaxBitrate=99999999))
    else:
        # no direct media URL: follow the javascript loadPage() target
        url = self.getFullUrl(ph.search(data, '''loadPage\(['"]([^'^"]+?)['"]''')[0])
        sts, data = self.getPage(url)
        if not sts:
            return []
        printDBG("hostredbull.getLinksForVideo.data |%s|" % data)
        videoUrl = ph.search(data, '''<mediaURL>([^"]+?)<''')[0]
        if videoUrl:
            urlsTab.extend(getDirectM3U8Playlist(videoUrl, checkExt=True, variantCheck=True, checkContent=True, sortWithMaxBitrate=99999999))
    return urlsTab
def listItems(self, cItem, nextCategory):
    """List 'shortstory' entries with rating/metadata description and a
    'Next page' entry driven by the pages-numbers block."""
    printDBG("Cinemaxx.listItems")
    cItem = dict(cItem)
    page = cItem.get('page', 1)
    # POST data is consumed here so it is not propagated to children
    post_data = cItem.pop('post_data', None)
    sts, data = self.getPage(cItem['url'], post_data=post_data)
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    printDBG(data)
    tmp = ph.find(data, ('<div', '>', 'pages-numbers'), '</div>')[1]
    tmp = ph.search(tmp, '''<a([^>]+?)>%s<''' % (page + 1))[0]
    nextPage = self.getFullUrl(ph.getattr(tmp, 'href'))
    data = ph.find(data, ('<div', '>', 'shortstory'), ('<div', '>', 'clearfix'))[1]
    # reverse scan: items end with </div> and start at a 'shortstory' div
    data = ph.rfindall(data, '</div>', ('<div', '>', 'shortstory'))
    for item in data:
        url = self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1])
        icon = self.getFullIconUrl(ph.search(item, ph.IMAGE_SRC_URI_RE)[1])
        title = self.cleanHtmlStr(ph.find(item, ('<h', '>'), '</h', flags=0)[1])
        desc = []
        # rating is stored as a percentage, hence the '/100' suffix
        tmp = [ph.find(item, ('<', '>', 'current-rating'), ('</', '>'), flags=0)[1] + '/100']
        tmp.extend(ph.findall(item, ('<span', '>'), '</span>', flags=0))
        for t in tmp:
            t = self.cleanHtmlStr(t)
            if t:
                desc.append(t)
        self.addDir(MergeDicts(cItem, {'good_for_fav': True, 'category': nextCategory, 'title': title, 'url': url, 'icon': icon, 'desc': ' | '.join(desc)}))
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'url': nextPage, 'page': page + 1}))
def listSearchItems(self, cItem, nextCategory):
    """List search results; pagination works by re-POSTing the search
    form with an updated result window."""
    printDBG("HD1080Online.listSearchItems")
    cItem = dict(cItem)
    page = cItem.get('page', 1)
    post_data = cItem['post_data']
    if page > 1:
        # site paginates search by result offset, 10 results per page
        post_data.update({'search_start': page, 'full_search': 0, 'result_from': (page - 1) * 10 + 1})
    sts, data = self.getPage(cItem['url'], post_data=post_data)
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    # only the presence of a page+1 link matters; URL is not used
    nextPage = ph.find(data, ('<div', '>', 'pagi-nav'), '</div>', flags=0)[1]
    nextPage = (ph.search(nextPage, '<a[^>]+?>(\s*%d\s*)<' % (page + 1))[0])
    data = ph.findall(data, ('<a', '>', 'sres-wrap'), '</a>')
    for item in data:
        url = self.getFullUrl(ph.search(item, ph.A)[1])
        icon = self.getFullIconUrl(ph.search(item, ph.IMG)[1])
        title = self.cleanHtmlStr(ph.find(item, ('<h', '>'), '</h', flags=0)[1])
        desc = []
        desc.append(ph.clean_html(ph.find(item, ('<div', '>', 'date'), '</div>', flags=0)[1]))
        desc.append(ph.clean_html(ph.find(item, ('<div', '>', 'desc'), '</div>', flags=0)[1]))
        self.addDir(MergeDicts(cItem, {'good_for_fav': True, 'category': nextCategory, 'title': title, 'url': url, 'icon': icon, 'desc': '[/br]'.join(desc)}))
    if nextPage:
        # note: no 'url' override — next page is fetched via post_data
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'page': page + 1}))
def listItems(self, cItem):
    """List movies; on the first page a filter query may be built from
    'f_*' keys, and an ajax load_contents URL is preferred when found."""
    printDBG("KKisteAG.listItems")
    page = cItem.get('page', 1)
    if page == 1 and 'f_idx' in cItem:
        # build the filter URL from the selected f_<key> values
        url = ''
        query = {}
        for key in self.cacheFiltersKeys:
            val = cItem.get('f_' + key)
            if not val:
                continue
            query[key] = val
        url = self.getFullUrl('?c=movie&m=filter&' + urllib.urlencode(query))
    else:
        url = cItem['url']
    sts, data = self.getPage(url)
    if not sts:
        return
    if page == 1 and 'f_idx' not in cItem:
        # unfiltered first page: content is loaded via an ajax endpoint
        tmp = ph.find(data, 'function load_contents', '}')[1]
        url = self.getFullUrl(ph.search(tmp, '''['"]([^'^"]*m=[^'^"]*?)['"]''')[0])
        if url:
            self.listItems2(MergeDicts(cItem, {'url': url, 'category': 'list_items2'}))
            return
    nextPage = ph.find(data, ('<div', '>', 'pag-nav'), '</div>', flags=0)[1]
    nextPage = self.getFullUrl(ph.clean_html(ph.search(nextPage, '''<a[^>]+?href=['"]([^'^"]+?)['"][^>]*?>\s*?%s\s*?<''' % (page + 1))[0]))
    data = ph.find(data, ('<div', '>', 'loop-content'), ('<div', '>', 'loop-nav'))[1]
    self.doListItems(cItem, data)
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'url': nextPage, 'page': page + 1}))
def getLinksForVideo(self, cItem):
    """Return m3u8 variants sorted by bitrate; falls back to the
    onPlay/loadPage target when the first page has no <mediaURL>."""
    urlsTab = []
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return []
    printDBG("hostredbull.getLinksForVideo.data |%s|" % data)
    videoUrl = ph.search(data, '''<mediaURL>([^"]+?)<''')[0]
    if videoUrl:
        tmp = getDirectM3U8Playlist(videoUrl, checkExt=True, checkContent=True)
        for item in tmp:
            name = '%sx%s , bitrate: %s' % (item['width'], item['height'], item['bitrate'])
            urlsTab.append({'name': name, 'url': item['url'], 'need_resolve': 0, 'bitrate': item['bitrate'], 'original': ''})
        urlsTab.sort(key=lambda x: x['bitrate'], reverse=True)
        return urlsTab
    else:
        # follow the javascript onPlay="loadPage(...)" target
        url = self.getFullUrl(ph.search(data, '''onPlay="loadPage\(['"]([^'^"]+?)['"]''')[0])
        sts, data = self.getPage(url)
        if not sts:
            return []
        printDBG("hostredbull.getLinksForVideo.data |%s|" % data)
        videoUrl = ph.search(data, '''<mediaURL>([^"]+?)<''')[0]
        tmp = getDirectM3U8Playlist(videoUrl, checkExt=True, checkContent=True)
        for item in tmp:
            name = '%sx%s , bitrate: %s' % (item['width'], item['height'], item['bitrate'])
            urlsTab.append({'name': name, 'url': item['url'], 'need_resolve': 0, 'bitrate': item['bitrate'], 'original': ''})
        urlsTab.sort(key=lambda x: x['bitrate'], reverse=True)
        return urlsTab
def listItems(self, cItem, data=None):
    """List browse-video entries as videos; *data* may carry pre-fetched
    HTML. Adds a 'Next page' entry from the pagination list."""
    printDBG('PregledajNET.listItems')
    page = cItem.get('page', 1)
    if not data:
        sts, data = self.cm.getPage(cItem['url'])
        if not sts:
            return
        self.setMainUrl(self.cm.meta['url'])
    # page-level description appended to every item
    baseDesc = ph.clean_html(ph.find(data, ('<div', '>', 'description'), '</div>', flags=0)[1])
    nextPage = ph.find(data, ('<ul', '>', 'pagination'), '</ul>', flags=0)[1]
    nextPage = ph.find(nextPage, '<a', '>%s<' % (page + 1))[1]
    nextPage = self.getFullUrl(ph.search(nextPage, ph.A)[1])
    data = ph.find(data, ('<ul', '>', 'browse-video'), '</ul>', flags=0)[1]
    data = ph.findall(data, ('<li', '>'), '</li>', flags=0)
    for item in data:
        tmp = ph.find(item, ('<h3', '>'), '</h3>', flags=0)[1]
        url = self.getFullUrl(ph.search(tmp, ph.A)[1])
        if not url:
            continue
        title = ph.clean_html(tmp)
        tmp = ph.find(item, '<img', '>')[1]
        # thumbnails are lazy-loaded via data-echo; fall back to src
        icon = self.getFullIconUrl(ph.getattr(tmp, 'data-echo'))
        if not icon:
            icon = self.getFullIconUrl(ph.search(tmp, ph.IMG)[1])
        desc = []
        tmp = ph.rfindall(item, '</div>', ('<div', '>'), flags=0)
        for t in tmp:
            t = ph.clean_html(t)
            if t:
                desc.append(t)
        self.addVideo(MergeDicts(cItem, {'good_for_fav': True, 'title': title, 'url': url, 'icon': icon, 'desc': (' | ').join(desc) + '[/br]' + baseDesc}))
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'url': nextPage, 'page': page + 1}))
def doListItems(self, cItem, data):
    """Turn each 'post-' div found in *data* into an 'explore_item'
    directory entry with meta | meta ... [/br] paragraph description."""
    # reverse scan: items end with </div> and start at a 'post-' div
    data = ph.rfindall(data, '</div>', ('<div', '>', 'post-'))
    for item in data:
        url = self.getFullUrl(ph.search(item, ph.A)[1])
        icon = self.getFullIconUrl(ph.search(item, self.reIMG)[1])
        title = ph.clean_html(ph.find(item, ('<h2', '>'), '</h2>', flags=0)[1])
        desc = []
        # join the meta spans with ' | ', then append paragraph and desc
        tmp = ph.find(item, ('<div', '>', 'meta'), '</div>', flags=0)[1]
        tmp = ph.findall(tmp, ('<span', '>'), '</span>', flags=0)
        for t in tmp:
            t = ph.clean_html(t)
            if t:
                desc.append(t)
        desc = [' | '.join(desc)]
        desc.append(ph.clean_html(ph.find(item, ('<p', '>'), '</p>', flags=0)[1]))
        desc.append(ph.clean_html(ph.find(item, ('<div', '>', 'desc'), '</div>', flags=0)[1]))
        self.addDir(MergeDicts(cItem, {'good_for_fav': True, 'prev_url': url, 'category': 'explore_item', 'title': title, 'url': url, 'icon': icon, 'desc': '[/br]'.join(desc)}))
def listItems(self, cItem):
    """List article entries as videos plus a 'Next page' directory.

    Fix: ``params = dict(cItem)`` was immediately overwritten by a fresh
    dict literal, silently discarding the base item's keys; now the new
    keys are merged into the copy (matching the MergeDicts pattern used
    by the sibling methods).
    """
    printDBG("BajeczkiOrg.listItems")
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    nextPage = ph.find(data, ('<a', '>', 'next page-'))[1]
    nextPage = self.getFullUrl(ph.getattr(nextPage, 'href'), self.cm.meta['url'])
    descObj = re.compile('''<span[^>]+?>''')
    data = ph.find(data, '<main', '</main>', flags=0)[1]
    printDBG(data)
    # each article starts with a div/article carrying the 'hentry' class;
    # chunk 0 is the preamble before the first article, so skip it
    data = re.compile('<(?:div|article)[^>]+?hentry[^>]+?>').split(data)
    for idx in range(1, len(data), 1):
        item = data[idx]
        url = self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1])
        if url == '':
            continue
        icon = self.getFullUrl(ph.search(item, ph.IMAGE_SRC_URI_RE)[1])
        item = item.split('</h2>', 1)
        title = ph.clean_html(item[0])
        desc = []
        for t in descObj.split(item[-1]):
            t = ph.clean_html(t)
            if t != '':
                desc.append(t)
        params = dict(cItem)
        params.update({'good_for_fav': True, 'title': title, 'url': url, 'icon': icon, 'desc': '[/br]'.join(desc)})
        self.addVideo(params)
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'url': nextPage}))
def _findMainFunctionName(self):
    """Locate the name of the YouTube signature-decipher function in the
    player javascript; several heuristics are tried in order, returning
    '' when none matches."""
    data = self.playerData
    # 1) classic pattern: name = function(a){ a = a.split("") ... a.join("") }
    name = ph.search(data, r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\).*a\.join\(\s*""\s*\)')[0]
    if name and not any((c in name) for c in ''', '"'''):
        return name.strip()
    # 2) callsite right after a "signature" key
    name = ph.find(data, '"signature",', '}', 0)[1].split('(', 1)[0].strip()
    if name and not any((c in name) for c in ''', '"'''):
        return name.strip()
    # 3) callsite behind the .sig|| fallback expression
    name = ph.find(data, '.sig||', '}', 0)[1].split('(', 1)[0].strip()
    if name and not any((c in name) for c in ''', '"'''):
        return name.strip()
    # 4) scan the akamaized-player block for return ... .set( calls
    tmp = ph.find(data, 'yt.akamaized.net', '}', 0)[1]
    if tmp:
        printDBG("DATA: %s" % tmp)
        tmp = ph.rfindall(tmp, 'return', '.set(', flags=0)
        for name in tmp:
            name = name.replace('decodeURIComponent(', '').replace('encodeURIComponent(', '')
            printDBG("ITEM: %s" % name)
            name = ph.search(name, self.RE_MAIN)[0]
            if name:
                return name
    # 5) last resort: any clean identifier used in a .set(..., fn(...)) call
    tmp = ph.findall(data, ('.set(', '));', lambda d, l, s, e: not ph.any(')-";', l, s, e)))
    for name in tmp:
        name = name.split(',', 1)[-1].split('(', 1)[0].strip()
        if name and not any((c in name) for c in ''', '";()'''):
            return name
    return ''
def listVodCats(self, cItem, nextCategory):
    """List the top-level VOD categories from the 'categories' <ul>."""
    printDBG("EskaGo.listVodCats")
    sts, data = self.cm.getPage(cItem['url'])
    if not sts:
        return
    # NOTE(review): this map is built but never read in this method —
    # presumably consumed elsewhere or leftover; verify against callers
    nextCategoriesMap = {'filmy': 'vod_movies_cats', 'seriale': 'vod_sort', 'programy': 'vod_channels'}
    data = ph.find(data, ('<ul', '>', 'categories'), '</ul>')[1]
    data = ph.findall(data, '<li', '</li>')
    for item in data:
        url = ph.search(item, ph.A_HREF_URI_RE)[1]
        if url == '':
            continue
        url = self.cm.getFullUrl(url, self.cm.meta['url'])
        icon = self.cm.getFullUrl(ph.search(item, ph.IMAGE_SRC_URI_RE)[1], self.cm.meta['url'])
        # title is the last <span>, or the whole <li> text as fallback
        tmp = ph.findall(item, '<span', '</span>')
        title = self.cleanHtmlStr(tmp[-1]) if len(tmp) else self.cleanHtmlStr(item)
        self.addDir(MergeDicts(cItem, {'good_for_fav': True, 'category': nextCategory, 'title': title, 'url': url, 'icon': icon}))
def _listItems(self, data):
    """Parse 'box_movie' divs into a list of dicts with title/url/icon
    and a 'categories | badges [/br] paragraph' description."""
    retTab = []
    # reverse scan: items end with </div> and start at a 'box_movie' div
    data = ph.rfindall(data, '</div>', ('<div', '>', 'box_movie'))
    printDBG(data)
    for item in data:
        printDBG('+++')
        url = self.getFullUrl(ph.search(item, ph.A)[1])
        if not url:
            continue
        title = ph.clean_html(ph.rfind(item, '</div>', '</div>')[1])
        if title == '':
            title = ph.clean_html(ph.getattr(item, 'title'))
        # poster: lazy data-src attribute, else inline CSS url(...)
        icon = self.getFullIconUrl(ph.getattr(item, 'data-src'))
        if icon == '':
            icon = self.getFullIconUrl(ph.search(item, '''\surl\(([^\)]+?)\)''')[0].strip())
        desc = []
        tmp = ph.find(item, ('<div', '>', 'cats'), '</div>', flags=0)[1]
        tmp = ph.findall(tmp, ('<a', '>'), '</a>')
        for t in tmp:
            t = ph.clean_html(t)
            if t != '':
                desc.append(t)
        desc = [', '.join(desc)]
        tmp = ph.findall(item, ('<', '>', 'badge-small'), ('</', '>', 'a'))
        for t in tmp:
            t = ph.clean_html(t)
            if t != '':
                desc.append(t)
        desc = ' | '.join(desc)
        desc += '[/br]' + ph.clean_html(ph.find(item, ('<p', '>'), '</p>')[1])
        retTab.append({'title': title, 'url': url, 'icon': icon, 'desc': desc})
    return retTab
def listMain(self, cItem, nextCategory):
    """Build the main menu: first-menu entries, sidebar sections as
    sub_items groups, then Search / Search history."""
    printDBG("HD1080Online.listMain")
    sts, data = self.getPage(self.getMainUrl())
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    # top navigation entries
    tmp = ph.find(data, ('<ul', '>', 'first-menu'), '</ul>', flags=0)[1]
    tmp = ph.findall(tmp, ('<li', '>'), '</li>', flags=0)
    for item in tmp:
        url = self.getFullUrl(ph.search(item, ph.A)[1])
        title = ph.clean_html(item)
        self.addDir(MergeDicts(cItem, {'category': nextCategory, 'url': url, 'title': title}))
    # sidebar: each </ul>-terminated section becomes a sub_items group
    data = ph.find(data, ('<aside', '>'), '</aside>')[1]
    tmp = ph.rfindall(data, '</ul>', ('<div', '>'), flags=0)
    for section in tmp:
        section = section.split('</div>', 1)
        sTitle = ph.clean_html(section[0])  # section header
        section = ph.findall(section[-1], ('<li', '>'), '</li>', flags=0)
        subItems = []
        for item in section:
            url = self.getFullUrl(ph.search(item, ph.A)[1])
            title = ph.clean_html(item)
            subItems.append(MergeDicts(cItem, {'good_for_fav': True, 'category': nextCategory, 'title': title, 'url': url}))
        if len(subItems):
            self.addDir(MergeDicts(cItem, {'category': 'sub_items', 'sub_items': subItems, 'title': sTitle}))
    MAIN_CAT_TAB = [{'category': 'search', 'title': _('Search'), 'search_item': True},
                    {'category': 'search_history', 'title': _('Search history'), }]
    self.listsTab(MAIN_CAT_TAB, cItem)
def getList(self, cItem):
    """Parse the schedule table into video items; game start/end times
    (data-gamestart/data-gameends attributes) are prefixed to titles."""
    printDBG("BilaSportPwApi.getChannelsList")
    mainItemsTab = []
    sts, data = self.getPage(self.getFullUrl('/schedule.html'))
    if not sts:
        return mainItemsTab
    cUrl = self.cm.meta['url']
    data = ph.find(data, ('<table', '>'), '</table>', flags=0)[1]
    data = ph.findall(data, ('<tr', '>'), '</tr>')
    for item in data:
        url = self.getFullUrl(ph.search(item, ph.A)[1], cUrl)
        icon = self.getFullIconUrl(ph.search(item, ph.IMG)[1], cUrl)
        # first cell holds the title, the rest carries times and details
        item = item.split('</td>', 1)
        title = ph.clean_html(item[0])
        start = ph.getattr(item[-1], 'data-gamestart')
        end = ph.getattr(item[-1], 'data-gameends')
        if start and end:
            title = '[%s - %s] %s' % (start, end, title)
        desc = ph.clean_html(item[-1].split('</div>', 1)[-1])
        mainItemsTab.append(MergeDicts(cItem, {'type': 'video', 'title': title, 'url': url, 'icon': icon, 'desc': desc}))
    return mainItemsTab
def listItems(self, cItem):
    """List 'entry' items with views/comments stats in the description;
    pagination uses the spages() javascript pager."""
    printDBG("Fenixsite.listItems")
    page = cItem.get('page', 1)
    url = cItem['url']
    if page == 1:
        # first request builds the canonical '<url>-<page>-<sort>' form;
        # NOTE(review): later pages come pre-built via nextPage below
        url += '-%s-%s' % (page, cItem['f_sort'])
    sts, data = self.getPage(url)
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    mainDesc = self.cleanHtmlStr(ph.find(data, ('<div', '>', 'shortstory-news'), '</div>', flags=0)[1].split('</h1>', 1)[-1])
    tmp = ph.find(data, ('<span', '>', 'pagesBlock'), '</td>')[1]
    tmp = ph.search(tmp, '''<a([^>]+?spages\(\s*?['"]%s['"][^>]*?)>''' % (page + 1))[0]
    nextPage = self.getFullUrl(ph.getattr(tmp, 'href'))
    reIcon = re.compile(r'''<img[^>]+?src=(['"])([^>]*?\.(?:jpe?g|png|gif)(?:\?[^\1]*?)?)(?:\1)''', re.I)
    data = ph.findall(data, ('<div', '>', 'entry'), '</ul>')
    for item in data:
        url = self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1])
        icon = self.getFullIconUrl(ph.search(item, reIcon)[1])
        title = self.cleanHtmlStr(ph.find(item, ('<h', '>'), '</h', flags=0)[1])
        desc = []
        # NOTE(review): the 'comments' span is extracted twice — looks
        # like a copy-paste slip; intended third stat unknown, kept as-is
        tmp = [ph.find(item, ('<i', '>', 'eye'), '</span', flags=0)[1],
               ph.find(item, ('<i', '>', 'comments'), '</span', flags=0)[1],
               ph.find(item, ('<i', '>', 'comments'), '</span', flags=0)[1]]
        for t in tmp:
            t = self.cleanHtmlStr(t)
            if t:
                desc.append(t)
        tmp = ph.find(item, ('<ul', '>', 'title'))[1]
        desc.append(ph.getattr(tmp, 'title').replace('/', ' (') + ')')
        self.addVideo({'good_for_fav': True, 'title': title, 'url': url, 'icon': icon, 'desc': ' | '.join(desc) + '[/br]' + mainDesc})
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'url': nextPage, 'page': page + 1}))
def processCaptcha(self, key, referer=None):
    """Solve a reCAPTCHA v2 via Google's no-JS fallback endpoint.

    Repeatedly (max 20 rounds) fetches the challenge, shows the image to
    the user through UnCaptchaReCaptchaWidget and POSTs the answer back,
    until a verification token appears. Returns the token, or '' on
    failure.

    Fix: the payload image URL is embedded in an HTML attribute, so its
    query separators arrive as '&amp;'; the original un-escape was
    mangled into the no-op ``replace('&', '&')`` — restored to
    ``replace('&amp;', '&')``.
    """
    post_data = None
    token = ''
    iteration = 0
    if referer != None:
        self.HTTP_HEADER['Referer'] = referer
    reCaptchaUrl = 'http://www.google.com/recaptcha/api/fallback?k=%s' % (key)
    while iteration < 20:
        #,'cookiefile':self.COOKIE_FILE, 'use_cookie': True, 'load_cookie': True, 'save_cookie':True
        sts, data = self.cm.getPage(reCaptchaUrl, {'header': self.HTTP_HEADER, 'raw_post_data': True}, post_data=post_data)
        if not sts:
            SetIPTVPlayerLastHostError(_('Fail to get "%s".') % reCaptchaUrl)
            return ''
        printDBG("+++++++++++++++++++++++++++++++++++++++++")
        printDBG(data)
        printDBG("+++++++++++++++++++++++++++++++++++++++++")
        imgUrl = ph.search(data, '"(/recaptcha/api2/payload[^"]+?)"')[0]
        iteration += 1
        # challenge prompt text: several page layouts are tried in order
        message = ph.clean_html(ph.find(data, ('<div', '>', 'imageselect-desc'), '</div>', flags=0)[1])
        if not message:
            message = ph.clean_html(ph.find(data, ('<label', '>', 'fbc-imageselect-message-text'), '</label>', flags=0)[1])
        if not message:
            message = ph.clean_html(ph.find(data, ('<div', '>', 'imageselect-message'), '</div>', flags=0)[1])
        if '' == message:
            # no prompt means the challenge is solved: extract the token
            token = ph.find(data, ('<div', '>', 'verification-token'), '</div>', flags=0)[1]
            token = ph.find(data, ('<textarea', '>'), '</textarea>', flags=0)[1].strip()
            if token == '':
                token = ph.search(data, '"this\.select\(\)">(.*?)</textarea>')[0]
            if token == '':
                token = ph.find(data, ('<textarea', '>'), '</textarea>', flags=0)[1].strip()
            if '' != token:
                printDBG('>>>>>>>> Captcha token[%s]' % (token))
            else:
                printDBG('>>>>>>>> Captcha Failed\n\n%s\n\n' % data)
            break
        cval = ph.search(data, 'name="c"\s+value="([^"]+)')[0]
        # attribute value is HTML-escaped; un-escape the query separators
        imgUrl = 'https://www.google.com%s' % (imgUrl.replace('&amp;', '&'))
        message = ph.clean_html(message)
        accepLabel = ph.clean_html(ph.search(data, 'type="submit"\s+value="([^"]+)')[0])
        filePath = GetTmpDir('.iptvplayer_captcha.jpg')
        printDBG(">>>>>>>> Captcha message[%s]" % (message))
        printDBG(">>>>>>>> Captcha accep label[%s]" % (accepLabel))
        printDBG(">>>>>>>> Captcha imgUrl[%s] \nfilePath[%s]" % (imgUrl, filePath))
        # download the challenge image, validating it is really a JPEG
        params = {'maintype': 'image', 'subtypes': ['jpeg'], 'check_first_bytes': ['\xFF\xD8', '\xFF\xD9']}
        ret = self.cm.saveWebFile(filePath, imgUrl, params)
        if not ret.get('sts'):
            SetIPTVPlayerLastHostError(_('Fail to get "%s".') % imgUrl)
            break
        # block until the user answers (or cancels) the captcha dialog
        retArg = self.sessionEx.waitForFinishOpen(UnCaptchaReCaptchaWidget, imgFilePath=filePath, message=message, title="reCAPTCHA v2", additionalParams={'accep_label': accepLabel})
        printDBG('>>>>>>>> Captcha response[%s]' % (retArg))
        if retArg is not None and len(retArg) and retArg[0]:
            answer = retArg[0]
            printDBG('>>>>>>>> Captcha answer[%s]' % (answer))
            post_data = urllib.urlencode({'c': cval, 'response': answer}, doseq=True)
        else:
            break
    return token
def listMain(self, cItem, nextCategory1, nextCategory2):
    """Build the main menu from the sidebar nav: the first link, a
    collapsible sub-menu group, the 'latest series' link, then the
    Search / Search history entries."""
    printDBG('SeriesBlanco.listMain')
    sts, data = self.getPage(self.getMainUrl())
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    data = ph.find(data, ('<ul', '>', 'sidebar-nav'), '</div>', flags=0)[1]
    # first plain link in the sidebar
    item = ph.find(data, ('<a', '>'), '</a>')[1]
    url = self.getFullUrl(ph.search(item, ph.A)[1])
    title = ph.clean_html(item)
    if url:
        self.addDir(MergeDicts(cItem, {'category': nextCategory1, 'title': title, 'url': url}))
    # collapsible sub-menu: header anchor + its <li> children
    item = ph.find(data, ('<a', '>', 'subnav-toggle'), '</ul>')[1]
    sTitle = ph.clean_html(ph.find(item, ('<a', '>'), '</a>', flags=0)[1])
    subItems = []
    item = ph.findall(item, ('<li', '>'), '</li>', flags=0)
    for it in item:
        title = ph.clean_html(it)
        url = self.getFullUrl(ph.search(it, ph.A)[1])
        subItems.append(MergeDicts(cItem, {'category': nextCategory2, 'title': title, 'url': url}))
    if subItems:
        self.addDir(MergeDicts(cItem, {'category': 'sub_items', 'title': sTitle, 'sub_items': subItems}))
    # 'latest added series' shortcut
    item = ph.find(data, ('<a', '>', 'ultimas-series-anadidas'), '</a>')[1]
    url = self.getFullUrl(ph.search(item, ph.A)[1])
    title = ph.clean_html(item)
    if url:
        self.addDir(MergeDicts(cItem, {'category': nextCategory2, 'title': title, 'url': url}))
    MAIN_CAT_TAB = [{'category': 'search', 'title': _('Search'), 'search_item': True},
                    {'category': 'search_history', 'title': _('Search history')}]
    self.listsTab(MAIN_CAT_TAB, cItem)
def listItems(self, cItem, nextCategory):
    """List filtered items: query parameters are appended from the
    item's filter keys; '/film/' results become videos and '/serial/'
    results become directories."""
    printDBG("FiliserTv.listItems")
    baseUrl = cItem['url']
    # make sure we can safely append 'key=value&' pairs
    if '?' not in baseUrl:
        baseUrl += '?'
    else:
        baseUrl += '&'
    page = cItem.get('page', 1)
    if page > 1:
        baseUrl += 'page={0}&'.format(page)
    # optional filters; '-' / '' / '0' mean "not selected"
    if cItem.get('genres', '') not in ['-', '']:
        baseUrl += 'kat={0}&'.format(urllib.quote(cItem['genres']))
    if cItem.get('language', '') not in ['-', '']:
        baseUrl += 'ver={0}&'.format(urllib.quote(cItem['language']))
    if cItem.get('year', '0') not in ['0', '-', '']:
        baseUrl += 'start_year={0}&end_year={1}&'.format(cItem['year'], cItem['year'])
    if cItem.get('sort_by', '0') not in ['0', '-', '']:
        baseUrl += 'sort_by={0}&'.format(urllib.quote(cItem['sort_by']))
    if cItem.get('order', '0') not in ['0', '-', '']:
        baseUrl += 'type={0}&'.format(urllib.quote(cItem['order']))
    sts, data = self.getPage(self.getFullUrl(baseUrl), self.defaultParams)
    if not sts:
        return
    # Polish 'Next' label marks further pages
    if '>Następna<' in data:
        nextPage = True
    else:
        nextPage = False
    data = self.cm.ph.getAllItemsBeetwenMarkers(data, '<section class="item"', '</section>', withMarkers=True)
    for item in data:
        url = self.getFullUrl(ph.search(item, ph.A)[1])
        icon = self.getFullIconUrl(ph.search(item, ph.IMG)[1].strip())
        title = ph.clean_html(ph.getattr(item, 'alt'))
        if title == '':
            title = ph.clean_html(ph.getattr(item, 'title'))
        title1 = ph.clean_html(ph.find(item, ('<h3', '>'), '</h3>', flags=0)[1])
        title2 = ph.clean_html(ph.find(item, ('<h4', '>'), '</h4>', flags=0)[1])
        desc = ph.clean_html(item.split('<div class="block2">')[-1].replace('<p class="desc">', '[/br]'))
        params = {'good_for_fav': True, 'title': title, 'url': url, 'icon': icon, 'desc': desc}
        if '/film/' in url:
            self.addVideo(params)
        elif '/serial/' in url:
            params['category'] = nextCategory
            self.addDir(params)
    if nextPage:
        params = dict(cItem)
        params.update({'title': _('Next page'), 'page': page + 1})
        self.addDir(params)
def listVodFilters(self, cItem, nextCategory):
    """Walk the three VOD filter levels driven by 'f_idx':
    0 = category groups (as sub_items), 1 = sort links, 2 = final links
    that jump to *nextCategory*."""
    printDBG("EskaGo.listVodFilters")
    url = cItem['url'].replace('/vod/', '/ajax/vod/')
    sts, data = self.cm.getPage(url)
    if not sts:
        return
    idx = cItem.get('f_idx', 0)
    if idx == 0:
        # level 0: 'cat-box' lists, split per </ul> section
        tmp = ph.find(data, ('<ul', '>', 'cat-box'), ('<div', '>', 'clear'), flags=0)[1].split('</ul>')
        for sData in tmp:
            subItems = []
            sTitle = self.cleanHtmlStr(ph.find(sData, '<span', '</span>')[1])
            if sTitle == '':
                # untitled section continues the previous group: reuse it
                if len(self.currList):
                    sTitle = self.currList[-1]['title']
                    subItems = self.currList[-1]['sub_items']
                    del self.currList[-1]
                else:
                    continue
            sData = ph.findall(sData, '<a', '</a>')
            for item in sData:
                url = self.cm.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1], self.cm.meta['url'])
                icon = self.cm.getFullUrl(ph.search(item, ph.IMAGE_SRC_URI_RE)[1], self.cm.meta['url'])
                title = self.cleanHtmlStr(item)
                subItems.append(MergeDicts(cItem, {'url': url, 'title': title, 'icon': icon, 'f_idx': idx + 1}))
            if len(subItems):
                self.addDir(MergeDicts(cItem, {'category': 'sub_items', 'title': sTitle, 'sub_items': subItems}))
        # single group: flatten it and prepend an '--All--' entry
        if len(self.currList) == 1:
            self.currList = self.currList[0]['sub_items']
        if len(self.currList):
            self.currList.insert(0, MergeDicts(cItem, {'title': _('--All--'), 'f_idx': idx + 1}))
        else:
            idx = 1  # nothing found at level 0: fall through to level 1
    if idx == 1:
        # level 1: sort links
        sData = ph.find(data, ('<div', '>', 'sort'), '</ul>', flags=0)[1]
        sData = ph.findall(sData, '<a', '</a>')
        for item in sData:
            url = ph.search(item, ph.A_HREF_URI_RE)[1]
            if url == '' or 'javascript' in url:
                continue
            url = self.cm.getFullUrl(url, self.cm.meta['url'])
            title = self.cleanHtmlStr(item)
            self.addDir(MergeDicts(cItem, {'title': title, 'url': url, 'f_idx': idx + 1}))
    elif idx == 2:
        # level 2: final filter links jump into nextCategory
        sData = ph.find(data, ('<div', '>', 'sort'), ('<div', '>', 'clear'), flags=0)[1]
        sData = ph.find(sData, '</ul>', ('<div', '>'), flags=0)[1]
        sData = ph.findall(sData, '<a', '</a>')
        for item in sData:
            url = self.cm.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1], self.cm.meta['url'])
            title = self.cleanHtmlStr(item)
            self.addDir(MergeDicts(cItem, {'category': nextCategory, 'title': title, 'url': url}))
def getLinksForVideo(self, cItem):
    """Build the list of hoster links for an item; each getIframe button
    yields one link carrying the CSRF token and player data in its meta
    (resolved later, need_resolve=1). Results are cached per URL."""
    printDBG('TfarjoCom.getLinksForVideo [%s]' % cItem)
    # direct hoster URL (e.g. youtube): delegate to urlparser
    if 1 == self.up.checkHostSupport(cItem.get('url', '')):
        videoUrl = cItem['url'].replace('youtu.be/', 'youtube.com/watch?v=')
        return self.up.getVideoLinkExt(videoUrl)
    cacheKey = cItem['url']
    cacheTab = self.cacheLinks.get(cacheKey, [])
    if len(cacheTab):
        return cacheTab
    self.cacheLinks = {}  # single-entry cache: reset before refilling
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    cUrl = data.meta['url']
    self.setMainUrl(cUrl)
    # player endpoint is embedded in the getIframe javascript function
    tmp = ph.find(data, 'function getIframe', '</script>')[1]
    linkUrl = self.getFullUrl(ph.search(tmp, '[\'"]?url[\'"]?\\s*?:\\s*?[\'"]([^\'^"]+?)[\'"]')[0])
    if '/film/' in cUrl:
        itemType = 'movie'
    else:
        itemType = 'episode'
    # CSRF token needed when resolving the link later
    linkTest = ph.search(data, '[\'"]?csrf_test_name[\'"]?\\s*?:\\s*?[\'"]([^\'^"]+?)[\'"]')[0]
    retTab = []
    data = re.sub('<!--[\\s\\S]*?-->', '', data)  # drop commented-out markup
    data = ph.findall(data, ('<button', '>', 'getIframe'), ('</button', '>'))
    for item in data:
        name = ph.clean_html(item)
        verType = ph.search(item, 'class=[\'"]players([^\'^"]+?)[\'"]')[0].upper()
        # argument of getIframe(...), quotes stripped
        linkData = ph.find(item, 'getIframe(', ')', False)[1].strip()[1:-1]
        url = linkUrl + '#' + linkData
        retTab.append({'name': '[%s] %s' % (verType, name), 'url': strwithmeta(url, {'Referer': cUrl, 'iptv_link_data': linkData, 'iptv_link_test': linkTest, 'iptv_link_type': itemType}), 'need_resolve': 1})
    if retTab:
        self.cacheLinks[cacheKey] = retTab
    return retTab
def getLinksForVideo(self, cItem):
    """Collect hoster download links; each crypted 'r=' parameter is
    decoded with the bundled javascript (swatchseries_max.byte) before
    being returned. Results are cached per item URL."""
    urlTab = []
    if len(self.cacheLinks.get(cItem['url'], [])):
        return self.cacheLinks[cItem['url']]
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return []
    data = ph.findall(data, ('<tr', '>', 'download_link_'), '</tr>')
    for item in data:
        host = ph.search(item, '''"download_link_([^'^"]+?)['"]''')[0]
        #if self.up.checkHostSupport('http://'+host+'/') != 1: continue
        #printDBG(item)
        printDBG('------')
        url = ''
        item = ph.findall(item, '<a', '</a>')
        for it in item:
            #printDBG(it)
            # crypted target sits in the 'r' query param (escaped when proxied)
            if self.isNeedProxy():
                url = urllib.unquote(ph.search(it, '''href=['"][^'^"]*?%3Fr%3D([^'^"^&]+?)['"&]''')[0])
            else:
                url = ph.search(it, '''href=['"][^'^"]*?\?r=([^'^"]+?)['"]''')[0]
            if url:
                #use javascript function to decode url
                printDBG("crypted url -----> %s" % url)
                js_params = [{'path': GetJSScriptFile('swatchseries_max.byte')}]
                js_params.append({'code': "pippo('%s'); " % url})
                ret = js_execute_ext(js_params)
                printDBG(ret)
                if ret:
                    url = ret['data'].replace('\n', '').replace('\/', '/').replace('"', '')
                if self.up.checkHostSupport(url):
                    url = strwithmeta(self.getFullUrl(url), {'Referer': self.cm.meta['url']})
                    urlTab.append({'name': host, 'url': url, 'need_resolve': 1})
                    break
                else:
                    continue
    if len(urlTab):
        self.cacheLinks[cItem['url']] = urlTab
    return urlTab
def getLinksForVideo(self, cItem):
    # Build the link list for an item whose url carries the player endpoint's
    # query parameters; results are cached in self.cacheLinks.
    linksTab = self.cacheLinks.get(cItem['url'], [])
    if linksTab:
        return linksTab
    url_data = cItem['url'].split('?', 1)
    if len(url_data) != 2:
        return []
    # Parse the query string into a dict (values stay url-encoded).
    query = {}
    url_data[1] = url_data[1].split('&')
    for item in url_data[1]:
        item = item.split('=', 1)
        if len(item) != 2:
            continue
        query[item[0]] = item[1]
    url_data[1] = query
    # First pass: query the "alternate" server for the mirror list.
    url_data[1]['server'] = 'alternate'
    url_data[1]['referrer'] = 'link'
    url = self.joinLink(url_data)
    sts, data = self.getPage(url)
    if sts:
        e = '1'  #ph.clean_html(ph.find(data, ('<span', '>', 'serverActive'), '</a>')[1])
        # Extract the site's streams() JS and execute it with stubbed-out
        # jQuery/DOM objects; $.post's first argument is printed as the result.
        data = self.getFunctionCode(data, 'function streams(')
        jscode = 'efun=function(){},elem={slideToggle:efun,toggleClass:efun,hide:efun,removeAttr:efun,attr:efun},$=function(){return elem},$.post=function(){print(arguments[0])};document={"querySelector":function(){return {"textContent":"'
        jscode += e + '"};}};streams();' + data
        ret = js_execute(jscode)
        if ret['sts'] and 0 == ret['code']:
            url = self.getFullUrl(ret['data'].strip(), self.cm.meta['url'])
            sts, data = self.getPage(url)
            if sts:
                printDBG(">>>")
                printDBG(data)
                data = ph.find(data, ('<ul', '>'), '</ul>', flags=0)[1].split('</li>')
                for item in data:
                    # NOTE(review): `tmp` is computed but never used — the url
                    # search below may have been meant to run on it; left as-is.
                    tmp = ph.find(item, 'show_player(', ')', flags=0)[1].replace('\\"', '"').replace("\\'", "'")
                    url = self.getFullUrl(ph.search(item, '''['"]((?:https?:)?//[^'^"]+?)['"]''')[0])
                    if not url:
                        url = ph.search(item, ph.A)[1]
                    if url:
                        # The link name is assembled from the row's <span> labels.
                        name = []
                        item = ph.findall(item, ('<span', '>'), '</span>', flags=0)
                        for t in item:
                            t = ph.clean_html(t)
                            if t:
                                name.append(t)
                        linksTab.append({'name': ' | '.join(name), 'url': url, 'need_resolve': 1})
    # Always prepend the primary "Server 1" playback url.
    url_data[1]['server'] = '1'
    url_data[1]['referrer'] = 'link'
    url = self.joinLink(url_data)
    linksTab.insert(0, {'name': 'Server 1', 'url': url, 'need_resolve': 1})
    if len(linksTab):
        self.cacheLinks[cItem['url']] = linksTab
    return linksTab
def listSearchResult(self, cItem, searchPattern=None, searchType=None):
    """List search results; paginates via the embedded JSON 'nextLink' hint.

    Plain hits are added as videos; channel/playlist hits become
    sub-directories browsed through the 'list_items' category.
    """
    page = cItem.get('page', 0)
    if page == 0:
        url = self.getFullUrl('/?s=%s' % urllib.quote(searchPattern))
    else:
        url = cItem['url']
    sts, data = self.getPage(url)
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])

    # Pagination data sits in a small JSON object containing 'maxPages'.
    nextPage = ''
    jsonChunk = ph.find(data, ('{', '}', 'maxPages'))[1]
    try:
        nextPage = self.getFullUrl(json_loads(jsonChunk)['nextLink'])
    except Exception:
        printExc()

    listing = ph.find(data, ('<div', '>', 'post-'), '</section>')[1]
    for entry in ph.rfindall(listing, '</div>', ('<div', '>', 'post-'), flags=0):
        url = self.getFullUrl(ph.search(entry, ph.A_HREF_URI_RE)[1])
        icon = self.getFullIconUrl(ph.search(entry, ph.IMAGE_SRC_URI_RE)[1])
        parts = entry.split('</h3>', 1)
        title = self.cleanHtmlStr(parts[0])
        # Description is assembled from all non-empty <span> labels.
        spans = ph.findall(parts[-1], ('<span', '>'), '</span>', flags=0)
        cleaned = [self.cleanHtmlStr(s) for s in spans]
        params = {
            'good_for_fav': True,
            'title': title,
            'url': url,
            'icon': icon,
            'desc': ' | '.join(c for c in cleaned if c)
        }
        if '/channel/' in url or '/playlist/' in url:
            params.update({'name': 'category', 'category': 'list_items'})
            self.addDir(params)
        else:
            self.addVideo(params)

    if nextPage:
        self.addDir(MergeDicts(cItem, {
            'category': 'list_search_items',
            'url': nextPage,
            'title': _('Next page'),
            'page': page + 1
        }))
def exploreItem(self, cItem):
    """Expose the trailer plus per-episode video entries for a title page.

    Fills self.cacheLinks as {episode title: [link dicts]}; each added video
    carries a 'links_key' used later to pick its cached link list.
    """
    printDBG("GoMovies.exploreItem")
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])

    # Optional youtube trailer advertised in the page's inline JS config.
    trailerUrl = ph.search(data, '''trailer['"]?\s*?:\s*?['"](https?://[^'^"]+?youtube[^'^"]+?)['"]''')[0]
    if trailerUrl != '' and not trailerUrl.endswith('/'):
        trailerParams = dict(cItem)
        trailerParams.update({'good_for_fav': False, 'title': '%s : %s' % (cItem['title'], _('trailer')), 'url': trailerUrl})
        self.addVideo(trailerParams)

    # Follow the "watching" anchor to the player page, sending a Referer.
    playerUrl = ph.find(data, ('<a', '>', 'watching('), '</a>')[1]
    playerUrl = self.getFullUrl(ph.getattr(playerUrl, 'href'))
    httpParams = dict(self.defaultParams)
    httpParams['header'] = dict(httpParams['header'])
    httpParams['header']['Referer'] = self.cm.meta['url']
    sts, data = self.getPage(playerUrl, httpParams)
    if not sts:
        return

    titlesTab = []
    self.cacheLinks = {}
    for serverSection in ph.findall(data, ('<div', '>', 'server-'), ('<div', '>', 'clearfix')):
        serverName = ph.clean_html(ph.find(serverSection, '<strong', '</strong>')[1])
        serverId = ph.search(serverSection, '''server\-([0-9]+)''')[0]
        for anchor in ph.findall(serverSection, '<a', '</a>'):
            title = ph.clean_html(anchor)
            linkId = ph.getattr(anchor, 'sid')
            playerData = ph.search(anchor, '''data\-([^=]+?)=['"]([^'^"]+?)['"]''')
            # Known host attributes map to fixed embed-url prefixes.
            prefix = {
                'strgo': 'https://vidload.co/player/',
                'openload': 'https://openload.co/embed/'
            }.get(playerData[0])
            if prefix is not None:
                url = prefix + playerData[1]
            else:
                url = self.getFullUrl(playerData[1])
            if title not in titlesTab:
                titlesTab.append(title)
                self.cacheLinks[title] = []
            url = strwithmeta(url, {'id': linkId, 'server_id': serverId})
            self.cacheLinks[title].append({'name': serverName, 'url': url, 'need_resolve': 1})

    for episodeTitle in titlesTab:
        episodeParams = dict(cItem)
        episodeParams.update({'good_for_fav': False, 'title': '%s : %s' % (cItem['title'], episodeTitle), 'links_key': episodeTitle})
        self.addVideo(episodeParams)
def listSubCategories(self, cItem, nextCategory):
    """Add one directory entry per item in the page's browse-categories list."""
    printDBG('CrtankoCom.listSubCategories')
    sts, data = self.cm.getPage(cItem['url'])
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    menu = ph.find(data, ('<ul', '>', 'browse-categories'), '</ul>', flags=0)[1]
    for entry in ph.findall(menu, ('<li', '>'), '</li>', flags=0):
        self.addDir(MergeDicts(cItem, {
            'good_for_fav': True,
            'category': nextCategory,
            'title': ph.clean_html(entry),
            'url': self.getFullUrl(ph.search(entry, ph.A)[1]),
            'icon': self.getFullIconUrl(ph.search(entry, ph.IMG)[1])
        }))