def getVideoLinks(self, videoUrl):
    """Resolve playable link(s) for *videoUrl*.

    Marks the matching entry in self.cacheLinks with a leading '*' so the
    GUI shows it as already used, delegates to the generic resolver when
    the host is supported, otherwise scrapes <video>/<source> tags from
    the page (with a show_player(...) fallback).
    Returns a list of {'url': ..., 'name': ...} dicts.
    """
    printDBG("KKisteAG.getVideoLinks [%s]" % videoUrl)
    # mark requested link as used one
    if self.cacheLinks:
        for cachedTab in self.cacheLinks.values():
            for linkItem in cachedTab:
                if videoUrl in linkItem['url'] and not linkItem['name'].startswith('*'):
                    linkItem['name'] = '*' + linkItem['name']
    # known hosting -> let the generic url parser handle it
    if 1 == self.up.checkHostSupport(videoUrl):
        return self.up.getVideoLinkExt(videoUrl)
    params = dict(self.defaultParams)
    params['header'] = MergeDicts(params['header'], {'Referer': self.getMainUrl()})
    videoLinks = []
    sts, data = self.getPage(videoUrl, params)
    if not sts:
        return videoLinks
    tmp = ph.find(data, ('<video', '>'), '</video>', flags=0)[1]
    tmp = ph.findall(tmp, '<source', '>', flags=0)
    for item in tmp:
        url = self.getFullUrl(ph.getattr(item, 'src'))
        mimeType = ph.getattr(item, 'type')  # renamed from 'type': don't shadow the builtin
        # label preference: data-res, then label attribute, then mime type
        label = ph.getattr(item, 'data-res')
        if not label:
            label = ph.getattr(item, 'label')
        if not label:
            label = mimeType
        videoLinks.append({'url': strwithmeta(url, {'Cookie': 'approve=1;'}), 'name': label})
    if not videoLinks:
        # fallback: url embedded in a show_player(...) call with escaped quotes
        tmp = ph.find(data, 'show_player(', ')', flags=0)[1].replace('\\"', '"').replace("\\'", "'")
        url = self.getFullUrl(ph.search(tmp, '''['"]((?:https?:)?//[^'^"]+?)['"]''')[0])
        return self.up.getVideoLinkExt(url)
    return videoLinks
def getLinksForVideo(self, cItem):
    """Return stream links for *cItem* (from self.cacheLinks when already
    scraped). Each entry: {'name', 'url', 'need_resolve'}."""
    printDBG("FilmPalastTo.getLinksForVideo [%s]" % cItem)
    # fix: removed dead 'linksTab = []' assignment that was immediately overwritten
    linksTab = self.cacheLinks.get(cItem['url'], [])
    if len(linksTab) > 0:
        return linksTab
    sts, data = self.getPage(cItem['url'], self.defaultParams)
    if not sts:
        return []
    data = ph.findall(data, ('<ul', '>', 'currentStreamLinks'), '</ul>', flags=0)
    for item in data:
        data_id = ph.getattr(item, 'data-id')
        data_stamp = ph.getattr(item, 'data-stamp')
        if data_id and data_stamp:
            # id|stamp pair is resolved later via the site's api
            url = strwithmeta('%s|%s' % (data_id, data_stamp), {'data_id': data_id, 'data_stamp': data_stamp, 'links_key': cItem['url']})
        else:
            url = strwithmeta(self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1]), {'links_key': cItem['url']})
        title = ph.clean_html(ph.find(item, ('<p', '>'), '</p>', flags=0)[1])
        if title == '':
            title = ph.clean_html(item)
        linksTab.append({'name': title, 'url': url, 'need_resolve': 1})
    if len(linksTab):
        self.cacheLinks[cItem['url']] = linksTab
    return linksTab
def getList(self, cItem):
    """List scheduled games from /schedule.html as video items."""
    printDBG("BilaSportPwApi.getChannelsList")
    itemsTab = []
    sts, data = self.getPage(self.getFullUrl('/schedule.html'))
    if not sts:
        return itemsTab
    cUrl = self.cm.meta['url']
    tableHtml = ph.find(data, ('<table', '>'), '</table>', flags=0)[1]
    for row in ph.findall(tableHtml, ('<tr', '>'), '</tr>'):
        url = self.getFullUrl(ph.search(row, ph.A)[1], cUrl)
        icon = self.getFullIconUrl(ph.search(row, ph.IMG)[1], cUrl)
        cells = row.split('</td>', 1)
        title = ph.clean_html(cells[0])
        # game start/end come from data attributes on the remaining cell
        start = ph.getattr(cells[-1], 'data-gamestart')
        end = ph.getattr(cells[-1], 'data-gameends')
        if start and end:
            title = '[%s - %s] %s' % (start, end, title)
        desc = ph.clean_html(cells[-1].split('</div>', 1)[-1])
        itemsTab.append(MergeDicts(cItem, {'type': 'video', 'title': title, 'url': url, 'icon': icon, 'desc': desc}))
    return itemsTab
def Billboard_chartsalbums(self, url):
    """Scrape a Billboard albums chart page and add one directory per album."""
    sts, data = self.cm.getPage(url, {'header': HEADER})
    if not sts:
        return
    data = ph.find(data, ('<div', '>', 'chart-number-one'), ('<div', '>', 'chart-list__expanded-header'))[1]
    entries = re.compile('<div[^>]*?data\-has\-content[^>]*?>').split(data)
    for entry in entries:
        name = ph.clean_html(ph.find(entry, ('<div', '>', '__title'), '</div>', flags=0)[1])
        artist = ph.clean_html(ph.find(entry, ('<div', '>', '__artist'), '</div>', flags=0)[1])
        # cover art: direct 174x174 jpg first, then srcset fallbacks
        icon = ph.search(entry, '\s(https?://[^\s]+?\-174x174\.jpg)\s')[0]
        if not icon:
            icon = ph.getattr(entry, 'data-srcset').split(' ', 1)[0]
        if not icon:
            icon = ph.getattr(entry, 'srcset').split(' ', 1)[0]
        album_name = name
        self.addDir({'good_for_fav': True, 'name': 'List_album_tracks', 'title': name + ' - ' + artist, 'page': 0, 'artist': artist, 'album': album_name, 'icon': self.cm.getFullUrl(icon, self.cm.meta['url'])})
def listMissed(self, cItem, nextCategory):
    """Fill self.channelsMap from the site submenu, then list missed-show tabs."""
    printDBG("C7tvDe.listMissed")
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    menu = ph.find(data, ('<ul', '>', 'site-nav-submenu'), '</ul>', flags=0)[1]
    for idx, entry in enumerate(ph.findall(menu, ('<li', '>'), '</li>', flags=0), 1):
        # channel key is the last path segment of the submenu link
        channel = ph.getattr(entry, 'href').rsplit('/', 1)[-1]
        self.channelsMap['titles'][channel] = ph.clean_html(entry)
        self.channelsMap['order'][channel] = idx
    tabList = ph.find(data, ('<ul', '>', 'tab-list'), '</ul>', flags=0)[1]
    for entry in ph.findall(tabList, ('<li', '>'), '</li>', flags=0):
        title = ph.clean_html(entry)
        url = self.getFullUrl(ph.getattr(entry, 'data-href'))
        self.addDir(MergeDicts(cItem, {'category': nextCategory, 'url': url, 'title': title}))
def _listItems(self, data):
    # Parse 'box_movie' blocks out of *data* and return a list of
    # {'title', 'url', 'icon', 'desc'} dicts (no side effects on self).
    retTab = []
    data = ph.rfindall(data, '</div>', ('<div', '>', 'box_movie'))
    printDBG(data)
    for item in data:
        printDBG('+++')
        url = self.getFullUrl(ph.search(item, ph.A)[1])
        if not url:
            continue
        title = ph.clean_html(ph.rfind(item, '</div>', '</div>')[1])
        if title == '':
            # fallback: the box's title attribute
            title = ph.clean_html(ph.getattr(item, 'title'))
        icon = self.getFullIconUrl(ph.getattr(item, 'data-src'))
        if icon == '':
            # fallback: CSS background url(...)
            icon = self.getFullIconUrl(ph.search(item, '''\surl\(([^\)]+?)\)''')[0].strip())
        desc = []
        # category links, joined into one comma-separated entry
        tmp = ph.find(item, ('<div', '>', 'cats'), '</div>', flags=0)[1]
        tmp = ph.findall(tmp, ('<a', '>'), '</a>')
        for t in tmp:
            t = ph.clean_html(t)
            if t != '':
                desc.append(t)
        desc = [', '.join(desc)]
        # small badges appended as separate description entries
        tmp = ph.findall(item, ('<', '>', 'badge-small'), ('</', '>', 'a'))
        for t in tmp:
            t = ph.clean_html(t)
            if t != '':
                desc.append(t)
        desc = ' | '.join(desc)
        desc += '[/br]' + ph.clean_html(ph.find(item, ('<p', '>'), '</p>')[1])
        retTab.append({'title': title, 'url': url, 'icon': icon, 'desc': desc})
    return retTab
def getArticleContent(self, cItem, data=None):
    # Build the article view (title, icon, description plus a key/value
    # info list) for one item; *data* may carry an already fetched page.
    printDBG('Fenixsite >>>>>>>>>>>> getArticleContent >> %s' % cItem)
    retTab = []
    if not data:
        sts, data = self.getPage(cItem['url'])
        if not sts:
            return []
        self.setMainUrl(self.cm.meta['url'])
    tmp = ph.find(data, ('<div', '>', 'fullstory'), '</div>', flags=0)[1]
    title = ph.clean_html(ph.find(tmp, ('<h1', '>'), '</h1>', flags=0)[1])
    icon = self.getFullIconUrl(ph.search(tmp, ph.IMAGE_SRC_URI_RE)[1])
    desc = ph.clean_html(ph.find(data, ('<div', '>', 'h1'), '</p>', flags=0)[1])
    itemsList = []
    data = ph.find(data, ('<div', '>', 'finfo'), ('<div', '>', 'berrors'), flags=0)[1]
    data = ph.rfindall(data, '</div>', ('<div', '>', 'finfo-block'), flags=0)
    for item in data:
        key = ph.clean_html(ph.find(item, ('<div', '>', 'title'), '</div>', flags=0)[1])
        # skip help ('Pomoc') and report ('Prijavi') blocks
        if 'Pomoc' in key or 'Prijavi' in key:
            continue
        if 'imdbRatingPlugin' in item:
            # rating is fetched from the IMDb widget API (JSONP response)
            url = ('http://p.media-imdb.com/static-content/documents/v1/title/{0}/ratings%3Fjsonp=imdb.rating.run:imdb.api.title.ratings/data.json?u={1}&s={2}').format(ph.getattr(item, 'data-title'), ph.getattr(item, 'data-user'), ph.getattr(item, 'data-style'))
            try:
                sts, tmp = self.getPage(url)
                printDBG('>>' + tmp.strip()[16:-1])
                # strip the JSONP wrapper before parsing the json payload
                tmp = json_loads(tmp.strip()[16:-1])['resource']
                value = '%s (%s)' % (tmp['rating'], tmp['ratingCount'])
            except Exception:
                printExc()
                continue
        else:
            value = ph.clean_html(ph.find(item, ('<div', '>', 'text'), '</div>', flags=0)[1].rsplit('</ul>', 1)[(-1)])
        itemsList.append((key, value))
    # fall back to the listing item's own data when scraping failed
    if title == '':
        title = cItem['title']
    if icon == '':
        icon = cItem.get('icon', self.DEFAULT_ICON_URL)
    if desc == '':
        desc = cItem.get('desc', '')
    return [{'title': ph.clean_html(title), 'text': ph.clean_html(desc), 'images': [{'title': '', 'url': self.getFullUrl(icon)}], 'other_info': {'custom_items_list': itemsList}}]
def listItems(self, cItem):
    # List videos for one category page; the next-page url is taken from
    # the site's spages(...) pager links.
    printDBG("Fenixsite.listItems")
    page = cItem.get('page', 1)
    url = cItem['url']
    if page == 1:
        # first request builds the '-<page>-<sort>' suffix itself; later
        # pages arrive as full hrefs from the pager below
        url += '-%s-%s' % (page, cItem['f_sort'])
    sts, data = self.getPage(url)
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    mainDesc = self.cleanHtmlStr(ph.find(data, ('<div', '>', 'shortstory-news'), '</div>', flags=0)[1].split('</h1>', 1)[-1])
    tmp = ph.find(data, ('<span', '>', 'pagesBlock'), '</td>')[1]
    tmp = ph.search(tmp, '''<a([^>]+?spages\(\s*?['"]%s['"][^>]*?)>''' % (page + 1))[0]
    nextPage = self.getFullUrl(ph.getattr(tmp, 'href'))
    reIcon = re.compile(r'''<img[^>]+?src=(['"])([^>]*?\.(?:jpe?g|png|gif)(?:\?[^\1]*?)?)(?:\1)''', re.I)
    data = ph.findall(data, ('<div', '>', 'entry'), '</ul>')
    for item in data:
        url = self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1])
        icon = self.getFullIconUrl(ph.search(item, reIcon)[1])
        title = self.cleanHtmlStr(ph.find(item, ('<h', '>'), '</h', flags=0)[1])
        desc = []
        # NOTE(review): 'comments' is looked up twice here - looks like a
        # copy-paste slip (third entry likely meant another icon class);
        # confirm against the site markup before changing
        tmp = [ph.find(item, ('<i', '>', 'eye'), '</span', flags=0)[1], ph.find(item, ('<i', '>', 'comments'), '</span', flags=0)[1], ph.find(item, ('<i', '>', 'comments'), '</span', flags=0)[1]]
        for t in tmp:
            t = self.cleanHtmlStr(t)
            if t:
                desc.append(t)
        tmp = ph.find(item, ('<ul', '>', 'title'))[1]
        desc.append(ph.getattr(tmp, 'title').replace('/', ' (') + ')')
        self.addVideo({'good_for_fav': True, 'title': title, 'url': url, 'icon': icon, 'desc': ' | '.join(desc) + '[/br]' + mainDesc})
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'url': nextPage, 'page': page + 1}))
def getLinksForVideo(self, cItem):
    """Return stream links for *cItem* grouped by version tab, using
    self.cacheLinks when already scraped.

    Each entry: {'name', 'url', 'need_resolve'}. Always returns a list
    (the original returned None from one early-exit path).
    """
    printDBG("FiliserTv.getLinksForVideo [%s]" % cItem)
    urlTab = []
    if len(self.cacheLinks.get(cItem['url'], [])):
        return self.cacheLinks[cItem['url']]
    sts, data = self.getPage(cItem['url'], self.defaultParams)
    if not sts:
        return []
    errorMessage = ph.clean_html(ph.find(data, ('<h2', '>', 'title_block'), '</section>')[1])
    if '' != errorMessage:
        SetIPTVPlayerLastHostError(errorMessage)
    lParams = {}
    tmp = ph.findall(data, ('<div', '>', ph.check(ph.any, ('"box"', "'box'"))), '</section>', flags=ph.START_S, limits=2)
    # fix: return a list (was bare 'return' -> None) and guard the tmp[1]
    # access below, which raised IndexError when only one box matched
    if len(tmp) < 2:
        return []
    lParams['code'] = ph.getattr(tmp[0], 'data-code')
    lParams['code2'] = ph.getattr(tmp[0], 'data-code2')
    lParams['type'] = ph.getattr(tmp[0], 'id').split('_', 1)[0]
    tmp = ph.findall(tmp[1], ('<h', '>'), ('</h', '>'), flags=0, limits=2)
    lParams['title1'] = ph.clean_html(tmp[0])
    lParams['title2'] = ph.clean_html(tmp[-1])
    data = data.split('<div id="links">')
    if 2 != len(data):
        return []
    tabs = []
    # version tabs (language/quality) carry a data-type key matching the lists
    tmp = ph.find(data[0], '<div id="video_links"', '<div class="clear">')[1]
    tmp = re.compile('<[^>]+?data-type\="([^"]+?)"[^>]*?>([^<]+?)<').findall(tmp)
    for item in tmp:
        tabs.append({'key': item[0], 'title': ph.clean_html(item[1])})
    if tabs:
        del data[0]
    for tab in tabs:
        tmp = ph.find(data[0], 'data-type="%s"' % tab['key'], '</ul>')[1]
        tmp = ph.findall(tmp, '<li', '</li>')
        for item in tmp:
            url = strwithmeta(ph.getattr(item, 'data-ref'), {'link_params': lParams})
            title = ph.clean_html(item.split('<div class="rightSide">')[0])
            urlTab.append({'name': '%s: %s' % (tab['title'], title), 'url': url, 'need_resolve': 1})
    self.cacheLinks[cItem['url']] = urlTab
    return urlTab
def listItems(self, cItem, nextCategory):
    # List movies/series applying the filter values stored in cItem
    # (genres, language, year, sort order) as url query parameters.
    printDBG("FiliserTv.listItems")
    baseUrl = cItem['url']
    if '?' not in baseUrl:
        baseUrl += '?'
    else:
        baseUrl += '&'
    page = cItem.get('page', 1)
    if page > 1:
        baseUrl += 'page={0}&'.format(page)
    if cItem.get('genres', '') not in ['-', '']:
        baseUrl += 'kat={0}&'.format(urllib.quote(cItem['genres']))
    if cItem.get('language', '') not in ['-', '']:
        baseUrl += 'ver={0}&'.format(urllib.quote(cItem['language']))
    if cItem.get('year', '0') not in ['0', '-', '']:
        baseUrl += 'start_year={0}&end_year={1}&'.format(cItem['year'], cItem['year'])
    if cItem.get('sort_by', '0') not in ['0', '-', '']:
        baseUrl += 'sort_by={0}&'.format(urllib.quote(cItem['sort_by']))
    if cItem.get('order', '0') not in ['0', '-', '']:
        baseUrl += 'type={0}&'.format(urllib.quote(cItem['order']))
    sts, data = self.getPage(self.getFullUrl(baseUrl), self.defaultParams)
    if not sts:
        return
    # 'Następna' (Polish: 'next') link present -> more result pages exist
    if '>Następna<' in data:
        nextPage = True
    else:
        nextPage = False
    data = self.cm.ph.getAllItemsBeetwenMarkers(data, '<section class="item"', '</section>', withMarkers=True)
    for item in data:
        url = self.getFullUrl(ph.search(item, ph.A)[1])
        icon = self.getFullIconUrl(ph.search(item, ph.IMG)[1].strip())
        title = ph.clean_html(ph.getattr(item, 'alt'))
        if title == '':
            title = ph.clean_html(ph.getattr(item, 'title'))
        # NOTE(review): title1/title2 are computed but never used below
        title1 = ph.clean_html(ph.find(item, ('<h3', '>'), '</h3>', flags=0)[1])
        title2 = ph.clean_html(ph.find(item, ('<h4', '>'), '</h4>', flags=0)[1])
        desc = ph.clean_html(item.split('<div class="block2">')[-1].replace('<p class="desc">', '[/br]'))
        params = {'good_for_fav': True, 'title': title, 'url': url, 'icon': icon, 'desc': desc}
        # movies play directly, series open the episodes view
        if '/film/' in url:
            self.addVideo(params)
        elif '/serial/' in url:
            params['category'] = nextCategory
            self.addDir(params)
    if nextPage:
        params = dict(cItem)
        params.update({'title': _('Next page'), 'page': page + 1})
        self.addDir(params)
def exploreItem(self, cItem):
    # Explore one title: add a trailer entry when present, then fetch the
    # watch page and group server links per part/episode title in
    # self.cacheLinks (keyed by that title).
    printDBG("GoMovies.exploreItem")
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    # trailer
    trailerUrl = ph.search(data, '''trailer['"]?\s*?:\s*?['"](https?://[^'^"]+?youtube[^'^"]+?)['"]''')[0]
    if trailerUrl != '' and not trailerUrl.endswith('/'):
        params = dict(cItem)
        params.update({'good_for_fav': False, 'title': '%s : %s' % (cItem['title'], _('trailer')), 'url': trailerUrl})
        self.addVideo(params)
    playerUrl = ph.find(data, ('<a', '>', 'watching('), '</a>')[1]
    playerUrl = self.getFullUrl(ph.getattr(playerUrl, 'href'))
    params = dict(self.defaultParams)
    params['header'] = dict(params['header'])
    params['header']['Referer'] = self.cm.meta['url']
    sts, data = self.getPage(playerUrl, params)
    if not sts:
        return
    titlesTab = []
    self.cacheLinks = {}
    data = ph.findall(data, ('<div', '>', 'server-'), ('<div', '>', 'clearfix'))
    for tmp in data:
        serverName = ph.clean_html(ph.find(tmp, '<strong', '</strong>')[1])
        serverId = ph.search(tmp, '''server\-([0-9]+)''')[0]
        tmp = ph.findall(tmp, '<a', '</a>')
        for item in tmp:
            title = ph.clean_html(item)
            id = ph.getattr(item, 'sid')
            playerData = ph.search(item, '''data\-([^=]+?)=['"]([^'^"]+?)['"]''')
            # known embed hosts get their url rebuilt from the data-* value
            if playerData[0] == 'strgo':
                url = 'https://vidload.co/player/' + playerData[1]
            elif playerData[0] == 'openload':
                url = 'https://openload.co/embed/' + playerData[1]
            else:
                url = self.getFullUrl(playerData[1])
            if title not in titlesTab:
                titlesTab.append(title)
                self.cacheLinks[title] = []
            url = strwithmeta(url, {'id': id, 'server_id': serverId})
            self.cacheLinks[title].append({'name': serverName, 'url': url, 'need_resolve': 1})
    for item in titlesTab:
        params = dict(cItem)
        params.update({'good_for_fav': False, 'title': '%s : %s' % (cItem['title'], item), 'links_key': item})
        self.addVideo(params)
def listDel2(self, cItem):
    """List section headers as directories, each carrying its video items
    in 'sub_items'."""
    printDBG("Del.listDel2")
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    data = ph.find(data, ('<h3', '>', 'sectionHead'), ('<script', '>'))[1]
    for section in ph.rfindall(data, '</div>', ('<h3', '>', 'sectionHead')):
        sTitle = self.cleanHtmlStr(ph.find(section, ('<span', '>'), '</span>', flags=0)[1])
        subItems = []
        for entry in ph.rfindall(section, '</div>', ('<div', '>', 'item')):
            url = self.cm.getFullUrl(ph.search(entry, ph.A_HREF_URI_RE)[1], self.cm.meta['url'])
            icon = self.cm.getFullUrl(ph.getattr(entry, 'src'), self.cm.meta['url'])
            title = self.cleanHtmlStr(ph.getattr(entry, 'title'))
            # description: duration first, then every paragraph
            parts = [ph.find(entry, ('<div', '>', 'duration'), '</div>', flags=0)[1]]
            parts.extend(ph.findall(entry, ('<p', '>'), '</p>', flags=0))
            desc = [t for t in (self.cleanHtmlStr(p) for p in parts) if t]
            subItems.append({'good_for_fav': True, 'type': 'video', 'title': title, 'url': url, 'icon': icon, 'desc': ' | '.join(desc)})
        if len(subItems):
            self.addDir(MergeDicts(cItem, {'title': sTitle, 'category': 'sub_items', 'sub_items': subItems}))
def exploreItem(self, cItem):
    """Explore one item: resolve the embedded player iframe; season pages
    expand into one video per server entry, otherwise a single video is added."""
    printDBG("KKisteAG.exploreItem")
    sts, mainData = self.getPage(cItem['url'])
    if not sts:
        return
    url = self.getFullUrl(ph.search(mainData, ph.IFRAME)[1])
    if not url:
        return
    if 'season=' in url:
        sts, data = self.getPage(url + '&referrer=link')
        if sts:
            # server list sits after </body>; fix: use [-1] so a page
            # without that marker degrades gracefully instead of raising
            # IndexError (original used [1])
            data = data.split('</body>', 1)[-1]
            data = ph.find(data, ('<span', '>', 'server'), '</div>')[1]
            data = ph.findall(data, ('<span', '>'), '</span>')
            for item in data:
                title = ph.clean_html(ph.getattr(item, 'title'))
                if not title:
                    title = ph.clean_html(item)
                url = self.getFullUrl(ph.search(item, ph.A)[1])
                self.addVideo(MergeDicts(cItem, {'title': '%s: %s' % (cItem['title'], title), 'url': url}))
    else:
        self.addVideo(MergeDicts(cItem, {'url': url}))
def getItems(self, cItem):
    """Return category items for *cItem*'s page; appends a 'Next page'
    entry when the page links indicate more results."""
    printDBG("WagasWorldApi.getItems")
    itemsTab = []  # renamed from 'list': don't shadow the builtin
    page = cItem.get('page', 0)
    url = cItem['url']
    if page > 0:
        if '?' in url:
            url += '&'
        else:
            url += '?'
        url += 'page={0}'.format(page)
    sts, data = self.cm.getPage(url, self.http_params)
    if not sts:
        return itemsTab
    nextPage = False
    # a link to page+1 anywhere on the page means more results exist
    if '&page={0}"'.format(page + 1) in data:
        nextPage = True
    data = ph.find(data, '<div class="view-content">', '</section>')[1]
    data = data.split('</span>')
    if len(data):
        del data[-1]  # drop the trailing fragment after the last item
    for item in data:
        title = ph.search(item, '>([^<]+?)</a>')[0]
        url = self.getFullUrl(ph.getattr(item, 'href'))
        icon = self.getFullIconUrl(ph.search(item, ph.IMG)[1])
        if '' != url and '' != title:
            itemsTab.append({'waga_cat': 'explore', 'type': 'waga_cat', 'title': ph.clean_html(title), 'icon': icon, 'url': url})
    if nextPage:
        itemsTab.append({'type': 'waga_cat', 'waga_cat': 'items', 'title': _('Next page'), 'url': cItem['url'], 'page': page + 1})
    return itemsTab
def listCategories(self, cItem, nextCategory):
    """List category links, starting with an '--All--' entry; shows the
    sort view when the list stayed empty."""
    printDBG('FilmstreamvkCom.listCategories')
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    self.addDir(MergeDicts(cItem, {'category': nextCategory, 'title': _('--All--')}))
    catHtml = ph.find(data, ('<li', '>', 'cat-item'), '</ul>')[1]
    anchors = ph.findall(catHtml, ('<a', '>'), '</a>', flags=ph.START_S)
    # START_S interleaves attribute strings with anchor bodies
    for pos in range(1, len(anchors), 2):
        title = ph.clean_html(anchors[pos])
        url = self.getFullUrl(ph.getattr(anchors[pos - 1], 'href'))
        self.addDir(MergeDicts(cItem, {'category': nextCategory, 'url': url, 'title': title}))
    if not self.currList:
        self.listSort(MergeDicts(cItem, {'category': nextCategory}), 'list_items')
def listMain(self, cItem):
    # Main menu: navbar links become top-level entries; '/category/' links
    # are collected into one 'Categories' sub-directory; search entries last.
    printDBG("FilmyNaDzis.listMainMenu")
    sts, data = self.getPage(self.getMainUrl())
    if not sts:
        return
    subItems = []
    data = ph.find(data, ('<ul', '>', 'navbar'), '</ul>', flags=0)[1]
    items = self.cm.ph.getAllItemsBeetwenMarkers(data, '<a', '</a>', withMarkers=True)
    for item in items:
        printDBG("-------- > " + item)
        url = self.getFullUrl(ph.getattr(item, 'href'), self.MAIN_URL)
        title = ph.clean_html(item)
        # series section is skipped here
        if 'seriale' in url:
            continue
        params = {'good_for_fav': True, 'category': 'list_items', 'url': url, 'title': title}
        if '/category/' in url:
            params.update({'type': 'category'})
            subItems.append(params)
            printDBG("subItem: %s" % str(params))
        else:
            printDBG("Item: %s" % str(params))
            self.addDir(params)
    if subItems:
        params = {'good_for_fav': False, 'title': _('Categories'), 'type': 'category', 'category': 'sub_items', 'sub_items': subItems}
        printDBG("Categories: %s" % str(params))
        self.addDir(params)
    tabs = [{'category': 'search', 'title': _('Search'), 'search_item': True}, {'category': 'search_history', 'title': _('Search history'), }]
    self.listsTab(tabs, cItem)
def Billboard_charts(self, url):
    """Scrape a Billboard singles chart page and add one video per track."""
    sts, data = self.cm.getPage(url, {'header': HEADER})
    if not sts:
        return
    data = ph.find(data, ('<div', '>', 'chart-number-one'), ('<div', '>', 'chart-list__expanded-header'))[1]
    entries = re.compile('<div[^>]*?data\-has\-content[^>]*?>').split(data)
    for entry in entries:
        name = ph.clean_html(ph.find(entry, ('<div', '>', '__title'), '</div>', flags=0)[1])
        artist = ph.clean_html(ph.find(entry, ('<div', '>', '__artist'), '</div>', flags=0)[1])
        icon = self.cm.getFullUrl(ph.search(entry, '\s(https?://[^\s]+?\-174x174\.jpg)\s')[0], self.cm.meta['url'])
        blob = ph.clean_html(ph.getattr(entry, 'data-brightcove-data'))
        if not icon and blob:
            # artwork fallback: brightcove json blob
            try:
                blob = json_loads(blob)
                icon = self.cm.getFullUrl(blob['video_image'], self.cm.meta['url'])
            except Exception:
                printExc()
        track_name = name
        search_string = urllib.quote(artist + ' ' + track_name + ' music video')
        self.addVideo({'good_for_fav': True, 'title': name + ' - ' + artist, 'page': search_string, 'icon': icon})
def listMain(self, cItem, nextCategory):
    """Top-level menu: movie/series navbar sections plus a fixed Anime
    entry and the search items."""
    printDBG("Fenixsite.listMain")
    sts, data = self.getPage(self.getMainUrl())
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    navbar = ph.find(data, ('<ul', '>', 'navbar'), '</ul>')[1]
    for entry in ph.findall(navbar, ('<li', '>'), '</li>'):
        url = ph.getattr(entry, 'href')
        # only the foreign movies / foreign series sections are listed here
        if '/strani_filmovi/' not in url and '/strane_serije/' not in url:
            continue
        title = self.cleanHtmlStr(entry)
        self.addDir(MergeDicts(cItem, {'category': nextCategory, 'url': self.getFullUrl(url), 'title': title}))
    MAIN_CAT_TAB = [{'category': nextCategory, 'title': 'Anime', 'url': self.getFullUrl('/load/anime_serije/95')},
                    {'category': 'search', 'title': _('Search'), 'search_item': True},
                    {'category': 'search_history', 'title': _('Search history'), }]
    self.listsTab(MAIN_CAT_TAB, cItem)
def listItems(self, cItem, data=None):
    # List videos from a browse page (optionally from pre-fetched *data*);
    # each item's description is joined with the page-level description.
    printDBG('PregledajNET.listItems')
    page = cItem.get('page', 1)
    if not data:
        sts, data = self.cm.getPage(cItem['url'])
        if not sts:
            return
        self.setMainUrl(self.cm.meta['url'])
    baseDesc = ph.clean_html(ph.find(data, ('<div', '>', 'description'), '</div>', flags=0)[1])
    # next page: pagination link whose body is the next page number
    nextPage = ph.find(data, ('<ul', '>', 'pagination'), '</ul>', flags=0)[1]
    nextPage = ph.find(nextPage, '<a', '>%s<' % (page + 1))[1]
    nextPage = self.getFullUrl(ph.search(nextPage, ph.A)[1])
    data = ph.find(data, ('<ul', '>', 'browse-video'), '</ul>', flags=0)[1]
    data = ph.findall(data, ('<li', '>'), '</li>', flags=0)
    for item in data:
        tmp = ph.find(item, ('<h3', '>'), '</h3>', flags=0)[1]
        url = self.getFullUrl(ph.search(tmp, ph.A)[1])
        if not url:
            continue
        title = ph.clean_html(tmp)
        tmp = ph.find(item, '<img', '>')[1]
        # lazy-loaded thumbnail first, plain src as fallback
        icon = self.getFullIconUrl(ph.getattr(tmp, 'data-echo'))
        if not icon:
            icon = self.getFullIconUrl(ph.search(tmp, ph.IMG)[1])
        desc = []
        tmp = ph.rfindall(item, '</div>', ('<div', '>'), flags=0)
        for t in tmp:
            t = ph.clean_html(t)
            if t:
                desc.append(t)
        self.addVideo(MergeDicts(cItem, {'good_for_fav': True, 'title': title, 'url': url, 'icon': icon, 'desc': (' | ').join(desc) + '[/br]' + baseDesc}))
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'url': nextPage, 'page': page + 1}))
def listSearchResult(self, cItem, searchPattern, searchType):
    """Run the site search and group results into 'movies'/'serials'
    directories; parsed items are stored in self.cacheSearch by group."""
    printDBG("AllBoxTV.listSearchResult cItem[%s], searchPattern[%s] searchType[%s]" % (cItem, searchPattern, searchType))
    self.cacheSearch = {}
    url = self.getFullUrl('/szukaj?query=') + urllib.quote_plus(searchPattern)
    sts, data = self.getPage(url)
    if not sts:
        return
    nameMap = {'movies': _('Movies'), 'serials': _('TV series')}
    data = ph.find(data, ('<div', '>', 'tab-content'), ('<div', '>', 'sidebarTitle'))[1]
    # split() with a capturing group interleaves panel attributes and bodies
    parts = re.compile('<div([^>]+?tabpanel[^>]+?)>').split(data)
    for pos in range(2, len(parts), 2):
        groupName = ph.getattr(parts[pos - 1], 'id')
        if groupName not in ['movies', 'serials']:
            printDBG('SKIP search group %s' % groupName)
            continue
        itemsTab = self._listItems(parts[pos])
        if not itemsTab:
            continue
        params = dict(cItem)
        params.update({'good_for_fav': False, 'category': 'list_search_items', 'f_search_type': groupName, 'desc': '', 'title': '%s (%s)' % (nameMap[groupName], len(itemsTab))})
        self.addDir(params)
        self.cacheSearch[groupName] = itemsTab
def fillCacheFilters(self, cItem, data):
    # Populate self.cacheFilters / self.cacheFiltersKeys with the type and
    # genre <select> options scraped from *data*, plus a generated list of
    # year ranges covering the last 20 years.
    printDBG("DixMax.fillCacheFilters")
    self.cacheFilters = {}
    self.cacheFiltersKeys = []
    keys = ('f_type', 'f_genre')  # ('genres[]', 'fichaType[]')
    tmp = ph.findall(data, ('<select', '>', 'b-multiple'), '</select>', limits=2)
    for section in tmp:
        # the key is chosen by how many filters were already collected
        key = keys[len(self.cacheFiltersKeys)]
        self.cacheFilters[key] = []
        section = ph.findall(section, ('<option', '>'), '</option>', ph.START_S)
        # START_S interleaves attribute strings with option bodies
        for idx in range(1, len(section), 2):
            title = self.cleanHtmlStr(section[idx])
            value = ph.getattr(section[idx - 1], 'value')
            self.cacheFilters[key].append({'title': title, key: value, key + '_t': title})
        if len(self.cacheFilters[key]):
            self.cacheFilters[key].insert(0, {'title': _('--All--')})
            self.cacheFiltersKeys.append(key)
    key = 'f_year'
    self.cacheFilters[key] = [{'title': _('--All--')}]
    currYear = datetime.now().year
    for year in range(currYear, currYear - 20, -1):
        self.cacheFilters[key].append({'title': '%d-%d' % (year - 1, year), key: year})
    self.cacheFiltersKeys.append(key)
    printDBG(self.cacheFilters)
def listCatFilters(self, cItem, nextCategory):
    """List the sort options of a category page as directories."""
    printDBG('BBCiPlayer.listCatFilters')
    sts, data = self.cm.getPage(cItem['url'], self.defaultParams)
    if not sts:
        return
    # options hold only the last path segment; keep everything up to the
    # final '/' of the fetched url as the base
    baseUrl = self.cm.meta['url']
    baseUrl = baseUrl[:baseUrl.rfind('/') + 1]
    options = ph.find(data, ('<select', '>', 'change_sort'), '</select>', flags=0)[1]
    options = ph.findall(options, ('<option', '>'), '</option>', flags=ph.START_S)
    # START_S interleaves attribute strings with option labels
    for pos in range(1, len(options), 2):
        url = baseUrl + ph.getattr(options[pos - 1], 'value')
        title = ph.clean_html(options[pos])
        self.addDir(MergeDicts(cItem, {'good_for_fav': True, 'title': title, 'category': nextCategory, 'url': url, 'desc': '\c00????00 Info: \c00??????BBC iPlayer\\n \c00????00Version: \c00??????' + self.HOST_VER + '\\n \c00????00Developer: \c00??????Codermik\\n'}))
def getLinksForVideo(self, cItem):
    """Collect stream links: base player links plus 'keremiya_part'
    anchors; falls back to ajax-loaded data when fewer than two links
    were found."""
    printDBG('FilmstreamvkCom.getLinksForVideo [%s]' % cItem)
    self.selectDomain()
    sts, baseData = self.getPage(cItem['url'])
    if not sts:
        return []
    cUrl = self.cm.meta['url']
    urlTab = self._getBaseVideoLink(baseData)
    partsHtml = ph.find(baseData, 'keremiya_part', '</div>')[1]
    for anchor in ph.findall(partsHtml, ('<a ', '>'), '</a>'):
        href = ph.getattr(anchor, 'href')
        if href.startswith('http'):
            urlTab.append({'name': ph.clean_html(anchor), 'url': self.getFullUrl(href), 'need_resolve': 1})
    if len(urlTab) < 2:
        ajaxData = self._getAjaxData(baseData, cUrl)
        urlTab = self._getBaseVideoLink(ajaxData)
    return urlTab
def listSearchItems(self, cItem):
    # List article search results with pagination (pagenav block).
    printDBG("Christusvincit.listSearchItems")
    page = cItem.get('page', 1)
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    data = ph.find(data, 'search_result', '</table>', flags=0)[1]
    data = re.compile('''<div[^>]+?pagenav[^>]*?>''').split(data, 1)
    if len(data) == 2:
        # next page: link whose body is the next page number
        nextPage = ph.find(data[-1], ('<a', '>%s<' % (page + 1)))[1]
        nextPage = self.getFullUrl(ph.getattr(nextPage, 'href'))
    else:
        nextPage = ''
    data = ph.findall(data[0], ('<a', '>', ph.check(ph.any, ('articles.php', 'readarticle.php'))), '</span>')
    for item in data:
        url = self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1])
        icon = self.getFullUrl(ph.search(item, self.reImgObj)[1])
        # the anchor body carries the title, the rest the description
        item = item.split('</a>', 1)
        title = self.cleanHtmlStr(item[0])
        desc = self.cleanHtmlStr(item[-1])
        self.addDir(MergeDicts(cItem, {'good_for_fav': True, 'category': 'explore_item', 'title': title, 'url': url, 'icon': icon, 'desc': desc}))
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'page': page + 1, 'url': nextPage}))
def getLinksForVideo(self, cItem):
    # Collect playable links for *cItem*: <video>/<source> tags, json
    # 'data-item' players, supported <iframe> hostings and - as a last
    # resort - youtube watch links found anywhere in the page.
    printDBG("BajeczkiOrg.getLinksForVideo [%s]" % cItem)
    urlTab = self.cacheLinks.get(cItem['url'], [])
    if urlTab:
        return urlTab
    self.cacheLinks = {}
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    data = self.cm.ph.getDataBeetwenNodes(data, ('<div', '>', 'entry-content'), ('<aside', '>'))[1]
    # drop html comments so commented-out players are not picked up
    data = re.sub("<!--[\s\S]*?-->", "", data)
    tmp = ph.find(data, '<video', '</video>', flags=ph.IGNORECASE)[1]
    tmp = ph.findall(tmp, '<source', '>', flags=ph.IGNORECASE)
    for item in tmp:
        url = self.getFullUrl(self.cm.ph.getSearchGroups(item, '''src=['"]([^'^"]+?)['"]''', ignoreCase=True)[0])
        type = self.cm.ph.getSearchGroups(item, '''type=['"]([^'^"]+?)['"]''', ignoreCase=True)[0].lower()
        if 'mp4' in type:
            name = self.up.getDomain(url)
            urlTab.append({'name': name, 'url': strwithmeta(url, {'direct_link': True, 'Referer': self.cm.meta['url']}), 'need_resolve': 1})
    # json players embedded in data-item attributes
    tmp = ph.findall(data, ('<div', '>', 'data-item'), flags=ph.IGNORECASE | ph.START_E)
    for item in tmp:
        if 'sources' not in item:
            continue
        item = ph.clean_html(ph.getattr(item, 'data-item'))
        try:
            item = json_loads(item)
            for it in item['sources']:
                # derive a missing type from the file extension of the src url
                it['type'] = it.get('type', it['src'].split('?', 1)[0].rsplit('.', 1)[-1]).lower()
                url = strwithmeta(it['src'], {'direct_link': True, 'Referer': self.cm.meta['url']})
                if 'mp4' in it['type']:
                    urlTab.append({'name': it['type'], 'url': url, 'need_resolve': 1})
                elif 'mpeg' in it['type']:
                    urlTab.extend(getDirectM3U8Playlist(url))
        except Exception:
            printExc()
    tmp = self.cm.ph.getAllItemsBeetwenMarkers(data, '<iframe', '>', caseSensitive=False)
    for item in tmp:
        url = self.getFullUrl(self.cm.ph.getSearchGroups(item, '''src=['"]([^'^"]+?)['"]''', ignoreCase=True)[0])
        if 1 == self.up.checkHostSupport(url):
            name = self.up.getDomain(url)
            urlTab.append({'name': name, 'url': strwithmeta(url, {'Referer': cItem['url']}), 'need_resolve': 1})
    if not urlTab:
        # last resort: deduplicated youtube watch links
        unique = set()
        data = re.compile('''['">]\s*?(https?://[^'^"^<]*?/watch\?v=[^'^"]+?)\s*?[<'"]''').findall(data)
        for url in data:
            if url not in unique:
                urlTab.append({'name': 'Youtube', 'url': strwithmeta(url, {'Referer': cItem['url']}), 'need_resolve': 1})
                unique.add(url)
    if urlTab:
        self.cacheLinks[cItem['url']] = urlTab
    return urlTab
def listCategories(self, cItem, nextCategory):
    # List category links; for the foreign-series section the items are
    # grouped by first letter into sub-directories, otherwise listed flat
    # after an '--All--' entry.
    printDBG("Fenixsite.listCategories")
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    data = ph.find(data, ('<div', '>', 'owl-box'), '</table>')[1]
    data = ph.findall(data, ('<a', '>'), '</td>', flags=ph.START_S)
    itemsList = []
    # START_S interleaves attribute strings with anchor bodies
    for idx in range(1, len(data), 2):
        url = self.getFullUrl(ph.getattr(data[idx - 1], 'href'))
        title = self.cleanHtmlStr(data[idx])
        itemsList.append(MergeDicts(cItem, {'good_for_fav': True, 'category': nextCategory, 'url': url, 'title': title}))
    if '/strane_serije/' in cItem['url']:
        subItems = {}
        letters = []
        for item in itemsList:
            # python2: decode to unicode so the first character is taken
            # correctly for multi-byte titles; digits collapse into '#'
            letter = item['title'].decode('utf-8')[0].upper()
            if letter.isnumeric():
                letter = u'#'
            if letter not in subItems:
                subItems[letter] = []
                letters.append(letter)
            subItems[letter].append(item)
        self.addDir(MergeDicts(cItem, {'title': _('--All--'), 'category': nextCategory}))
        for letter in letters:
            self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'category': 'sub_items', 'sub_items': subItems[letter], 'title': '%s [%d]' % (letter.encode('utf-8'), len(subItems[letter]))}))
    else:
        self.currList.append(MergeDicts(cItem, {'title': _('--All--'), 'category': nextCategory}))
        self.currList.extend(itemsList)
def listSeries(self, cItem, nextCategory):
    # List series category links ('--All--' first) from the 'Voir Séries' menu.
    printDBG('TfarjoCom.listSeries [%s]' % cItem)
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    # NOTE(review): sibling methods use self.cm.meta['url'] here; this
    # relies on *data* being a strwithmeta carrying the final url - confirm
    self.setMainUrl(data.meta['url'])
    self.addDir(MergeDicts(cItem, {'category': nextCategory, 'title': _('--All--')}))
    tmp = ph.find(data, ('<h4', '</h4>', 'Voir Séries'), ('<li', '>', 'genre'))[1]
    tmp = ph.findall(tmp, ('<a', '>'), '</a>', flags=ph.START_S)
    # START_S interleaves attribute strings with anchor bodies
    for idx in range(1, len(tmp), 2):
        url = self.getFullUrl(ph.getattr(tmp[(idx - 1)], 'href'))
        title = ph.clean_html(tmp[idx])
        if not title:
            continue
        self.addDir(MergeDicts(cItem, {'category': nextCategory, 'title': title, 'url': url}))
def listItems(self, cItem, nextCategory):
    # List 'shortstory' boxes as directories; supports POST data passed in
    # via cItem['post_data'] and numeric pagination.
    printDBG("Cinemaxx.listItems")
    cItem = dict(cItem)
    page = cItem.get('page', 1)
    # pop so post_data does not leak into the child / next-page items
    post_data = cItem.pop('post_data', None)
    sts, data = self.getPage(cItem['url'], post_data=post_data)
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    printDBG(data)
    # next page: pager link whose body is the next page number
    tmp = ph.find(data, ('<div', '>', 'pages-numbers'), '</div>')[1]
    tmp = ph.search(tmp, '''<a([^>]+?)>%s<''' % (page + 1))[0]
    nextPage = self.getFullUrl(ph.getattr(tmp, 'href'))
    data = ph.find(data, ('<div', '>', 'shortstory'), ('<div', '>', 'clearfix'))[1]
    data = ph.rfindall(data, '</div>', ('<div', '>', 'shortstory'))
    for item in data:
        url = self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1])
        icon = self.getFullIconUrl(ph.search(item, ph.IMAGE_SRC_URI_RE)[1])
        title = self.cleanHtmlStr(ph.find(item, ('<h', '>'), '</h', flags=0)[1])
        desc = []
        # rating (as '<value>/100') plus all span snippets
        tmp = [ph.find(item, ('<', '>', 'current-rating'), ('</', '>'), flags=0)[1] + '/100']
        tmp.extend(ph.findall(item, ('<span', '>'), '</span>', flags=0))
        for t in tmp:
            t = self.cleanHtmlStr(t)
            if t:
                desc.append(t)
        self.addDir(MergeDicts(cItem, {'good_for_fav': True, 'category': nextCategory, 'title': title, 'url': url, 'icon': icon, 'desc': ' | '.join(desc)}))
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'url': nextPage, 'page': page + 1}))
def _listItems2(self, cItem, nextCategory, data):
    """Parse 'col-' boxes from *data* into category item dicts (no side
    effects on self)."""
    printDBG('DixMax._listItems2')
    results = []
    for box in ph.rfindall(data, '</div>', ('<div', '>', 'col-')):
        imgTag = ph.find(box, '<img', '>', flags=0)[1]
        icon = self.getFullIconUrl(ph.getattr(imgTag, 'data-src-lazy'))
        heading = ph.find(box, ('<h3', '>'), '</h3>', flags=0)[1]
        url = self.getFullUrl(ph.search(heading, ph.A)[1])
        title = ph.clean_html(heading)
        descParts = []
        for span in ph.findall(box, ('<span', '>'), '</span>', flags=0):
            # turn inner links into comma separators, then trim a trailing one
            text = ph.clean_html(span.replace('</a>', ', '))
            if text.endswith(','):
                text = text[:-1]
            if text:
                descParts.append(text)
        results.append({'good_for_fav': True, 'type': 'category', 'name': 'category', 'category': nextCategory, 'title': title, 'url': url, 'icon': icon, 'desc': ' | '.join(descParts)})
    return results
def listFolderItems(self, cItem):
    # List the video files of a cda folder (type=pliki view) with pagination.
    printDBG("cda.listFolderItems [%s]" % cItem['url'])
    url = cItem['url']
    if '?' in url:
        url += '&'
    else:
        url += '?'
    url += 'type=pliki'
    sts, data = self.getPage(url)
    if not sts:
        return
    nextPage = ph.find(data, ('<a', '>', 'btn-primary '))[1]
    nextPage = self.getFullUrl(ph.clean_html(ph.getattr(nextPage, 'href')), self.cm.meta['url'])
    data = self.cm.ph.getAllItemsBeetwenNodes(data, ('<div', '>', 'list-when-small'), ('</div', '>'))
    for item in data:
        tmp = self.cm.ph.getDataBeetwenNodes(item, ('<a', '>', 'link-title'), ('</a', '>'))[1]
        url = self.getFullUrl(self.cm.ph.getSearchGroups(tmp, '''\shref=['"]([^'^"]+?)['"]''')[0])
        # only direct video links are listed
        if '/video/' not in url:
            continue
        icon = self.getFullUrl(self.cm.ph.getSearchGroups(item, '''\ssrc=['"]([^'^"]+?)['"]''')[0])
        # description: duration followed by the thumbnail's alt text
        desc = [self.cleanHtmlStr(self.cm.ph.getDataBeetwenNodes(item, ('<', '>', 'time-inline'), ('<', '>'), False)[1])]
        desc.append(self.cleanHtmlStr(self.cm.ph.getSearchGroups(item, '''\salt=['"]([^'^"]+?)['"]''')[0]))
        title = self.cleanHtmlStr(tmp)
        params = dict(cItem)
        params.update({'good_for_fav': True, 'title': title, 'url': url, 'icon': icon, 'desc': '[/br]'.join(desc)})
        self.addVideo(params)
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'url': nextPage, 'title': 'Następna strona'}))