def listItems(self, cItem):
    """List video entries from the page at cItem['url'] and append a
    'Next page' directory when pagination is present.

    Fix: removed the dead `params = dict(cItem)` copy that was
    immediately overwritten by the dict literal below.
    """
    printDBG("BajeczkiOrg.listItems")
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    nextPage = ph.find(data, ('<a', '>', 'next page-'))[1]
    nextPage = self.getFullUrl(ph.getattr(nextPage, 'href'), self.cm.meta['url'])
    # splits the per-item description on opening <span> tags
    descObj = re.compile('''<span[^>]+?>''')
    data = ph.find(data, '<main', '</main>', flags=0)[1]
    printDBG(data)
    data = re.compile('<(?:div|article)[^>]+?hentry[^>]+?>').split(data)
    for idx in range(1, len(data), 1):
        item = data[idx]
        url = self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1])
        if url == '':
            continue
        icon = self.getFullUrl(ph.search(item, ph.IMAGE_SRC_URI_RE)[1])
        item = item.split('</h2>', 1)
        title = ph.clean_html(item[0])
        desc = []
        tmp = descObj.split(item[-1])
        for t in tmp:
            t = ph.clean_html(t)
            if t != '':
                desc.append(t)
        params = {'good_for_fav': True, 'title': title, 'url': url, 'icon': icon, 'desc': '[/br]'.join(desc)}
        self.addVideo(params)
    if nextPage:
        self.addDir(MergeDicts(cItem, {'good_for_fav': False, 'title': _('Next page'), 'url': nextPage}))
def getArticle(self, cItem):
    """Return article data (title, text, icon, misc metadata) for cItem.

    Scrapes labelled 'costum_info' rows (Arabic labels for age limit,
    country, duration, year) with a 'sgeneros' fallback for genres.
    """
    info = {}
    desc = cItem.get('desc', '')
    sts, data = self.getPage(cItem['url'])
    if sts:
        rows = re.findall('class="costum_info">(.*?)(valor">|tag">)(.*?)</div>', data, re.S)
        if rows:
            for (label, marker, value) in rows:
                if 'اشراف' in label:
                    info['age_limit'] = ph.clean_html(value)
                if 'بلد' in label:
                    info['country'] = ph.clean_html(value)
                if 'مدة' in label:
                    info['duration'] = ph.clean_html(value)
                if 'تاريخ' in label:
                    info['year'] = ph.clean_html(value)
                if 'sgeneros' in label:
                    info['genres'] = ph.clean_html(value)
        else:
            rows = re.findall('class="sgeneros">(.*?)<div', data, re.S)
            if rows:
                info['genres'] = ph.clean_html(rows[0])
        rows = re.findall('wp-content">(.*?)<div', data, re.S)
        if rows:
            desc = ph.clean_html(rows[0])
    icon = cItem.get('icon')
    title = cItem['title']
    return [{'title': title, 'text': desc, 'images': [{'title': '', 'url': icon}], 'other_info': info}]
def getArticle(self, cItem):
    """Return article data for cItem (quality/year/language/genres/country
    from the 'HoldINfo' block, description from 'StoryLine').

    Fix: use cItem.get('desc', '') — cItem is not guaranteed to carry a
    'desc' key, and the sibling getArticle implementations already use
    the safe lookup.
    """
    otherInfo1 = {}
    desc = cItem.get('desc', '')
    sts, data = self.getPage(cItem['url'])
    if sts:
        lst_dat = re.findall('class="HoldINfo(.*?)class="topBar', data, re.S)
        if lst_dat:
            lst_dat0 = re.findall("<li>(.*?):(.*?)</li>", lst_dat[0], re.S)
            # Arabic labels: quality, date, language, genre, country, year
            for (x1, x2) in lst_dat0:
                if 'الجودة' in x1:
                    otherInfo1['quality'] = ph.clean_html(x2)
                if 'تاريخ' in x1:
                    otherInfo1['year'] = ph.clean_html(x2)
                if 'اللغة' in x1:
                    otherInfo1['language'] = ph.clean_html(x2)
                if 'النوع' in x1:
                    otherInfo1['genres'] = ph.clean_html(x2)
                if 'الدولة' in x1:
                    otherInfo1['country'] = ph.clean_html(x2)
                if 'السنه' in x1:
                    otherInfo1['year'] = ph.clean_html(x2)
        lst_dat = re.findall('StoryLine">(.*?)</div>', data, re.S)
        if lst_dat:
            desc = ph.clean_html(lst_dat[0])
    icon = cItem.get('icon')
    title = cItem['title']
    return [{'title': title, 'text': desc, 'images': [{'title': '', 'url': icon}], 'other_info': otherInfo1}]
def Billboard_chartsalbums(self, url):
    """Add one directory per album scraped from a Billboard chart page."""
    printDBG("MusicBox - Billboard charts album")
    sts, data = self.cm.getPage(url, {'header': HEADER})
    if not sts:
        return
    data = ph.find(data, ('<div', '>', 'chart-number-one'), ('<div', '>', 'chart-list__expanded-header'))[1]
    entries = re.compile('<div[^>]*?data\-has\-content[^>]*?>').split(data)
    for entry in entries:
        name = ph.clean_html(ph.find(entry, ('<div', '>', '__title'), '</div>', flags=0)[1])
        artist = ph.clean_html(ph.find(entry, ('<div', '>', '__artist'), '</div>', flags=0)[1])
        # prefer the fixed 174x174 cover, fall back to srcset variants
        icon = ph.search(entry, '\s(https?://[^\s]+?\-174x174\.jpg)\s')[0]
        if not icon:
            icon = ph.getattr(entry, 'data-srcset').split(' ', 1)[0]
        if not icon:
            icon = ph.getattr(entry, 'srcset').split(' ', 1)[0]
        self.addDir({
            'good_for_fav': True,
            'name': 'List_album_tracks',
            'title': name + ' - ' + artist,
            'page': 0,
            'artist': artist,
            'album': name,
            'icon': self.cm.getFullUrl(icon, self.cm.meta['url']),
        })
def showmenu17(self, cItem):
    """List Quran narration ('الروايات') categories as directories."""
    sts, data = self.getPage(self.MAIN_URL + '/quran')
    if not sts:
        return
    section = re.findall('<div>الروايات</div>(.*?)"/quran/top">', data, re.S)
    if not section:
        return
    for (url, name) in re.findall('<li.*?href="(.*?)">(.*?)</a>', section[0], re.S):
        self.addDir({
            'import': cItem['import'],
            'category': 'host2',
            'title': ph.clean_html(name),
            'url': self.MAIN_URL + url,
            'icon': cItem['icon'],
            'mode': '16',
        })
def showserieaudiomenu1(self, cItem):
    """List audio-series table rows from cItem['url'] as directories."""
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    rows = re.findall('<tr>.*?href="(.*?)">(.*?)<.*?>(.*?)</tr>', data, re.S)
    for (url, name, desc) in rows:
        self.addDir({
            'import': cItem['import'],
            'category': 'host2',
            'title': name,
            'desc': ph.clean_html(desc),
            'url': self.MAIN_URL + url,
            'icon': cItem['icon'],
            'mode': '22',
        })
        printDBG('self.MAIN_URL+url=' + name + '|' + self.MAIN_URL + url)
def getLinksForVideo(self, cItem):
    """Collect playable links for cItem: <video><source> tags, JSON in
    data-item attributes, supported-host iframes, and — as a last
    resort — youtube watch URLs. Results are cached per item url.

    Fix: renamed the local `type` (it shadowed the builtin).
    """
    printDBG("BajeczkiOrg.getLinksForVideo [%s]" % cItem)
    urlTab = self.cacheLinks.get(cItem['url'], [])
    if urlTab:
        return urlTab
    self.cacheLinks = {}
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    data = self.cm.ph.getDataBeetwenNodes(data, ('<div', '>', 'entry-content'), ('<aside', '>'))[1]
    # drop HTML comments so commented-out players are not picked up
    data = re.sub("<!--[\s\S]*?-->", "", data)
    tmp = ph.find(data, '<video', '</video>', flags=ph.IGNORECASE)[1]
    tmp = ph.findall(tmp, '<source', '>', flags=ph.IGNORECASE)
    for item in tmp:
        url = self.getFullUrl(self.cm.ph.getSearchGroups(item, '''src=['"]([^'^"]+?)['"]''', ignoreCase=True)[0])
        srcType = self.cm.ph.getSearchGroups(item, '''type=['"]([^'^"]+?)['"]''', ignoreCase=True)[0].lower()
        if 'mp4' in srcType:
            name = self.up.getDomain(url)
            urlTab.append({'name': name, 'url': strwithmeta(url, {'direct_link': True, 'Referer': self.cm.meta['url']}), 'need_resolve': 1})
    tmp = ph.findall(data, ('<div', '>', 'data-item'), flags=ph.IGNORECASE | ph.START_E)
    for item in tmp:
        if 'sources' not in item:
            continue
        item = ph.clean_html(ph.getattr(item, 'data-item'))
        try:
            item = json_loads(item)
            for it in item['sources']:
                # default the type from the src extension when missing
                it['type'] = it.get('type', it['src'].split('?', 1)[0].rsplit('.', 1)[-1]).lower()
                url = strwithmeta(it['src'], {'direct_link': True, 'Referer': self.cm.meta['url']})
                if 'mp4' in it['type']:
                    urlTab.append({'name': it['type'], 'url': url, 'need_resolve': 1})
                elif 'mpeg' in it['type']:
                    urlTab.extend(getDirectM3U8Playlist(url))
        except Exception:
            printExc()
    tmp = self.cm.ph.getAllItemsBeetwenMarkers(data, '<iframe', '>', caseSensitive=False)
    for item in tmp:
        url = self.getFullUrl(self.cm.ph.getSearchGroups(item, '''src=['"]([^'^"]+?)['"]''', ignoreCase=True)[0])
        if 1 == self.up.checkHostSupport(url):
            name = self.up.getDomain(url)
            urlTab.append({'name': name, 'url': strwithmeta(url, {'Referer': cItem['url']}), 'need_resolve': 1})
    if not urlTab:
        unique = set()
        data = re.compile('''['">]\s*?(https?://[^'^"^<]*?/watch\?v=[^'^"]+?)\s*?[<'"]''').findall(data)
        for url in data:
            if url not in unique:
                urlTab.append({'name': 'Youtube', 'url': strwithmeta(url, {'Referer': cItem['url']}), 'need_resolve': 1})
                unique.add(url)
    if urlTab:
        self.cacheLinks[cItem['url']] = urlTab
    return urlTab
def showelms(self, cItem):
    """Follow the page's window.location redirect and, depending on the
    target layout, add a single video, a server list, or a poster grid."""
    baseUrl = cItem['url']
    sts, data = self.getPage(baseUrl)
    if not sts:
        return
    redirect = re.findall('window.location.{1,4}"(.*?)"', data, re.S)
    if not redirect:
        return
    sts, data = self.getPage(redirect[0])
    if not sts:
        return
    printDBG('ddddaaaaaaaaaaaatttaaaaaaaaaa=' + data + '#')
    if 'class="movies-servers' in data:
        # servers block present -> the original url is directly playable
        self.addVideo({
            'import': cItem['import'],
            'good_for_fav': True,
            'category': 'host2',
            'url': baseUrl,
            'title': cItem['title'],
            'desc': cItem['desc'],
            'icon': cItem['icon'],
            'hst': 'tshost',
        })
        return
    navItems = re.findall('class="navbar.*?href="(.*?)".*?>(.*?)<', data, re.S)
    if navItems:
        for (url, titre) in navItems:
            self.addVideo({
                'import': cItem['import'],
                'good_for_fav': True,
                'category': 'host2',
                'url': url,
                'title': titre,
                'desc': cItem['desc'],
                'icon': cItem['icon'],
                'hst': 'tshost',
            })
        return
    posters = re.findall('class="one-poster.*?href="(.*?)".*?src="(.*?)".*?<h2>(.*?)</h2>', data, re.S)
    for (url, image, titre) in posters:
        # strip promotional Arabic suffixes from the title
        titre = ph.clean_html(titre).replace('مترجمة أون لاين+تحميل', '').replace('مترجمة أون لاين وتحميل', '').replace('أنمي', '').replace('مترجم', '').strip()
        self.addDir({
            'import': cItem['import'],
            'good_for_fav': True,
            'category': 'host2',
            'url': url,
            'title': titre,
            'desc': cItem['desc'],
            'icon': image,
            'hst': 'tshost',
            'mode': '31',
        })
def showitms(self, cItem):
    """List 'ml-item' movie entries for the requested page and append a
    next-page directory."""
    pageUrl = cItem['url']
    page = cItem.get('page', 1)
    if '/page/1/' in pageUrl:
        pageUrl = pageUrl.replace('/page/1/', '/page/' + str(page) + '/')
    else:
        pageUrl = pageUrl + 'page/' + str(page) + '/'
    sts, data = self.getPage(pageUrl)
    if not sts:
        return
    films = re.findall('ml-item">.*?href="(.*?)".*?title="(.*?)".*?original="(.*?)".*?imdb">(.*?)<.*?desc">(.*?)<div.*?Genre:.*?(<.*?)</div>', data, re.S)
    for (url, titre, image, imdb, desc, genre) in films:
        info = ''
        if 'N/A' not in imdb:
            info = tscolor('\c0000??00') + imdb + '\n'
        info = info + tscolor('\c0000??00') + ' Genre: ' + tscolor('\c00????00') + ph.clean_html(genre) + '\n'
        desc = info + tscolor('\c0000??00') + ' Story: ' + tscolor('\c00????00') + ph.clean_html(desc) + '\n'
        self.addDir({
            'import': cItem['import'],
            'good_for_fav': True,
            'EPG': True,
            'category': 'host2',
            'url': url,
            'title': ph.clean_html(titre),
            'desc': desc,
            'icon': self.MAIN_URL + image,
            'hst': 'tshost',
            'mode': '31',
        })
    self.addDir({
        'import': cItem['import'],
        'title': tscolor('\c0000??00') + 'Page ' + str(page + 1),
        'page': page + 1,
        'category': 'host2',
        'url': cItem['url'],
        'icon': cItem['icon'],
        'mode': '30',
    })
def showmenu15(self, cItem):
    """List audible Quran ('المصاحف المسموعة') categories as directories."""
    baseUrl = 'http://ar.assabile.com'
    sts, data = self.getPage(baseUrl + '/quran')
    if not sts:
        return
    section = re.findall('<div>المصاحف المسموعة</div>(.*?)<div>الروايات</div>', data, re.S)
    if not section:
        return
    for (url, name) in re.findall('<li.*?href="(.*?)">(.*?)</a>', section[0], re.S):
        self.addDir({
            'import': cItem['import'],
            'category': 'host2',
            'title': ph.clean_html(name),
            'url': baseUrl + url,
            'icon': cItem['icon'],
            'mode': '16',
        })
def getLinksForVideo(self, cItem):
    """Return playable links for cItem: youtube urls are delegated to the
    url parser, direct trailer links are returned as-is, otherwise the
    sources table is scraped. Scraped results are cached per url.

    Fix: the href post-processing used `.replace('&', '&')`, a no-op;
    restored the intended HTML-entity decode of '&amp;'.
    """
    printDBG("AllBoxTV.getLinksForVideo [%s]" % cItem)
    self.tryTologin()
    if 1 == self.up.checkHostSupport(cItem.get('url', '')):
        videoUrl = cItem['url'].replace('youtu.be/', 'youtube.com/watch?v=')
        return self.up.getVideoLinkExt(videoUrl)
    elif cItem.get('direct_link') == True:
        return [{'name': 'trailer', 'url': cItem['url'], 'need_resolve': 0}]
    cacheKey = cItem['url']
    cacheTab = self.cacheLinks.get(cacheKey, [])
    if len(cacheTab):
        return cacheTab
    self.cacheLinks = {}
    retTab = []
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    data = self.cm.ph.getDataBeetwenNodes(data, ('<div', '>', 'id="sources"'), ('</table', '>'))[1]
    rows = self.cm.ph.getAllItemsBeetwenMarkers(data, '<tr', '</tr>')
    for item in rows:
        url = self.getFullUrl(self.cm.ph.getSearchGroups(item, '''href=['"]([^"^']+?)['"]''')[0].replace('&amp;', '&'))
        if not self.cm.isValidUrl(url):
            continue
        # build the link label from all but the last table cell
        name = []
        cols = self.cm.ph.getAllItemsBeetwenMarkers(item, '<td', '</td>')[0:-1]
        for t in cols:
            t = ph.clean_html(t.split('<b>', 1)[-1])
            if t != '':
                name.append(t)
        name = ' | '.join(name)
        retTab.append({'name': name, 'url': self.getFullUrl(url), 'need_resolve': 1})
    if len(retTab):
        self.cacheLinks[cacheKey] = retTab
    else:
        retTab.append({'name': 'one', 'url': cItem['url'], 'need_resolve': 1})
    return retTab
def SearchResult(self, str_ch, page, extra):
    """Add directories for search results of `str_ch` at the given page."""
    searchUrl = self.MAIN_URL + '/search/' + str_ch + '/page/' + str(page)
    sts, data = self.getPage(searchUrl)
    if not sts:
        return
    for (url1, image, name_eng) in re.findall('<div class="tags_box">.*?href="(.*?)".*?url\((.*?)\).*?<h1>(.*?)<', data, re.S):
        name_eng = ph.clean_html(name_eng.strip())
        # uniform_titre splits the raw title into (description, title)
        desc0, name_eng = self.uniform_titre(name_eng)
        self.addDir({
            'import': extra,
            'category': 'host2',
            'title': name_eng,
            'url': url1,
            'desc': desc0,
            'icon': image,
            'mode': '31',
            'good_for_fav': True,
            'EPG': True,
            'hst': 'tshost',
        })
def SearchResult(self, str_ch, page, extra):
    """Add video entries for search results of `str_ch` at the given page."""
    searchUrl = self.MAIN_URL + '/page/' + str(page) + '/?s=' + str_ch
    sts, data = self.getPage(searchUrl)
    if not sts:
        return
    for (rawDesc, titre, url, image) in re.findall('movie big">(.*?)<h2.*?title">(.*?)<.*?href="(.*?)">.*?src="(.*?)"', data, re.S):
        titre = titre.replace('Watch Online', '')
        self.addVideo({
            'import': extra,
            'good_for_fav': True,
            'EPG': True,
            'category': 'video',
            'url': url,
            'title': ph.clean_html(titre),
            'desc': ph.clean_html(rawDesc),
            'icon': image,
            'hst': 'tshost',
        })
def showitms(self, cItem):
    """List 'movie big' entries for the current page and append a
    next-page directory; icons carry the session cookie header."""
    page = cItem.get('page', 1)
    sts, data = self.getPage(cItem['url'] + 'page/' + str(page) + '/')
    if not sts:
        return
    cookieHeader = self.cm.getCookieHeader(self.COOKIE_FILE)
    entries = re.findall('movie big">(.*?)<h2.*?title">(.*?)<.*?href="(.*?)">.*?src="(.*?)"', data, re.S)
    for (rawDesc, titre, itemUrl, image) in entries:
        image = strwithmeta(image, {'Cookie': cookieHeader, 'User-Agent': self.USER_AGENT})
        titre = titre.replace('Watch Online', '')
        self.addVideo({
            'import': cItem['import'],
            'good_for_fav': True,
            'EPG': True,
            'category': 'host2',
            'url': itemUrl,
            'title': ph.clean_html(titre),
            'desc': ph.clean_html(rawDesc),
            'icon': image,
            'hst': 'tshost',
        })
    self.addDir({
        'import': cItem['import'],
        'title': tscolor('\c0000??00') + 'Page ' + str(page + 1),
        'page': page + 1,
        'category': 'host2',
        'url': cItem['url'],
        'icon': cItem['icon'],
        'mode': '30',
    })
def listCats(self, cItem, category):
    """Build the category menu: entries with a sub-list are stored in
    self.cache and referenced by 'idx'; plain links become 'list_items'
    directories."""
    printDBG('HoofootCom.listCats [%s]' % cItem)
    self.cache = []
    sts, data = self.cm.getPage(cItem['url'])
    if not sts:
        return
    sp = re.compile('<li[^>]+?class=[\'"]has-sub[^>]+?>')
    menu = self.cm.ph.getDataBeetwenReMarkers(data, sp, re.compile('Community'), False)[1]
    for entry in sp.split(menu):
        parts = entry.split('<ul')
        catTitle = ph.clean_html(self.cm.ph.getDataBeetwenMarkers(parts[0], '<a ', '</a>')[1])
        catUrl = self.cm.ph.getSearchGroups(parts[0], 'href=[\'"]([^\'^"]+?)[\'"]')[0]
        catTab = []
        if 2 == len(parts):
            # the entry carries its own <ul> sub-list
            for catItem in self.cm.ph.getAllItemsBeetwenMarkers(parts[1], '<li>', '</li>'):
                url = self.cm.ph.getSearchGroups(catItem, 'href=[\'"]([^\'^"]+?)[\'"]')[0]
                if '' == url:
                    continue
                catTab.append({'title': ph.clean_html(catItem), 'url': self.getFullUrl(url)})
        params = dict(cItem)
        params['title'] = catTitle
        if len(catTab):
            params.update({'category': category, 'idx': len(self.cache)})
            self.cache.append(catTab)
            self.addDir(params)
        elif catUrl != '#' and catUrl != '':
            params.update({'category': 'list_items', 'url': self.getFullUrl(catUrl)})
            self.addDir(params)
def fillCache(self, cItem):
    """Populate self.cache with popular/trending league lists and the
    full division -> regions tree scraped from cItem['url']."""
    printDBG('OurmatchNet.fillCache [%s]' % cItem)
    self.cache = {'popular': [], 'trending': [], 'allleagues': []}
    sts, data = self.cm.getPage(cItem['url'])
    if not sts:
        return
    sections = [
        ('<li class="popular-leagues-list">', '</ul>', 'popular'),
        ('<li class="trending-competitions">', '</ul>', 'trending'),
    ]
    for start, stop, key in sections:
        block = self.cm.ph.getDataBeetwenMarkers(data, start, stop)[1]
        for item in self.cm.ph.getAllItemsBeetwenMarkers(block, '<li ', '</li>'):
            url = self.cm.ph.getSearchGroups(item, 'href=[\'"](http[^\'^"]+?)[\'"]')[0]
            if '' == url:
                continue
            self.cache[key].append({'title': ph.clean_html(item), 'url': url})
    for division in self.cm.ph.getAllItemsBeetwenMarkers(data, '<li class="header">', '</ul>'):
        division = division.split('<ul class="regions">')
        if 2 != len(division):
            continue
        divisionTitle = ph.clean_html(division[0])
        regionsTab = []
        for region in self.cm.ph.getAllItemsBeetwenMarkers(division[1], '<li ', '</li>'):
            url = self.cm.ph.getSearchGroups(region, 'href=[\'"](http[^\'^"]+?)[\'"]')[0]
            if '' == url:
                continue
            regionsTab.append({'title': ph.clean_html(region), 'url': url})
        if len(regionsTab):
            self.cache['allleagues'].append({'title': divisionTitle, 'regions_tab': regionsTab})
def getArticle(self, cItem):
    """Return article data scraped from an AkoAm item page: labelled
    'label : value' spans, genre line, and story text."""
    printDBG("AkoAm.getVideoLinks [%s]" % cItem)
    otherInfo1 = {}
    title = cItem['title']
    icon = cItem.get('icon', '')
    desc = cItem.get('desc', '')
    sts, data = self.getPage(cItem['url'])
    if sts:
        # Arabic label keyword -> info key, checked in this order
        # (mirrors the original elif chain: first match wins).
        keyMap = (
            ('مدة', 'duration'),
            ('اللغة', 'language'),
            ('ترجمة', 'translation'),
            ('جودة', 'quality'),
            ('انتاج', 'production'),
            ('سنة', 'year'),
        )
        for elm in re.findall('class="font-size-16 text-white mt-2">(.*?)</span>', data, re.S):
            elm = ph.clean_html(elm)
            if ':' not in elm:
                continue
            for marker, key in keyMap:
                if marker in elm:
                    otherInfo1[key] = elm.split(':', 1)[1].strip()
                    break
        genres = re.findall('class="font-size-16 d-flex align-items-center mt-3">(.*?)</div>', data, re.S)
        if genres:
            otherInfo1['genre'] = ph.clean_html(genres[0])
        story = re.findall('header-link text-white">.*?<h2>(.*?)</div>', data, re.S)
        if story:
            desc = ph.clean_html(story[0])
    return [{'title': title, 'text': desc, 'images': [{'title': '', 'url': icon}], 'other_info': otherInfo1}]
def SearchResult(self, str_ch, page, extra):
    """Add search-result directories for `str_ch` at the given page,
    scraping title, icon, view count and rating from each content box.

    Fix: the non-'://' image fallback referenced the undefined name
    `cItem` (NameError — cItem is not a parameter of this method);
    it now falls back to an empty icon.
    """
    HTTP_HEADER = {'User-Agent': self.USER_AGENT}
    defaultParams = {'header': HTTP_HEADER, 'with_metadata': True}
    url_ = self.MAIN_URL + '/search?s=' + str_ch + '&page=' + str(page)
    sts, data = self.getPage_(url_, defaultParams)
    if data:
        lst_data = re.findall('class="content-box">.*?href="(.*?)".*?src="(.*?)"(.*?)<h3>(.*?)</h3>', data, re.S)
        for (url1, image, desc0, name_eng) in lst_data:
            desc1 = ''
            lst_inf = re.findall('ti-eye">(.*?)</', desc0, re.S)
            if lst_inf:
                desc1 = desc1 + tscolor('\c00????00') + 'Views: ' + tscolor('\c00??????') + ph.clean_html(lst_inf[0]) + '\n'
            lst_inf = re.findall('ti-star">(.*?)</', desc0, re.S)
            if lst_inf:
                desc1 = desc1 + tscolor('\c00????00') + 'Rate: ' + tscolor('\c00??????') + ph.clean_html(lst_inf[0]) + '\n'
            desc00, name_eng = self.uniform_titre(name_eng)
            if '://' in image:
                # percent-encode everything after the scheme
                image = image.split('://')[0] + '://' + Quote(image.split('://')[1])
            else:
                image = ''
            desc = desc00 + desc1
            self.addDir({
                'import': extra,
                'good_for_fav': True,
                'category': 'host2',
                'url': url1,
                'data_post': '',
                'title': ph.clean_html(name_eng),
                'desc': desc,
                'icon': image,
                'mode': '31',
                'EPG': True,
                'hst': 'tshost',
            })
def getLinksForVideo(self, cItem):
    """Return station/track links. URLs of the form 'page#station_id'
    are resolved via the stations JSON; plain URLs via the page DOM."""
    printDBG('MP3COInfo.getLinksForVideo [%s]' % cItem)
    urlsTab = []
    parts = cItem['url'].split('#', 1)
    if len(parts) == 2:
        sts, data = self.getPage(parts[0], self.getDefaultParams(True))
        if not sts:
            return
        cUrl = self.cm.meta['url']
        self.setMainUrl(cUrl)
        try:
            data = json_loads(data)
            for item in data['stations']:
                if item['id'] != parts[1]:
                    continue
                url = self.getFullUrl(item['stream_url'])
                name = ph.clean_html(item['name'])
                urlsTab.append({'url': strwithmeta(url, {'Referer': cUrl}), 'name': name, 'need_resolve': 0})
        except Exception:
            printExc()
    else:
        sts, data = self.getPage(cItem['url'])
        if not sts:
            return urlsTab
        cUrl = self.cm.meta['url']
        data = ph.find(data, ('<div', '>', 'actions'), '</div>', flags=0)[1]
        data = ph.findall(data, ('<a', '>'), '</a>', flags=ph.START_S)
        # items come in (tag, body) pairs: the tag holds the href,
        # the body holds the visible name
        for idx in range(1, len(data), 2):
            url = self.getFullUrl(ph.search(data[idx - 1], ph.A)[1])
            name = ph.clean_html(data[idx])
            urlsTab.append({'url': strwithmeta(url, {'Referer': cUrl}), 'name': name, 'need_resolve': 0})
    return urlsTab
def showelms(self, cItem):
    """List season episodes from 'AA-Season' tables, or add the page
    itself as a single video when no season table is present.

    Fix: the per-season data was passed through `.replace('"', '"')`,
    a no-op; restored the intended decode of HTML-escaped quotes.
    """
    urlo = cItem['url']
    img_ = cItem['icon']
    desc = cItem['desc']
    sts, data = self.getPage(urlo)
    if not sts:
        return
    data_list = re.findall('AA-Season.*?">(.*?)<table>(.*?)</table>', data, re.S)
    if data_list:
        for (name, data1) in data_list:
            data1 = data1.replace('&quot;', '"')
            self.addMarker({'title': '\c00????00' + ph.clean_html(name), 'icon': cItem['icon']})
            episodes = re.findall('class="Num">(.*?)<a.*?href="(.*?)".*?src="(.*?)".*?href="(.*?)">(.*?)</a>(.*?)</td>', data1, re.S)
            for (num, url, image, x1, titre, date) in episodes:
                titre = 'Episode \c00????00' + ph.clean_html(num) + '\c00??????: ' + titre + ' \c0000????(' + ph.clean_html(date) + ')'
                self.addVideo({
                    'import': cItem['import'],
                    'category': 'host2',
                    'title': titre,
                    'url': url,
                    'desc': desc,
                    'icon': image,
                    'hst': 'tshost',
                    'good_for_fav': True,
                })
    else:
        self.addVideo({
            'import': cItem['import'],
            'category': 'host2',
            'title': cItem['title'],
            'url': urlo,
            'desc': desc,
            'icon': img_,
            'hst': 'tshost',
            'good_for_fav': True,
        })
def getLinksForVideo(self, cItem):
    """Return stream links for cItem; supports stamped data-id hosters
    and plain url= links. Results are cached per item url.

    Fix: removed the dead `linksTab = []` assignment that was
    immediately overwritten by the cache lookup.
    """
    printDBG("FilmPalastTo.getLinksForVideo [%s]" % cItem)
    linksTab = self.cacheLinks.get(cItem['url'], [])
    if len(linksTab) > 0:
        return linksTab
    sts, data = self.getPage(cItem['url'], self.defaultParams)
    if not sts:
        return []
    data = ph.findall(data, ('<ul', '>', 'currentStreamLinks'), '</ul>', flags=0)
    for item in data:
        printDBG("FilmPalastTo.getLinksForVideo item [%s]" % item)
        data_id = ph.getattr(item, 'data-id')
        data_stamp = ph.getattr(item, 'data-stamp')
        if data_id and data_stamp:
            # stamped hosters are resolved later from data_id/data_stamp
            url = strwithmeta('%s|%s' % (data_id, data_stamp), {'data_id': data_id, 'data_stamp': data_stamp, 'links_key': cItem['url']})
        else:
            url = strwithmeta(self.getFullUrl(self.cm.ph.getSearchGroups(item, '''url=['"]([^'^"]+?)['"]''')[0]), {'links_key': cItem['url']})
        if url == '':
            continue
        title = ph.clean_html(ph.find(item, ('<p', '>'), '</p>', flags=0)[1])
        if title == '':
            title = ph.clean_html(item)
        linksTab.append({'name': title, 'url': strwithmeta(url, {'Referer': cItem['url']}), 'need_resolve': 1})
    if len(linksTab):
        self.cacheLinks[cItem['url']] = linksTab
    return linksTab
def showitms(self, cItem):
    """List items for the current page (regular 'TPost C' grid or the
    /letters/ table layout) and append a 'Page Suivante' directory when
    the page is full (more than 19 items)."""
    pageUrl = cItem['url']
    page = cItem.get('page', 1)
    if '?' in pageUrl:
        base, query = pageUrl.split('?', 1)
        pageUrl = base + 'page/' + str(page) + '/?' + query
    else:
        pageUrl = pageUrl + 'page/' + str(page) + '/'
    sts, data = self.getPage(pageUrl)
    if not sts:
        return
    if not '/letters/' in pageUrl:
        count = 0
        for data1 in re.findall('class="TPost C">(.*?)</li>', data, re.S):
            count += 1
            heads = re.findall('href="(.*?)".*?src="(.*?)".*?Title">(.*?)<', data1, re.S)
            if not heads:
                continue
            url, image, titre = heads[0]
            desc = ''
            found = re.findall('class="Year">(.*?)<', data1, re.S)
            if found:
                desc = tscolor('\c00??????') + 'Year: ' + tscolor('\c00????00') + found[0] + ' ' + tscolor('\c00??????') + '| '
            found = re.findall('star">(.*?)<', data1, re.S)
            if found:
                desc = desc + tscolor('\c00??????') + 'Rate: ' + tscolor('\c00????00') + found[0] + ' ' + tscolor('\c00??????') + '| '
            found = re.findall('Qlty">(.*?)<', data1, re.S)
            if found:
                desc = desc + tscolor('\c00??????') + 'Qualité: ' + tscolor('\c00????00') + found[0] + ' ' + tscolor('\c00??????') + '| '
            found = re.findall('Description">(.*?)</p>', data1, re.S)
            if found:
                desc = desc + '\\n' + tscolor('\c00??????') + 'Résumé: ' + tscolor('\c0000????') + ph.clean_html(found[0])
            found = re.findall('Genre:(.*?)</p>', data1, re.S)
            if found:
                desc = desc + '\\n' + tscolor('\c00??????') + 'Genre: ' + tscolor('\c00????00') + ph.clean_html(found[0])
            found = re.findall('Director:(.*?)</p>', data1, re.S)
            if found:
                desc = desc + '\\n' + tscolor('\c00??????') + 'Director: ' + tscolor('\c00????00') + ph.clean_html(found[0])
            found = re.findall('Cast:(.*?)</p>', data1, re.S)
            if found:
                desc = desc + '\\n' + tscolor('\c00??????') + 'Cast: ' + tscolor('\c00????00') + ph.clean_html(found[0])
            self.addDir({'import': cItem['import'], 'category': 'host2', 'url': url, 'title': self.cleanHtmlStr(titre), 'desc': desc, 'icon': image, 'mode': '31'})
        if count > 19:
            self.addDir({'import': cItem['import'], 'title': tscolor('\c0000????') + 'Page Suivante', 'page': page + 1, 'category': 'host2', 'url': cItem['url'], 'icon': cItem['icon'], 'mode': '30'})
    else:
        count = 0
        rows = re.findall('class="Num">.*?href="(.*?)".*?src="(.*?)"(.*?)<strong>(.*?)<.*?class="Info">(.*?)</tr>', data, re.S)
        for (url, image, type_, titre, desc1) in rows:
            count += 1
            if 'Qlty">TV' in type_:
                type_ = ' ' + tscolor('\c0000????') + '(Serie)'
            else:
                type_ = ''
            desc = ''
            found = re.findall('Qlty">(.*?)<', desc1, re.S)
            if found:
                desc = desc + tscolor('\c00??????') + 'Qualité: ' + tscolor('\c00????00') + ph.clean_html(found[0]) + ' ' + tscolor('\c00??????') + '| '
            found = re.findall('<td>(.*?)</td', desc1, re.S)
            if found:
                desc = desc + tscolor('\c00??????') + 'Durée: ' + tscolor('\c00????00') + ph.clean_html(found[0]) + ' ' + tscolor('\c00??????') + '| Genre: ' + tscolor('\c00????00') + ph.clean_html(found[1])
            self.addDir({'import': cItem['import'], 'category': 'host2', 'url': url, 'title': self.cleanHtmlStr(titre) + type_, 'desc': desc, 'icon': image, 'mode': '31'})
        if count > 19:
            self.addDir({'import': cItem['import'], 'title': tscolor('\c0000????') + 'Page Suivante', 'page': page + 1, 'category': 'host2', 'url': cItem['url'], 'icon': cItem['icon'], 'mode': '30'})
def listItems(self, cItem, nextCategory=''):
    """List movies/episodes/seasons from a FaselHD page; handles
    pagination and the season 'sub-view' links at the page bottom."""
    printDBG("FaselhdCOM.listItems [%s]" % cItem)
    page = cItem.get('page', 1)
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    baseData = data
    nextPage = self.cm.ph.getDataBeetwenNodes(data, ('<div', '>', 'pagination'), ('</div', '>'))[1]
    nextPage = self.getFullUrl(self.cm.ph.getSearchGroups(nextPage, '''<a[^>]+?href=['"]([^'^"]+?)['"][^>]*?>%s<''' % (page + 1))[0])
    data = self.cm.ph.getAllItemsBeetwenNodes(data, ('<div', '>', 'one-movie'), ('</a', '>'))
    printDBG(data)
    for item in data:
        url = self.getFullUrl(self.cm.ph.getSearchGroups(item, '''href=['"]([^'^"]+?)['"]''')[0])
        icon = self.getFullIconUrl(self.cm.ph.getSearchGroups(item, '''[\s\-]src=['"]([^'^"]+?)['"]''')[0])
        title = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(item, '<h1', '</h1>')[1])
        if title == '':
            title = self.cleanHtmlStr(self.cm.ph.getSearchGroups(item, '''alt=['"]([^'^"]+?)['"]''')[0])
        if title == '':
            continue
        desc = []
        meta = self.cm.ph.getDataBeetwenNodes(item, ('<div', '>', 'movie-meta'), ('</div', '>'))[1]
        for t in self.cm.ph.getAllItemsBeetwenMarkers(meta, '<span', '</span>'):
            label = ''
            if 'fa-star' in t:
                label = _('Rating:')
            elif 'fa-eye' in t:
                label = _('Views:')
            t = self.cleanHtmlStr(t)
            if t != '':
                if label != '':
                    desc.append('%s %s' % (label, t))
                else:
                    desc.append(t)
        if '/seasons/' in self.cm.meta['url'] and not cItem.get('sub_view'):
            # season page -> entries are directories prefixed with the show title
            title = '%s - %s' % (cItem['title'], title)
            self.addDir(MergeDicts(cItem, {'url': url, 'title': title, 'sub_view': True}))
        else:
            params = dict(cItem)
            params.update({'good_for_fav': True, 'title': title, 'url': url, 'icon': icon, 'desc': '[/br]'.join(desc)})
            if nextCategory == '' or cItem.get('f_list_episodes'):
                self.addVideo(params)
            else:
                params['category'] = nextCategory
                self.addDir(params)
    if not cItem.get('sub_view'):
        for item in ph.findall(baseData, ('<span', '>', 'sub-view'), '</span>'):
            if 'display:none' in item:
                continue
            url = self.getFullUrl(ph.getattr(item, 'href'), self.cm.meta['url'])
            title = '%s - %s' % (cItem['title'], ph.clean_html(item))
            self.addDir(MergeDicts(cItem, {'url': url, 'title': title, 'sub_view': True}))
    if self.cm.isValidUrl(nextPage):
        params = dict(cItem)
        params.update({'good_for_fav': False, 'title': _("Next page"), 'url': nextPage, 'page': page + 1})
        self.addDir(params)
def listMain(self, cItem, nextCategory):
    """Build the main menu from the carousel categories and the navbar
    sections, then append the search entries."""
    printDBG("Cinemaxx.listMain")
    sts, data = self.getPage(self.getMainUrl())
    if not sts:
        return
    self.setMainUrl(self.cm.meta['url'])
    subItems = []
    for item in ph.findall(data, ('<div', '>', 'owl-cat'), '</div>'):
        icon = self.getFullIconUrl(ph.search(item, ph.IMAGE_SRC_URI_RE)[1])
        item = ph.find(item, ('<h2', '>'), '</h2>', flags=0)[1]
        url = self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1])
        title = self.cleanHtmlStr(item)
        subItems.append(MergeDicts(cItem, {'good_for_fav': True, 'category': nextCategory, 'title': title, 'url': url}))
    printDBG(subItems)
    sections = ph.find(data, ('<div', '>', 'navbar-collapse'), '</div>')[1]
    sections = ph.rfindall(sections, '</li>', ('<li', '>', 'nav'), flags=0)
    for section in sections:
        anchors = ph.findall(section, ('<a', '>'), '</a>', flags=ph.START_S, limits=1)
        if not anchors:
            continue
        sTitle = ph.clean_html(anchors[1])
        sUrl = ph.getattr(anchors[0], 'href')
        if sUrl == '/':
            # the home entry groups the carousel categories
            self.addDir(MergeDicts(cItem, {'category': 'sub_items', 'sub_items': subItems, 'title': sTitle}))
        elif '<ul' in section:
            subItems = []
            for item in ph.findall(section, ('<li', '>'), '</li>', flags=0):
                title = ph.clean_html(item)
                url = self.getFullUrl(ph.search(item, ph.A_HREF_URI_RE)[1])
                subItems.append(MergeDicts(cItem, {'good_for_fav': True, 'category': nextCategory, 'title': title, 'url': url}))
            if len(subItems):
                self.addDir(MergeDicts(cItem, {'category': 'sub_items', 'sub_items': subItems, 'title': sTitle}))
        else:
            self.addDir(MergeDicts(cItem, {'category': nextCategory, 'url': self.getFullUrl(sUrl), 'title': sTitle}))
    MAIN_CAT_TAB = [
        {'category': 'search', 'title': _('Search'), 'search_item': True},
        {'category': 'search_history', 'title': _('Search history'), },
    ]
    self.listsTab(MAIN_CAT_TAB, cItem)
def listCategories(self, cItem, nextCategory):
    """List 'category-bar' entries as directories.

    Fix: removed the dead `params = dict(cItem)` copy that was
    immediately overwritten by the dict literal below.
    """
    printDBG("BajeczkiOrg.listCategories")
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return
    data = self.cm.ph.getAllItemsBeetwenNodes(data, ('<div', '>', 'category-bar'), ('</div', '>'))
    for item in data:
        url = self.getFullUrl(self.cm.ph.getSearchGroups(item, '''\shref=['"]([^"^']+?)['"]''')[0])
        if url == '':
            continue
        item = item.split('</span>', 1)
        title = ph.clean_html(item[0])
        desc = ph.clean_html(item[-1])
        # fake image extension so the icon is resolved lazily from the page
        icon = url + '?fake=need_resolve.jpeg'
        params = {'good_for_fav': True, 'category': nextCategory, 'title': title, 'url': url, 'icon': icon, 'desc': desc}
        self.addDir(params)
def getArticle(self, cItem):
    """Return article data for an Asia2tv item: quality/rating/age-limit
    blocks, labelled <li> rows (Arabic labels), genre, and description."""
    printDBG("Asia2tv.getVideoLinks [%s]" % cItem)
    otherInfo1 = {}
    desc = ''
    sts, data = self.cm.getPage(cItem['url'])
    if sts:
        lst_dat = re.findall('info-detail-single">(.*?)</ul>(.*?)</div>(.*?)</div>', data, re.S)
        if lst_dat:
            infoBlock, genreBlock, descBlock = lst_dat[0]
            found = re.findall('sdhdfhd">(.*?)</div>', infoBlock, re.S)
            if found:
                otherInfo1['quality'] = ph.clean_html(found[0])
            found = re.findall('post_imdb">(.*?)</div>', infoBlock, re.S)
            if found:
                otherInfo1['imdb_rating'] = ph.clean_html(found[0])
            found = re.findall('box-date">(.*?)</div>', infoBlock, re.S)
            if found:
                otherInfo1['age_limit'] = ph.clean_html(found[0])
            # Arabic labels: series name, Arabic name, episodes, country, air date
            for (label, value) in re.findall('<li>(.*?)</span>(.*?)</', infoBlock, re.S):
                if 'اسم المسلسل' in label:
                    otherInfo1['original_title'] = value
                if 'الاسم بالعربي' in label:
                    otherInfo1['alternate_title'] = value
                if 'الحلقات' in label:
                    otherInfo1['episodes'] = value
                if 'البلد' in label:
                    otherInfo1['country'] = value
                if 'موعد البث' in label:
                    otherInfo1['first_air_date'] = value
            otherInfo1['genre'] = ph.clean_html(genreBlock)
            desc = ph.clean_html(descBlock)
    icon = cItem.get('icon')
    title = cItem['title']
    return [{'title': title, 'text': desc, 'images': [{'title': '', 'url': icon}], 'other_info': otherInfo1}]
def showelms(self, cItem):
    """Add the page itself as a video, then one video per episode block."""
    pageUrl = cItem['url']
    icon = cItem['icon']
    self.addVideo({'import': cItem['import'], 'good_for_fav': True, 'category': 'video', 'url': pageUrl, 'title': cItem['title'], 'desc': '', 'icon': icon, 'hst': 'tshost'})
    sts, data = self.getPage(pageUrl)
    if not sts:
        return
    for (url, titre) in re.findall('<div class="episode-block.*?href="(.*?)".*?>(.*?)</div>', data, re.S):
        # strip the Arabic 'episode number' label, keep just the number
        titre = ph.clean_html(titre).replace('حلقة رقم', '').strip()
        self.addVideo({'import': cItem['import'], 'good_for_fav': True, 'category': 'video', 'url': url, 'title': 'E' + titre, 'desc': '', 'icon': icon, 'hst': 'tshost'})
def get_desc(self, desc0, desc1, desc2):
    """Assemble a colored description string from three HTML fragments:
    the IMDB block, the genre/quality list, and key/value <li> rows."""
    parts = []
    found = re.findall('StarsIMDB">(.*?)</div>', desc0, re.S)
    if found and 'n/A' not in found[0]:
        parts.append(tscolor('\c00????00') + 'IMDB: ' + tscolor('\c00??????') + ph.clean_html(found[0]) + '\n')
    found = re.findall('fa-film">(.*?)</li>', desc1, re.S)
    if found and 'n/A' not in found[0]:
        parts.append(tscolor('\c00????00') + 'Genre: ' + tscolor('\c00??????') + ph.clean_html(found[0].replace('</span>', '|')) + '\n')
    found = re.findall('desktop">(.*?)</li>', desc1, re.S)
    if found:
        parts.append(tscolor('\c00????00') + 'Quality: ' + tscolor('\c00??????') + ph.clean_html(found[0]) + '\n')
    # Arabic labels: year, supervision/type, country
    for (label, value) in re.findall('<li>.*?<span>(.*?)</span>(.*?)</li>', desc2, re.S):
        if 'سنة' in label:
            parts.append(tscolor('\c00????00') + 'Year: ' + tscolor('\c00??????') + ph.clean_html(value) + '\n')
        if 'الإشراف' in label:
            parts.append(tscolor('\c00????00') + 'Type: ' + tscolor('\c00??????') + ph.clean_html(value) + '\n')
        if 'دولة' in label:
            parts.append(tscolor('\c00????00') + 'Country: ' + tscolor('\c00??????') + ph.clean_html(value) + '\n')
    return ''.join(parts)
def doListItems(self, cItem, data):
    """Add an 'explore_item' directory for every 'post-' block in `data`."""
    for block in ph.rfindall(data, '</div>', ('<div', '>', 'post-')):
        url = self.getFullUrl(ph.search(block, ph.A)[1])
        icon = self.getFullIconUrl(ph.search(block, self.reIMG)[1])
        title = ph.clean_html(ph.find(block, ('<h2', '>'), '</h2>', flags=0)[1])
        meta = ph.find(block, ('<div', '>', 'meta'), '</div>', flags=0)[1]
        metaParts = []
        for span in ph.findall(meta, ('<span', '>'), '</span>', flags=0):
            span = ph.clean_html(span)
            if span:
                metaParts.append(span)
        desc = [' | '.join(metaParts)]
        desc.append(ph.clean_html(ph.find(block, ('<p', '>'), '</p>', flags=0)[1]))
        desc.append(ph.clean_html(ph.find(block, ('<div', '>', 'desc'), '</div>', flags=0)[1]))
        self.addDir(MergeDicts(cItem, {
            'good_for_fav': True,
            'prev_url': url,
            'category': 'explore_item',
            'title': title,
            'url': url,
            'icon': icon,
            'desc': '[/br]'.join(desc),
        }))
def _listItems(self, data):
    """Parse 'box_movie' blocks into a list of {title, url, icon, desc}."""
    retTab = []
    blocks = ph.rfindall(data, '</div>', ('<div', '>', 'box_movie'))
    printDBG(blocks)
    for block in blocks:
        printDBG('+++')
        url = self.getFullUrl(ph.search(block, ph.A)[1])
        if not url:
            continue
        title = ph.clean_html(ph.rfind(block, '</div>', '</div>')[1])
        if title == '':
            title = ph.clean_html(ph.getattr(block, 'title'))
        icon = self.getFullIconUrl(ph.getattr(block, 'data-src'))
        if icon == '':
            # fall back to a CSS background-image url(...)
            icon = self.getFullIconUrl(ph.search(block, '''\surl\(([^\)]+?)\)''')[0].strip())
        cats = []
        catBlock = ph.find(block, ('<div', '>', 'cats'), '</div>', flags=0)[1]
        for t in ph.findall(catBlock, ('<a', '>'), '</a>'):
            t = ph.clean_html(t)
            if t != '':
                cats.append(t)
        desc = [', '.join(cats)]
        for t in ph.findall(block, ('<', '>', 'badge-small'), ('</', '>', 'a')):
            t = ph.clean_html(t)
            if t != '':
                desc.append(t)
        desc = ' | '.join(desc)
        desc += '[/br]' + ph.clean_html(ph.find(block, ('<p', '>'), '</p>')[1])
        retTab.append({'title': title, 'url': url, 'icon': icon, 'desc': desc})
    return retTab