def sources(self, url, hostDict, hostprDict):
    """Collect direct CDN stream sources for the given video id.

    Reads the streamer base URL from the site's config endpoint, fetches
    the HLS master playlist for *url* and emits one source dict per
    variant stream.  Returns a (possibly empty) list of source dicts.
    """
    sources = []
    try:
        if not url:
            return sources

        # The config endpoint answers with JSON holding the streamer base.
        conf = client.request(urlparse.urljoin(self.base_link, self.conf_link), XHR=True)
        streamer = json.loads(conf).get('streamer')

        # Fetch the master playlist and pull out (height, url) pairs.
        playlist = client.request(streamer + '%s.mp4/master.m3u8' % url, XHR=True)
        variants = re.findall('RESOLUTION\s*=\s*\d+x(\d+).*?\n(http.*?)(?:\n|$)',
                              playlist, re.IGNORECASE)

        for height, link in variants:
            quality = source_utils.label_to_quality(height)
            sources.append({'source': 'CDN', 'quality': quality, 'language': 'de',
                            'url': link, 'direct': True, 'debridonly': False})

        return sources
    except:
        return sources
def resolve(self, url):
    # Resolve a relative site link to its final (external) URL.  When the
    # first redirect stays on the site, retry once with a logged-in session;
    # returns None on failure.
    try:
        url = client.request(urlparse.urljoin(self.base_link, url), output='geturl')
        # Already redirected off-site: done.
        if self.base_link not in url:
            return url

        header = {'User-Agent': self.user_agent, 'Accept': 'text/html'}
        self.__login()  # refreshes self.cookie on success
        cookie = self.cookie
        try:
            res = client.request(url, headers=header, cookie=cookie, redirect=False, output='geturl')
            if self.base_link not in res:
                url = res
            else:
                # Still stuck on-site even when logged in — presumably the
                # link needs an (upgraded) account; warn the user.
                control.infoDialog(control.lang(32572).encode('utf-8'), sound=True, icon='WARNING')
        except:
            return
        return url
    except:
        return
def yandex(url):
    """Resolve a Yandex.Disk share page to its direct file URL (or None)."""
    try:
        cookie = client.request(url, output='cookie')
        html = client.request(url, cookie=cookie)
        # Strip non-ASCII noise before scraping the embedded JSON values.
        html = re.sub(r'[^\x00-\x7F]+', ' ', html)

        sk = re.findall('"sk"\s*:\s*"([^"]+)', html)[0]
        resource_id = re.findall('"id"\s*:\s*"([^"]+)', html)[0]
        # A random hex client id, as the web UI would generate.
        client_id = binascii.b2a_hex(os.urandom(16))

        payload = urllib.urlencode({'idClient': client_id,
                                    'version': '3.9.2',
                                    'sk': sk,
                                    '_model.0': 'do-get-resource-url',
                                    'id.0': resource_id})

        response = client.request('https://yadi.sk/models/?_m=do-get-resource-url',
                                  post=payload, cookie=cookie)
        return json.loads(response)['models'][0]['data']['file']
    except:
        return
def __login(self):
    """Log in with the configured credentials and cache the session cookie.

    Does nothing when no credentials are configured.  On success the
    cookie is stored in ``self.cookie``; all failures are swallowed and
    the method simply returns.
    """
    try:
        # Nothing to do without credentials.
        if self.login == '' or self.password == '':
            return

        url = urlparse.urljoin(self.base_link, '/login')
        post = urllib.urlencode({'email': self.login,
                                 'password': self.password,
                                 'autoLogin': '******'})
        header = {'User-Agent': self.user_agent, 'Accept': 'text/html'}

        cookie = client.request(url, headers=header, referer=url, post=post, output='cookie')

        # A logout link on the follow-up page proves the login succeeded.
        data = client.request(url, cookie=cookie, output='extended')
        if '/home/logout' in data[0]:
            self.cookie = cookie
        # (fixed: the original ended with two consecutive returns — the
        # second was unreachable dead code and has been removed)
        return
    except:
        return
def links(self, url):
    """Scrape hoster link ids behind a (possibly password-protected) post.

    Fixed: the original returned ``None`` both for a missing *url* and on
    any exception (``except: pass``) while returning a list on success;
    it now always returns the list gathered so far so callers can iterate
    the result unconditionally.
    """
    urls = []
    try:
        if url is None:
            return urls

        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs={'class': 'entry'})
        r = client.parseDOM(r, 'a', ret='href')

        # Only the 'money' mirror page carries the hoster table.
        r1 = [i for i in r if 'money' in i][0]
        r = client.request(r1)
        r = client.parseDOM(r, 'div', attrs={'id': 'post-\d+'})[0]

        if 'enter the password' in r:
            # Password-protected post: submit the form first, then reuse
            # the returned cookie for the real page request.
            plink = client.parseDOM(r, 'form', ret='action')[0]
            post = {'post_password': '******', 'Submit': 'Submit'}
            send_post = client.request(plink, post=post, output='cookie')
            link = client.request(r1, cookie=send_post)
        else:
            link = client.request(r1)

        # Only the "Single" download block is of interest.
        link = re.findall('<strong>Single(.+?)</tr', link, re.DOTALL)[0]
        link = client.parseDOM(link, 'a', ret='href')

        for i in link:
            # The hoster id is the last query-string component.
            urls.append(i.split('=')[-1])

        return urls
    except:
        return urls
def __search(self, search_link, imdb, titles):
    """Find the site URL for a title and verify it via its IMDB id.

    Returns the domain-stripped URL when the detail page links to *imdb*,
    otherwise None.
    """
    try:
        query = search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        query = urlparse.urljoin(self.base_link, query)

        wanted = [cleantitle.get(i) for i in set(titles) if i]

        html = client.request(query)
        rows = dom_parser.parse_dom(html, 'div', attrs={'class': 'big-list'})
        rows = dom_parser.parse_dom(rows, 'table', attrs={'class': 'row'})
        rows = dom_parser.parse_dom(rows, 'td', attrs={'class': 'list-name'})
        rows = dom_parser.parse_dom(rows, 'a', req='href')

        # First anchor whose text matches one of the cleaned titles.
        href = [i.attrs['href'] for i in rows if i and cleantitle.get(i.content) in wanted][0]
        url = source_utils.strip_domain(href)

        # Cross-check: the detail page must reference the expected IMDB id.
        detail = client.request(urlparse.urljoin(self.base_link, url))
        anchors = dom_parser.parse_dom(detail, 'a', attrs={'href': re.compile('.*/tt\d+.*')}, req='href')
        ids = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in anchors]
        ids = [i[0] for i in ids if i]

        return url if imdb in ids else None
    except:
        return
def __search(self, titles, year, content):
    """Search the site (filtered by year and content type) for a title.

    Returns the domain-stripped URL of the first exact match, or None.
    """
    try:
        wanted = [cleantitle.get(i) for i in set(titles) if i]

        # The search is session-based: prime the cookie with the year
        # filter first, then with the actual search query.
        cookie = client.request(urlparse.urljoin(self.base_link, self.year_link % int(year)), output='cookie')
        post = urllib.urlencode({'search': cleantitle.query(titles[0])})
        cookie = client.request(urlparse.urljoin(self.base_link, self.search_link), cookie=cookie, post=post, output='cookie')

        html = client.request(urlparse.urljoin(self.base_link, self.type_link % content), cookie=cookie, post=post)
        rows = dom_parser.parse_dom(html, 'div', attrs={'id': 'content'})
        rows = dom_parser.parse_dom(rows, 'tr')
        cells = [dom_parser.parse_dom(i, 'td') for i in rows]
        anchors = [dom_parser.parse_dom(i, 'a', req='href') for i in cells]
        entries = [(i[0].attrs['href'], i[0].content, i[1].content) for i in anchors if i]

        # Split "Title (<i>(Alias" entries into (url, title, alias, year);
        # plain entries use the title as its own alias.
        candidates = []
        for entry in entries:
            if re.search('(?<=<i>\().*$', entry[1]):
                candidates.append((entry[0],
                                   re.search('(.*?)(?=\s<)', entry[1]).group(),
                                   re.search('(?<=<i>\().*$', entry[1]).group(),
                                   entry[2]))
            else:
                candidates.append((entry[0], entry[1], entry[1], entry[2]))

        match = [i[0] for i in candidates
                 if (cleantitle.get(i[1]) in wanted or cleantitle.get(i[2]) in wanted)
                 and i[3] == year][0]
        return source_utils.strip_domain(match)
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Build hoster sources for a movie page or a show episode."""
    sources = []
    try:
        if not url:
            return sources

        params = urlparse.parse_qs(url)
        params = dict([(i, params[i][0]) if params[i] else (i, '') for i in params])

        url = urlparse.urljoin(self.base_link, params.get('url'))
        season = params.get('season')
        episode = params.get('episode')

        if season and episode:
            # Episodes are resolved through the XHR hoster endpoint.
            post = urllib.urlencode({'imdbid': params['imdb'], 'language': 'de',
                                     'season': season, 'episode': episode})
            html = client.request(urlparse.urljoin(self.base_link, self.hoster_link), XHR=True, post=post)
        else:
            html = client.request(url)

        box = dom_parser.parse_dom(html, 'div', attrs={'class': 'linkbox'})[0].content
        anchors = re.compile('(<a.+?/a>)', re.DOTALL).findall(box)
        pairs = [(dom_parser.parse_dom(i, 'a', req='href'),
                  dom_parser.parse_dom(i, 'img', attrs={'class': re.compile('.*linkbutton')}, req='class'))
                 for i in anchors]
        pairs = [(i[0][0].attrs['href'], i[1][0].attrs['class'].lower()) for i in pairs if i[0] and i[1]]
        # The link button's CSS class encodes the quality ('hd...' or not).
        pairs = [(i[0].strip(), 'HD' if i[1].startswith('hd') else 'SD') for i in pairs]

        for link, quality in pairs:
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            sources.append({'source': host, 'quality': quality, 'language': 'de',
                            'url': link, 'direct': False, 'debridonly': False})

        return sources
    except:
        return sources
def __get_premium_cookie(self):
    """Log in with the premium account and return the session cookie.

    Returns '' when credentials are missing or the login check fails.
    """
    try:
        if self.user == '' or self.password == '':
            raise Exception()

        login = urlparse.urljoin(self.base_link, self.login_link)
        post = urllib.urlencode({'username': self.user, 'password': self.password})
        cookie = client.request(login, mobile=True, post=post, XHR=True, output='cookie')

        # The api endpoint answers '1' for an authenticated session.
        check = client.request(urlparse.urljoin(self.base_link, 'api'), mobile=True, cookie=cookie)
        return cookie if check == '1' else ''
    except:
        return ''
def sources(self, url, hostDict, hostprDict):
    # Walk every stream sub-page of a title (skipping English versions)
    # and collect hoster links; returns a list of source dicts.
    sources = []
    try:
        if not url: return sources

        r = client.request(urlparse.urljoin(self.base_link, url))

        # Only tables containing a linkSearch span hold stream listings.
        links = dom_parser.parse_dom(r, 'table')
        links = [i.content for i in links if dom_parser.parse_dom(i, 'span', attrs={'class': re.compile('linkSearch(-a)?')})]
        links = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(links))
        links = [dom_parser.parse_dom(i, 'a', req='href') for i in links if re.findall('(.+?)\s*\(\d+\)\s*<', i)]
        links = [i[0].attrs['href'] for i in links if i]

        url = re.sub('/streams-\d+', '', url)

        for link in links:
            if '/englisch/' in link: continue
            control.sleep(3000)  # throttle between page fetches
            # Re-fetch only when the link differs from the page already
            # held in `r`; otherwise the previous response is reused.
            if link != url:
                r = client.request(urlparse.urljoin(self.base_link, link))

            quality = 'SD'
            info = []

            # The release name (with quality tags) sits behind a 'thlink'
            # table header; follow it to the actual hoster page.
            detail = dom_parser.parse_dom(r, 'th', attrs={'class': 'thlink'})
            detail = [dom_parser.parse_dom(i, 'a', req='href') for i in detail]
            detail = [(i[0].attrs['href'], i[0].content.replace('▶', '').strip()) for i in detail if i]

            if detail:
                quality, info = source_utils.get_release_quality(detail[0][1])
                r = client.request(urlparse.urljoin(self.base_link, detail[0][0]))

            # Hoster anchors live in leaf tables (no nested tables).
            r = dom_parser.parse_dom(r, 'table')
            r = [dom_parser.parse_dom(i, 'a', req=['href', 'title']) for i in r if not dom_parser.parse_dom(i, 'table')]
            r = [(l.attrs['href'], l.attrs['title']) for i in r for l in i if l.attrs['title']]

            info = ' | '.join(info)

            for stream_link, hoster in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue
                # Google Video links can be played directly.
                direct = False
                if hoster.lower() == 'gvideo':
                    direct = True
                sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': stream_link, 'info': info, 'direct': direct, 'debridonly': False, 'checkquality': True})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from the first five watch-page results."""
    try:
        sources = []
        if url == None:
            return sources

        url = urlparse.urljoin(self.base_link, url)

        # The site is flaky: retry the listing page a few times.
        for _ in range(3):
            result = client.request(url, timeout=10)
            if not result == None:
                break

        dom = dom_parser.parse_dom(result, 'div', attrs={'class': 'links', 'id': 'noSubs'})
        result = dom[0].content

        links = re.compile('<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',
                           re.DOTALL).findall(result)

        for link in links[:5]:
            try:
                url2 = urlparse.urljoin(self.base_link, link[1])
                # Same flakiness on the per-link page.
                for _ in range(2):
                    result2 = client.request(url2, timeout=3)
                    if not result2 == None:
                        break

                target = re.compile('href="([^"]+)"\s+class="action-btn').findall(result2)[0]
                valid, hoster = source_utils.is_host_valid(target, hostDict)
                if not valid:
                    continue

                urls, host, direct = source_utils.check_directstreams(target, hoster)
                for entry in urls:
                    sources.append({'source': host, 'quality': entry['quality'],
                                    'language': 'en', 'url': entry['url'],
                                    'direct': direct, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def __search(self, titles, year, imdb):
    """Search by title/year; disambiguate multiple hits via the IMDB id.

    Returns the domain-stripped URL of the best match, or None.
    """
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        query = urlparse.urljoin(self.base_link, query)

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        html = client.request(query)
        cells = dom_parser.parse_dom(html, 'div', attrs={'class': 'movie_cell'})
        cells = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'bottom'}),
                  dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in cells]
        cells = [(dom_parser.parse_dom(i[0], 'a', req=['href', 'title']),
                  re.findall('[(](\d{4})[)]', i[1][0].content)) for i in cells if i[0] and i[1]]
        cells = [(i[0][0].attrs['href'], i[0][0].content, i[1][0]) for i in cells if i[0] and i[1]]
        cells = [(i[0], i[1].lower(), i[2]) for i in cells if i[2] in years]
        # Entries carrying a real year sort before the year-less ones.
        cells = sorted(cells, key=lambda i: int(i[2]), reverse=True)
        matches = [i[0] for i in cells if cleantitle.get(i[1]) in wanted]

        if len(matches) > 1:
            # Several candidates: pick one whose page references *imdb*
            # (the last matching candidate wins, as before).
            for candidate in matches:
                page = client.request(urlparse.urljoin(self.base_link, candidate))
                page = dom_parser.parse_dom(page, 'a', attrs={'name': re.compile('.*/tt\d+.*')}, req='name')
                ids = [re.findall('.+?(tt\d+).*?', d.attrs['name']) for d in page]
                ids = [d[0] for d in ids if len(d) > 0 and d[0] == imdb]
                if len(ids) >= 1:
                    url = candidate
        else:
            url = matches[0]

        if url:
            return source_utils.strip_domain(url)
    except:
        return
def __get_old_url(self, url):
    """Extract the legacy stream URL from the mobile player page.

    For `play/se.php` embeds the obfuscated link hash is decoded via
    ``__decode_hash``; otherwise the first scraped URL is returned.
    """
    try:
        page = client.request(url, mobile=True)
        url = re.findall('url="(.*?)"', page)
        if len(url) == 0:
            # Fall back to the iframe source (note: this is a plain
            # string, while the findall result above is a list).
            url = dom_parser.parse_dom(page, 'iframe', req='src')[0].attrs['src']
        # NOTE(review): when findall matched, `url` is still a list here,
        # so this membership test checks for an exact list element —
        # preserved as-is from the original.
        if "play/se.php" in url:
            page = client.request(url, mobile=True)
            return self.__decode_hash(re.findall('link:"(.*?)"', page)[0])
        else:
            return url[0]
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Build hoster sources via the site's JSON link endpoints."""
    sources = []
    try:
        if not url:
            return sources

        params = urlparse.parse_qs(url)
        params = dict([(i, params[i][0]) if params[i] else (i, '') for i in params])

        id = params.get('id')
        season = params.get('season')
        episode = params.get('episode')

        if season and episode:
            # Episode links come from the dedicated episodes endpoint.
            resp = client.request(urlparse.urljoin(self.base_link, self.get_episodes),
                                  post={'series_id': id, 'mlang': 'de',
                                        'season': season, 'episode': episode})
            entries = json.loads(resp).get('episode_links', [])
            entries = [([i.get('id')], i.get('hostername')) for i in entries]
        else:
            # Movies post the full parameter set to the generic endpoint.
            params.update({'lang': 'de'})
            resp = client.request(urlparse.urljoin(self.base_link, self.get_links), post=params)
            entries = json.loads(resp).get('links', [])
            entries = [(i.get('ids'), i.get('hoster')) for i in entries]

        for link_ids, hoster in entries:
            valid, host = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue
            for link_id in link_ids:
                sources.append({'source': host, 'quality': 'SD', 'language': 'de',
                                'url': self.out_link % (link_id, hoster),
                                'direct': False, 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Follow the embedded player chain and collect hoster links."""
    sources = []
    try:
        if not url:
            return sources

        page = client.request(urlparse.urljoin(self.base_link, url))
        player = dom_parser.parse_dom(page, 'div', attrs={'id': 'player'})
        frame = dom_parser.parse_dom(player, 'iframe', req='src')

        page = client.request(frame[0][0]['src'])
        play = dom_parser.parse_dom(page, 'a', attrs={'class': 'play_container'}, req='href')
        page = client.request(play[0][0]['href'])

        # The article-list URL is built from two JS variables on the page.
        url = self.get_link % (
            re.search('(?<=var id = \")(.*?)(?=\")', page).group(),
            re.search('(?<=var links = \")(.*?)(?=\")', page).group())

        page = client.request(url)
        items = dom_parser.parse_dom(page, 'ul', attrs={'id': 'articleList'})
        items = dom_parser.parse_dom(items, 'a')

        for item in items:
            # Links sit either in the href or inside an onclick handler.
            if 'http' in item[0]['href']:
                link = item[0]['href']
            elif 'http' in item[0]['onclick']:
                link = re.search('http(.*?)(?=\")', item[0]['onclick']).group()
            else:
                return sources

            valid, hoster = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            sources.append({'source': hoster, 'quality': 'SD', 'language': 'de',
                            'url': link, 'direct': False, 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """List hoster/quality pairs from the site's player widget."""
    sources = []
    try:
        if not url:
            return sources

        # Force the German variant of the page.
        url = url.replace('/en/', '/de/')
        video_id = re.search('(?<=\/)(\d*?)(?=-)', url).group()
        if not video_id:
            return sources

        # load player
        query = self.get_player % (video_id)
        query = urlparse.urljoin(self.base_link, query)
        page = client.request(query)
        servers = dom_parser.parse_dom(page, 'div', attrs={'class': 'le-server'})

        # for each hoster
        for server in servers:
            name = dom_parser.parse_dom(server, 'div', attrs={'class': 'les-title'})
            name = dom_parser.parse_dom(name, 'strong')
            name = name[0][1]

            valid, hoster = source_utils.is_host_valid(name, hostDict)
            if not valid:
                continue

            episodes = dom_parser.parse_dom(server, 'a', attrs={'class': 'ep-item'})
            # for each link
            for ep in episodes:
                # The link's title attribute carries the quality label.
                if '1080p' in ep[0]['title']:
                    quality = '1080p'
                elif 'HD' in ep[0]['title']:
                    quality = 'HD'
                else:
                    quality = 'SD'

                link = ep[0]['id']
                if not link:
                    continue

                sources.append({'source': hoster, 'quality': quality, 'language': 'de',
                                'url': link, 'direct': False, 'debridonly': False,
                                'checkquality': True})

        return sources
    except:
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Resolve the site URL of one episode within a season listing."""
    try:
        if not url:
            return

        params = urlparse.parse_qs(url)
        params = dict([(i, params[i][0]) if params[i] else (i, '') for i in params])

        tvshowtitle = params['tvshowtitle']
        localtvshowtitle = params['localtvshowtitle']
        aliases = source_utils.aliases_to_array(eval(params['aliases']))

        # Prefer the localized title; fall back to the original one.
        url = self.__search([localtvshowtitle] + aliases, params['year'], season)
        if not url and tvshowtitle != localtvshowtitle:
            url = self.__search([tvshowtitle] + aliases, params['year'], season)
        if not url:
            return

        page = client.request(urlparse.urljoin(self.base_link, url))
        lists = dom_parser.parse_dom(page, 'ul', attrs={'class': ['list-inline', 'list-film']})
        items = dom_parser.parse_dom(lists, 'li')
        items = dom_parser.parse_dom(items, 'a', req='href')
        items = [(i.attrs['href'], i.content) for i in items if i]
        # Non-numeric labels count as episode 0 so int() below is safe.
        items = [(i[0], i[1] if re.compile("^(\d+)$").match(i[1]) else '0') for i in items]
        match = [i[0] for i in items if int(i[1]) == int(episode)][0]

        return source_utils.strip_domain(match)
    except:
        return
def __search_movie(self, imdb, year):
    """Find a movie page by IMDB id, preferring results near *year*."""
    try:
        query = urlparse.urljoin(self.base_link, self.search_link % imdb)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        html = client.request(query)
        items = dom_parser.parse_dom(html, 'div', attrs={'class': 'container'})
        items = dom_parser.parse_dom(items, 'div', attrs={'class': 'ml-item-content'})
        items = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'ml-image'}, req='href'),
                  dom_parser.parse_dom(i, 'ul', attrs={'class': 'item-params'})) for i in items]
        # The release year sits next to a calendar icon in the params list.
        items = [(i[0][0].attrs['href'],
                  re.findall('calendar.+?>.+?(\d{4})', ''.join([x.content for x in i[1]])))
                 for i in items if i[0] and i[1]]
        items = [(i[0], i[1][0] if len(i[1]) > 0 else '0') for i in items]
        # Entries with a year outrank the year-less ones.
        items = sorted(items, key=lambda i: int(i[1]), reverse=True)
        match = [i[0] for i in items if i[1] in years][0]

        return source_utils.strip_domain(match)
    except:
        return
def __get_json(self, url):
    """Fetch *url* and parse the embedded `subcats` JS variable as JSON."""
    try:
        html = client.request(url)
        blob = re.compile('var\s+subcats\s+=\s*(.*?);').findall(html)[0]
        return json.loads(blob)
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Query the JSON request endpoint and emit one source per link."""
    sources = []
    try:
        if url == None:
            return sources

        params = urlparse.parse_qs(url)
        params = dict([(i, params[i][0]) if params[i] else (i, '') for i in params])
        params.update({'raw': 'true', 'language': 'de'})

        payload = urllib.urlencode(params)
        resp = client.request(urlparse.urljoin(self.base_link, self.request_link), post=payload)
        resp = json.loads(resp)

        # Index 1 of the response maps ids to {'name': ..., 'links': [...]}.
        hosters = [i[1] for i in resp[1].items()]
        hosters = [(i['name'].lower(), i['links']) for i in hosters]

        for host, links in hosters:
            valid, host = source_utils.is_host_valid(host, hostDict)
            if not valid:
                continue
            for link in links:
                try:
                    sources.append({'source': host, 'quality': 'SD', 'language': 'de',
                                    'url': link['URL'], 'direct': False, 'debridonly': False})
                except:
                    pass

        return sources
    except:
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Find an episode link by matching its season/episode markup."""
    try:
        if url == None:
            return

        url = urlparse.urljoin(self.base_link, url)
        result = client.request(url)

        title = cleantitle.get(title)
        # Re-order the ISO date to the dd/mm/yyyy form the site uses.
        premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
        premiered = '%s/%s/%s' % (premiered[2], premiered[1], premiered[0])

        items = dom_parser.parse_dom(result, 'a', attrs={'itemprop': 'url'})
        pattern = re.compile('<span\s*>%s<.*?itemprop="episodeNumber">%s<\/span>' % (season, episode))
        url = [i.attrs['href'] for i in items if bool(pattern.search(i.content))][0]

        return url.encode('utf-8')
    except:
        return
def __search(self, titles, imdb, year):
    """Search the JSON API; match by IMDB id or by title plus year."""
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        resp = client.request(query, XHR=True)
        resp = json.loads(resp)

        # Flatten each post into (title, stream-url, year, [imdb ids]).
        posts = [(i.get('title'), i.get('custom_fields', {})) for i in resp.get('posts', [])]
        posts = [(i[0], i[1]) for i in posts if i[0] and i[1]]
        posts = [(i[0], i[1].get('Streaming', ['']), i[1].get('Jahr', ['0']),
                  i[1].get('IMDb-Link', [''])) for i in posts if i]
        posts = [(i[0], i[1][0], i[2][0], re.findall('.+?(tt\d+).*?', i[3][0]))
                 for i in posts if i[0] and i[1] and i[2] and i[3]]

        match = [i[1] for i in posts
                 if imdb in i[3] or (cleantitle.get(i[0]) in wanted and i[2] in years)][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, titles, year):
    """Search article listings for a title released around *year*."""
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        html = client.request(query)
        articles = dom_parser.parse_dom(html, 'article')
        articles = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}),
                     dom_parser.parse_dom(i, 'span', attrs={'class': 'year'})) for i in articles]
        articles = [(dom_parser.parse_dom(i[0][0], 'a', req='href'), i[1][0].content)
                    for i in articles if i[0] and i[1]]
        articles = [(i[0][0].attrs['href'], i[0][0].content, i[1]) for i in articles if i[0]]
        # Entries with a year outrank the year-less ones.
        articles = sorted(articles, key=lambda i: int(i[2]), reverse=True)
        match = [i[0] for i in articles if cleantitle.get(i[1]) in wanted and i[2] in years][0]

        return source_utils.strip_domain(match)
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Collect dubbed and subbed hoster links for one episode."""
    sources = []
    try:
        if not url:
            return sources

        params = urlparse.parse_qs(url)
        params = dict([(i, params[i][0]) if params[i] else (i, '') for i in params])

        url = params.get('url')
        episode = int(params.get('episode', 1))

        page = client.request(urlparse.urljoin(self.base_link, url))
        # German dub carries no extra info tag; subbed versions are marked.
        groups = {'': dom_parser.parse_dom(page, 'div', attrs={'id': 'gerdub'}),
                  'subbed': dom_parser.parse_dom(page, 'div', attrs={'id': 'gersub'})}

        for info, section in groups.iteritems():
            rows = dom_parser.parse_dom(section, 'tr')
            # Keep only rows that carry a link for the wanted episode.
            rows = [dom_parser.parse_dom(i, 'a', req='href') for i in rows
                    if dom_parser.parse_dom(i, 'a', attrs={'id': str(episode)})]
            entries = [(link.attrs['href'], dom_parser.parse_dom(link.content, 'img', req='src'))
                       for i in rows for link in i]
            entries = [(i[0], i[1][0].attrs['src']) for i in entries if i[1]]
            # The hoster name is encoded in the icon's filename.
            entries = [(i[0], re.findall('/(\w+)\.\w+', i[1])) for i in entries]
            entries = [(i[0], i[1][0]) for i in entries if i[1]]

            for link, hoster in entries:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid:
                    continue
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'de',
                                'url': link, 'info': info, 'direct': False,
                                'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Query the request endpoint by IMDB id and emit hoster sources."""
    sources = []
    try:
        if url == None:
            return sources

        params = urlparse.parse_qs(url)
        params = dict([(i, params[i][0]) if params[i] else (i, '') for i in params])

        # Only the numeric part of the IMDB id is sent.
        payload = urllib.urlencode({'ID': re.sub('[^0-9]', '', str(params['imdb'])), 'lang': 'de'})
        resp = client.request(urlparse.urljoin(self.base_link, self.request_link), post=payload, XHR=True)
        resp = json.loads(resp)

        entries = [(i, resp['links'][i]) for i in resp['links'] if 'links' in resp]
        # Each value is [quality, link, link, ...]; split head from tail.
        entries = [(i[0], i[1][0], (i[1][1:])) for i in entries]

        for hoster, quli, links in entries:
            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue
            for link in links:
                try:
                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'de',
                                    'url': self.out_link % link, 'direct': False,
                                    'debridonly': False})
                except:
                    pass

        return sources
    except:
        return sources
def __search(self, titles, year):
    """POST the search form and pick the best title/year match."""
    try:
        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        html = client.request(urlparse.urljoin(self.base_link, self.search_link),
                              post={'query': cleantitle.query(titles[0])})
        items = dom_parser.parse_dom(html, 'li', attrs={'class': 'entTd'})
        items = dom_parser.parse_dom(items, 'div', attrs={'class': 've-screen'}, req='title')
        items = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0])
                 for i in items]
        items = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1]))
                 for i in items]
        # Split "Title (Year)" labels; fall back to the raw label / year '0'.
        items = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in items]
        # Entries with a year outrank the year-less ones.
        items = sorted(items, key=lambda i: int(i[2]), reverse=True)
        match = [i[0] for i in items if cleantitle.get(i[1]) in wanted and i[2] in years][0]

        return source_utils.strip_domain(match)
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Extract hoster iframes from the escaped `vicode` page variable."""
    sources = []
    try:
        if not url:
            return sources

        page = client.request(urlparse.urljoin(self.base_link, url))
        # The player markup is stored as an escaped JS string literal.
        embedded = re.findall('''vicode\s*=\s*["'](.*?)["'];''', page)[0].decode('string_escape')

        frames = dom_parser.parse_dom(embedded, 'iframe', req='src')
        for src in [i.attrs['src'] for i in frames]:
            valid, host = source_utils.is_host_valid(src, hostDict)
            if not valid:
                continue
            sources.append({'source': host, 'quality': 'SD', 'language': 'de',
                            'url': src, 'direct': False, 'debridonly': False,
                            'checkquality': True})

        return sources
    except:
        return sources
def __search(self, titles, year):
    """Search with title+year and return the first exact title match."""
    try:
        query = self.search_link % (urllib.quote_plus(
            cleantitle.query(titles[0] + ' ' + year)))
        query = urlparse.urljoin(self.base_link, query)

        wanted = [cleantitle.get(i) for i in set(titles) if i]

        html = client.request(query)
        figures = dom_parser.parse_dom(html, 'figure', attrs={'class': 'pretty-figure'})
        captions = dom_parser.parse_dom(figures, 'figcaption')

        for caption in captions:
            name = client.replaceHTMLCodes(caption[0]['title'])
            if cleantitle.get(name) in wanted:
                anchor = dom_parser.parse_dom(caption, 'a', req='href')
                return source_utils.strip_domain(anchor[0][0]['href'])

        return
    except:
        return
def __search(self, titles):
    # Search the site for any of the given titles and return the
    # domain-stripped URL of the first matching result, or None.
    try:
        query = self.search_link % (urllib.quote_plus(
            cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        # Cleaned variants of every candidate title, for comparison.
        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'nag'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item-video'})
        r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'entry-title'})
        r = dom_parser.parse_dom(r, 'a', req='href')

        for i in r:
            title = i[1]
            # Drop any '*...*' decoration from the result title before
            # comparing.
            if re.search('\*(?:.*?)\*', title) is not None:
                title = re.sub('\*(?:.*?)\*', '', title)
            title = cleantitle.get(title)
            if title in t:
                return source_utils.strip_domain(i[0]['href'])
        else:
            # for/else: no result matched any candidate title.
            return
    except:
        return
def episodeAbsoluteNumber(self, thetvdb, season, episode):
    """Map a season/episode pair to TheTVDB's absolute episode number.

    Falls back to the given episode number when the lookup fails.
    """
    try:
        # The API key is stored base64-encoded.
        url = 'http://thetvdb.com/api/%s/series/%s/default/%01d/%01d' % (
            'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64'), thetvdb,
            int(season), int(episode))
        response = client.request(url)
        return int(client.parseDOM(response, 'absolute_number')[0])
    except:
        pass
    return episode