def __search(self, search_link, imdb, titles):
    try:
        query = search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'big-list'})
        r = dom_parser.parse_dom(r, 'table', attrs={'class': 'row'})
        r = dom_parser.parse_dom(r, 'td', attrs={'class': 'list-name'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [i.attrs['href'] for i in r if i and cleantitle.get(i.content) in t][0]

        url = source_utils.strip_domain(r)

        # verify the candidate page actually references the requested IMDb id
        r = client.request(urlparse.urljoin(self.base_link, url))
        r = dom_parser.parse_dom(r, 'a', attrs={'href': re.compile('.*/tt\d+.*')}, req='href')
        r = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in r]
        r = [i[0] for i in r if i]

        return url if imdb in r else None
    except:
        return
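# A minimal sketch (an assumption, not part of the original modules) of the
# result shape these methods expect from dom_parser.parse_dom: a list of
# DomMatch namedtuples. Because namedtuples support both attribute and index
# access, i.attrs['href']/i.content and i[0]['href']/i[1] are interchangeable,
# which is why both styles appear across these scrapers.
from collections import namedtuple

DomMatch = namedtuple('DomMatch', ['attrs', 'content'])  # assumed shape


def _dom_match_demo():
    # hypothetical element, for illustration only
    m = DomMatch({'href': '/title/tt0000001/'}, 'Some Title')
    return m.attrs['href'] == m[0]['href'] and m.content == m[1]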
def __search(self, titles, year):
    try:
        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})

        r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
        r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
        r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

        return source_utils.strip_domain(r)
    except:
        return
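# The year filter above recurs throughout these helpers: accept the requested
# year, a one-year offset either way, and '0' for results with no parsable
# year. A standalone sketch of that rule (names are illustrative, not from
# the original modules):
def _year_window(year):
    # tolerate off-by-one release years and undated entries
    return [str(year), str(int(year) + 1), str(int(year) - 1), '0']


def _year_matches(candidate, year):
    # candidate is the year string scraped from a result row ('0' if absent)
    return candidate in _year_window(year)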
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        tvshowtitle = data['tvshowtitle']
        localtvshowtitle = data['localtvshowtitle']
        aliases = source_utils.aliases_to_array(eval(data['aliases']))

        url = self.__search([localtvshowtitle] + aliases, data['year'], season)
        if not url and tvshowtitle != localtvshowtitle:
            url = self.__search([tvshowtitle] + aliases, data['year'], season)
        if not url:
            return

        r = client.request(urlparse.urljoin(self.base_link, url))
        r = dom_parser.parse_dom(r, 'ul', attrs={'class': ['list-inline', 'list-film']})
        r = dom_parser.parse_dom(r, 'li')
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content) for i in r if i]
        r = [(i[0], i[1] if re.compile("^(\d+)$").match(i[1]) else '0') for i in r]
        r = [i[0] for i in r if int(i[1]) == int(episode)][0]

        return source_utils.strip_domain(r)
    except:
        return
def __search(self, titles, year):
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0] + ' ' + year))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'figure', attrs={'class': 'pretty-figure'})
        r = dom_parser.parse_dom(r, 'figcaption')

        for i in r:
            title = client.replaceHTMLCodes(i.attrs['title'])
            title = cleantitle.get(title)

            if title in t:
                x = dom_parser.parse_dom(i, 'a', req='href')
                return source_utils.strip_domain(x[0].attrs['href'])

        return
    except:
        return
def __search(self, titles):
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'nag'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item-video'})
        r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'entry-title'})
        r = dom_parser.parse_dom(r, 'a', req='href')

        for i in r:
            title = i.content
            if re.search('\*(?:.*?)\*', title) is not None:
                title = re.sub('\*(?:.*?)\*', '', title)
            title = cleantitle.get(title)

            # note: only the first result is ever inspected; the loop
            # returns on the first non-matching title as well
            if title in t:
                return source_utils.strip_domain(i.attrs['href'])
            else:
                return
    except:
        return
def __search_movie(self, imdb, year):
    try:
        query = urlparse.urljoin(self.base_link, self.search_link % imdb)

        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'container'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'ml-item-content'})
        r = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'ml-image'}, req='href'), dom_parser.parse_dom(i, 'ul', attrs={'class': 'item-params'})) for i in r]
        r = [(i[0][0].attrs['href'], re.findall('calendar.+?>.+?(\d{4})', ''.join([x.content for x in i[1]]))) for i in r if i[0] and i[1]]
        r = [(i[0], i[1][0] if len(i[1]) > 0 else '0') for i in r]
        r = sorted(r, key=lambda i: int(i[1]), reverse=True)  # with year > no year
        r = [i[0] for i in r if i[1] in y][0]

        return source_utils.strip_domain(r)
    except:
        return
def __search(self, titles, year, content):
    try:
        t = [cleantitle.get(i) for i in set(titles) if i]

        c = client.request(urlparse.urljoin(self.base_link, self.year_link % int(year)), output='cookie')

        p = urllib.urlencode({'search': cleantitle.query(titles[0])})
        c = client.request(urlparse.urljoin(self.base_link, self.search_link), cookie=c, post=p, output='cookie')
        r = client.request(urlparse.urljoin(self.base_link, self.type_link % content), cookie=c, post=p)

        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'content'})
        r = dom_parser.parse_dom(r, 'tr')
        r = [dom_parser.parse_dom(i, 'td') for i in r]
        r = [dom_parser.parse_dom(i, 'a', req='href') for i in r]
        r = [(i[0].attrs['href'], i[0].content, i[1].content) for i in r if i]

        x = []
        for i in r:
            if re.search('(?<=<i>\().*$', i[1]):
                x.append((i[0], re.search('(.*?)(?=\s<)', i[1]).group(), re.search('(?<=<i>\().*$', i[1]).group(), i[2]))
            else:
                x.append((i[0], i[1], i[1], i[2]))

        r = [i[0] for i in x if (cleantitle.get(i[1]) in t or cleantitle.get(i[2]) in t) and i[3] == year][0]

        return source_utils.strip_domain(r)
    except:
        return
def __search(self, titles, year):
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'article')
        r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}), dom_parser.parse_dom(i, 'span', attrs={'class': 'year'})) for i in r]
        r = [(dom_parser.parse_dom(i[0][0], 'a', req='href'), i[1][0].content) for i in r if i[0] and i[1]]
        r = [(i[0][0].attrs['href'], i[0][0].content, i[1]) for i in r if i[0]]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

        return source_utils.strip_domain(r)
    except:
        return
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        url = url.replace('/en/', '/de/')

        video_id = re.search('(?<=\/)(\d*?)(?=-)', url).group()
        if not video_id:
            return sources

        # load player
        query = self.get_player % video_id
        query = urlparse.urljoin(self.base_link, query)

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'le-server'})

        # for each hoster
        for i in r:
            hoster = dom_parser.parse_dom(i, 'div', attrs={'class': 'les-title'})
            hoster = dom_parser.parse_dom(hoster, 'strong')
            hoster = hoster[0].content

            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue

            links = dom_parser.parse_dom(i, 'a', attrs={'class': 'ep-item'})

            # for each link
            for link in links:
                if '1080p' in link.attrs['title']:
                    quality = '1080p'
                elif 'HD' in link.attrs['title']:
                    quality = 'HD'
                else:
                    quality = 'SD'

                url = link.attrs['id']
                if not url:
                    continue

                sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': url, 'direct': False, 'debridonly': False, 'checkquality': True})

        return sources
    except:
        return sources
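# The label-to-quality mapping used above (and in several sources() variants
# below) is a simple precedence check. A hedged standalone sketch of the
# same rule:
def _quality_from_label(label):
    # '1080p' outranks the generic 'HD' marker; everything else counts as SD
    if '1080p' in label:
        return '1080p'
    if 'HD' in label:
        return 'HD'
    return 'SD'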
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = data.get('url')
        episode = int(data.get('episode', 1))

        r = client.request(urlparse.urljoin(self.base_link, url))
        r = {'': dom_parser.parse_dom(r, 'div', attrs={'id': 'gerdub'}), 'subbed': dom_parser.parse_dom(r, 'div', attrs={'id': 'gersub'})}

        for info, data in r.iteritems():
            data = dom_parser.parse_dom(data, 'tr')
            data = [dom_parser.parse_dom(i, 'a', req='href') for i in data if dom_parser.parse_dom(i, 'a', attrs={'id': str(episode)})]
            data = [(link.attrs['href'], dom_parser.parse_dom(link.content, 'img', req='src')) for i in data for link in i]
            data = [(i[0], i[1][0].attrs['src']) for i in data if i[1]]
            data = [(i[0], re.findall('/(\w+)\.\w+', i[1])) for i in data]
            data = [(i[0], i[1][0]) for i in data if i[1]]

            for link, hoster in data:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid:
                    continue

                sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = urlparse.urljoin(self.base_link, data.get('url'))
        season = data.get('season')
        episode = data.get('episode')

        if season and episode:
            r = urllib.urlencode({'imdbid': data['imdb'], 'language': 'de', 'season': season, 'episode': episode})
            r = client.request(urlparse.urljoin(self.base_link, self.hoster_link), XHR=True, post=r)
        else:
            r = client.request(url)

        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'linkbox'})[0].content
        r = re.compile('(<a.+?/a>)', re.DOTALL).findall(r)
        r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'img', attrs={'class': re.compile('.*linkbutton')}, req='class')) for i in r]
        r = [(i[0][0].attrs['href'], i[1][0].attrs['class'].lower()) for i in r if i[0] and i[1]]
        r = [(i[0].strip(), 'HD' if i[1].startswith('hd') else 'SD') for i in r]

        for url, quality in r:
            valid, host = source_utils.is_host_valid(url, hostDict)
            if not valid:
                continue

            sources.append({'source': host, 'quality': quality, 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        query = urlparse.urljoin(self.base_link, url)

        r = client.request(query)

        quality = dom_parser.parse_dom(r, 'span', attrs={'id': 'release_text'})[0].content.split(' ')[0]
        quality, info = source_utils.get_release_quality(quality)

        r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'currentStreamLinks'})
        r = [(dom_parser.parse_dom(i, 'p', attrs={'class': 'hostName'}), dom_parser.parse_dom(i, 'a', attrs={'class': 'stream-src'}, req='data-id')) for i in r]
        r = [(re.sub(' hd$', '', i[0][0].content.lower()), [x.attrs['data-id'] for x in i[1]]) for i in r if i[0] and i[1]]

        for hoster, ids in r:
            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue

            sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'info': ' | '.join(info + ['' if len(ids) == 1 else 'multi-part']), 'url': ids, 'direct': False, 'debridonly': False, 'checkquality': True})

        return sources
    except:
        return sources
def __search(self, titles, year, imdb):
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie_cell'})
        r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'bottom'}), dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in r]
        r = [(dom_parser.parse_dom(i[0], 'a', req=['href', 'title']), re.findall('[(](\d{4})[)]', i[1][0].content)) for i in r if i[0] and i[1]]
        r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0]) for i in r if i[0] and i[1]]
        r = [(i[0], i[1].lower(), i[2]) for i in r if i[2] in y]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t]

        if len(r) > 1:
            # more than one title/year match: disambiguate by the IMDb id
            # referenced on each candidate page
            for i in r:
                data = client.request(urlparse.urljoin(self.base_link, i))
                data = dom_parser.parse_dom(data, 'a', attrs={'name': re.compile('.*/tt\d+.*')}, req='name')
                data = [re.findall('.+?(tt\d+).*?', d.attrs['name']) for d in data]
                data = [d[0] for d in data if len(d) > 0 and d[0] == imdb]

                if len(data) >= 1:
                    url = i
        else:
            url = r[0]

        if url:
            return source_utils.strip_domain(url)
    except:
        return
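# The disambiguation step above pulls IMDb ids out of candidate pages and
# keeps the entry matching the requested id. A minimal standalone sketch of
# that extraction (the input list is hypothetical):
import re


def _extract_imdb_ids(values):
    # values: href/name strings such as '/title/tt0111161/'
    found = [re.findall('.+?(tt\d+).*?', v) for v in values]
    return [f[0] for f in found if f]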
def resolve(self, url):
    try:
        if self.base_link in url:
            r = client.request(url)
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'cupe'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'reloading'})
            url = dom_parser.parse_dom(r, 'a', req='href')[0].attrs['href']

        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        query = urlparse.urljoin(self.base_link, url)

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'TpRwCont'})
        r = dom_parser.parse_dom(r, 'main')

        options1 = dom_parser.parse_dom(r, 'li', attrs={'class': 'STPb'})
        options2 = dom_parser.parse_dom(r, 'div', attrs={'class': 'TPlayerTb'})

        for o1, o2 in itertools.izip(options1, options2):
            if 'trailer' in o1.content.lower():
                continue
            elif '1080p' in o1.content.lower():
                quality = '1080p'
            elif '720p' in o1.content.lower():
                quality = 'HD'
            else:
                quality = 'SD'

            s = '(?<=src=\")(.*?)(?=\")'
            if re.match(s, o2.content) is not None:
                url = re.search(s, o2.content).group()
            else:
                h = HTMLParser.HTMLParser()
                url = re.search(s, h.unescape(o2.content)).group()

            valid, hoster = source_utils.is_host_valid(url, hostDict)
            if not valid:
                continue

            sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        query = urlparse.urljoin(self.base_link, url)

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'ko-bind'})
        r = dom_parser.parse_dom(r, 'table', attrs={'class': 'links-table'})
        r = dom_parser.parse_dom(r, 'tbody')
        r = dom_parser.parse_dom(r, 'tr')

        for i in r:
            if re.search('(?<=<td>)(HD)(?=</td>)', i.content):
                quality = 'HD'
            else:
                quality = 'SD'

            x = dom_parser.parse_dom(i, 'td', attrs={'class': 'name'}, req='data-bind')

            hoster = re.search("(?<=>).*$", x[0].content)
            hoster = hoster.group().lower()

            url = re.search("http(.*?)(?=')", x[0].attrs['data-bind'])
            url = url.group()

            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue

            # use the quality detected above instead of hard-coding 'SD'
            sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        query = urlparse.urljoin(self.base_link, url)

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'downloads'})
        r = dom_parser.parse_dom(r, 'table')
        r = dom_parser.parse_dom(r, 'tbody')
        r = dom_parser.parse_dom(r, 'tr')

        for i in r:
            if re.search('German', i.content):
                hoster = re.search('(?<=domain=)(.*?)(?=\")', i.content)
                hoster = hoster.group().lower()

                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid:
                    continue

                link = re.search('(?<=links/)(.*?)(?=/)', i.content)
                link = link.group()

                if re.search('<td>HD</td>', i.content):
                    quality = 'HD'
                else:
                    quality = 'SD'

                url = self.__get_link(link)

                sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        query = urlparse.urljoin(self.base_link, url)

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'player'})
        r = dom_parser.parse_dom(r, 'iframe', req='src')
        r = client.request(r[0].attrs['src'])
        r = dom_parser.parse_dom(r, 'a', attrs={'class': 'play_container'}, req='href')
        r = client.request(r[0].attrs['href'])

        url = self.get_link % (re.search('(?<=var id = \")(.*?)(?=\")', r).group(), re.search('(?<=var links = \")(.*?)(?=\")', r).group())

        r = client.request(url)
        r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'articleList'})
        r = dom_parser.parse_dom(r, 'a')

        for i in r:
            if 'http' in i.attrs['href']:
                link = i.attrs['href']
            elif 'http' in i.attrs['onclick']:
                link = re.search('http(.*?)(?=\")', i.attrs['onclick']).group()
            else:
                return sources

            valid, hoster = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue

            sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': link, 'direct': False, 'debridonly': False})

        return sources
    except:
        return sources
def resolve(self, url):
    try:
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url, referer=self.base_link)
        r = json.loads(r)['Stream']
        r = [(dom_parser.parse_dom(r, 'a', req='href'), dom_parser.parse_dom(r, 'iframe', req='src'))]
        r = [i[0][0].attrs['href'] if i[0] else i[1][0].attrs['src'] for i in r if i[0] or i[1]][0]

        if not r.startswith('http'):
            # some links come wrapped in a query string; pick the first
            # parameter value that looks like an absolute URL
            r = urlparse.parse_qs(r)
            r = [r[i][0] if r[i] and r[i][0].startswith('http') else (i, '') for i in r][0]

        return r
    except:
        return
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        r = client.request(urlparse.urljoin(self.base_link, url))
        r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
        r = dom_parser.parse_dom(r, 'iframe', req='src')
        r = [i.attrs['src'] for i in r]

        for i in r:
            valid, host = source_utils.is_host_valid(i, hostDict)
            if not valid:
                continue

            sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})

        return sources
    except:
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return

        url = urlparse.urljoin(self.base_link, url)

        result = client.request(url)

        # title and premiered are normalised here, but the match below keys
        # off the season/episode markup only
        title = cleantitle.get(title)

        premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
        premiered = '%s/%s/%s' % (premiered[2], premiered[1], premiered[0])

        items = dom_parser.parse_dom(result, 'a', attrs={'itemprop': 'url'})

        url = [i.attrs['href'] for i in items if bool(re.compile('<span\s*>%s<.*?itemprop="episodeNumber">%s<\/span>' % (season, episode)).search(i.content))][0]
        url = url.encode('utf-8')

        return url
    except:
        return
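# The premiered handling above reshapes an ISO 'YYYY-MM-DD' date into a
# 'DD/MM/YYYY' layout. A minimal standalone sketch of the same transformation
# (illustrative only):
import re


def _to_site_date(premiered):
    # e.g. '2016-04-17' -> '17/04/2016'
    y, m, d = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
    return '%s/%s/%s' % (d, m, y)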
def __search(self, imdb):
    try:
        l = ['1', '15']  # accepted language ids

        r = client.request(urlparse.urljoin(self.base_link, self.search_link % imdb))
        r = dom_parser.parse_dom(r, 'table', attrs={'id': 'RsltTableStatic'})
        r = dom_parser.parse_dom(r, 'tr')
        r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'img', attrs={'alt': 'language'}, req='src')) for i in r]
        r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0].attrs['src']) for i in r if i[0] and i[1]]
        r = [(i[0], i[1], re.findall('.+?(\d+)\.', i[2])) for i in r]
        r = [(i[0], i[1], i[2][0] if len(i[2]) > 0 else '0') for i in r]
        r = sorted(r, key=lambda i: int(i[2]))  # german > german/subbed
        r = [i[0] for i in r if i[2] in l][0]

        return source_utils.strip_domain(r)
    except:
        return
def __search(self, titles, year, season='0'):
    try:
        aj = cache.get(self.__get_ajax_object, 24)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(aj.get('ajax_url'), post={'action': aj.get('search'), 'nonce': aj.get('snonce'), 'query': cleantitle.query(titles[0])})

        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'search-result'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'search-item-content'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content.lower()) for i in r if i]
        r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1])) for i in r]
        r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
        r = [(i[0], i[1].replace(' hd', ''), i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

        return source_utils.strip_domain(r)
    except:
        return
def __get_link(self, link):
    try:
        if not link:
            return

        query = self.get_link % link
        query = urlparse.urljoin(self.base_link, query)

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'boton'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = r[0].attrs['href']

        return r
    except:
        return
def __search(self, titles):
    try:
        query = self.search_link % urllib.quote_plus(titles[0])
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'article')
        r = dom_parser.parse_dom(r, 'a', attrs={'class': 'rb'}, req='href')
        r = [(i.attrs['href'], i.content) for i in r]
        r = [i[0] for i in r if cleantitle.get(i[1]) in t][0]

        return source_utils.strip_domain(r)
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return

        query = urlparse.urljoin(self.base_link, url)

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'td', attrs={'data-title-name': re.compile('Season %02d' % int(season))})
        r = dom_parser.parse_dom(r, 'a', req='href')[0].attrs['href']

        r = client.request(urlparse.urljoin(self.base_link, r))
        r = dom_parser.parse_dom(r, 'td', attrs={'data-title-name': re.compile('Episode %02d' % int(episode))})
        r = dom_parser.parse_dom(r, 'a', req='href')[0].attrs['href']

        return source_utils.strip_domain(r)
    except:
        return
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)
        r = r.replace('\\"', '"')

        links = dom_parser.parse_dom(r, 'tr', attrs={'id': 'tablemoviesindex2'})

        for i in links:
            try:
                host = dom_parser.parse_dom(i, 'img', req='alt')[0].attrs['alt']
                host = host.split()[0].rsplit('.', 1)[0].strip().lower()
                host = host.encode('utf-8')

                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid:
                    continue

                url = dom_parser.parse_dom(i, 'a', req='href')[0].attrs['href']
                url = client.replaceHTMLCodes(url)
                url = urlparse.urljoin(self.base_link, url)
                url = url.encode('utf-8')

                sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        r = client.request(urlparse.urljoin(self.base_link, url))
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'hosterSiteVideo'})
        r = dom_parser.parse_dom(r, 'li', attrs={'data-lang-key': re.compile('[1|3]')})
        r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'h4'), 'subbed' if i.attrs['data-lang-key'] == '3' else '') for i in r]
        r = [(i[0][0].attrs['href'], i[1][0].content.lower(), i[2]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [(i[0], i[1], re.findall('(.+?)\s*<br\s*/?>(.+?)$', i[1], re.DOTALL), i[2]) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '', i[3]) for i in r]
        r = [(i[0], i[1], 'HD' if 'hosterhdvideo' in i[2] else 'SD', i[3]) for i in r]

        for link, host, quality, info in r:
            valid, host = source_utils.is_host_valid(host, hostDict)
            if not valid:
                continue

            sources.append({'source': host, 'quality': quality, 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False})

        return sources
    except:
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return

        url = urlparse.urljoin(self.base_link, url)
        url = client.request(url, output='geturl')

        # season/episode arrive as strings, so compare as ints; the 1x1
        # special case falls back to the show's base entry (the '-x' slug)
        if int(season) == 1 and int(episode) == 1:
            season = episode = ''

        r = client.request(url)
        r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'episodios'})
        r = dom_parser.parse_dom(r, 'a', attrs={'href': re.compile('[^\'"]*%s' % ('-%sx%s' % (season, episode)))})[0].attrs['href']

        return source_utils.strip_domain(r)
    except:
        return
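# The episode lookup above keys on a '-<season>x<episode>' slug fragment in
# the href (e.g. '...-2x5'). A hedged standalone sketch of that match:
import re


def _matches_episode(href, season, episode):
    # with the 1x1 case the pattern degrades to '-x', which the lookup
    # above relies on to select the show's base entry
    return re.search('-%sx%s' % (season, episode), href) is not None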
def __search(self, titles, year, season='0'):
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'article', attrs={'class': 'shortstory'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 's_info'})
        r = dom_parser.parse_dom(r, 'h2')
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content.lower()) for i in r if i]
        r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1]), re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = [(i[0], i[1], i[2], re.findall('(.+?)(\d+)\s+(?:staf+el|s)', i[1])) for i in r]
        r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
        r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

        return source_utils.strip_domain(r)
    except:
        return