def __search(self, title, year):
    try:
        # The search endpoint needs a nonce that the site embeds in an
        # inline 'sL10n' JSON object on the start page.
        r = client.request(self.base_link)
        r = re.findall('sL10n\s*=\s*({.*?});', r)[0]
        r = json.loads(r)['nonce']

        query = self.search_link % (urllib.quote_plus(cleantitle.query(title)), r)
        query = urlparse.urljoin(self.base_link, query)

        t = cleantitle.get(title)
        # Accept the requested year, one year either side, or '0' (undated).
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)
        r = json.loads(r)
        r = [(i, r[i].get('url', ''), r[i].get('title', ''), r[i].get('extra', {}).get('names', ''), r[i].get('extra', {}).get('date', '0')) for i in r]
        r = [(i[0], i[1], client.replaceHTMLCodes(i[2]), client.replaceHTMLCodes(i[3]), i[4]) for i in r]
        # Match on either the main title or the alternative names, and on the year.
        r = [i[1] for i in r if (t == cleantitle.get(i[2]) or t == cleantitle.get(i[3])) and i[4] in y][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
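
# Nearly every method in this file normalizes a scraped link to a
# site-relative path with re.findall('(?://.+?|)(/.+)', url)[0]. A minimal
# standalone sketch of that idiom (the helper name and example URLs are
# illustrative, not part of the original modules):
import re

def to_relative_path(url):
    # The optional '//.+?' branch swallows a scheme-and-host prefix such as
    # 'http://example.com', keeping everything from the first path slash.
    return re.findall('(?://.+?|)(/.+)', url)[0]

# to_relative_path('http://example.com/watch/abc')  -> '/watch/abc'
# to_relative_path('/watch/abc')                    -> '/watch/abc'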
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url == None: return sources

        # The page lists hosters by their base name ('openload'), so key
        # the known-host list on the part before the last dot.
        hostDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]
        hostDict = [i[0] for i in hostDict]

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = urlparse.urljoin(self.base_link, data['url'])
        season = data['season'] if 'season' in data else False
        episode = data['episode'] if 'episode' in data else False

        r = client.request(url)

        if season and episode:
            # For shows, resolve the season selector to the per-episode
            # hoster list endpoint first.
            r = client.parseDOM(r, 'select', attrs={'id': 'SeasonSelection'}, ret='rel')[0]
            r = client.replaceHTMLCodes(r)[1:]
            r = urlparse.parse_qs(r)
            r = dict([(i, r[i][0]) if r[i] else (i, '') for i in r])
            r = urlparse.urljoin(self.base_link, self.get_links_epi % (r['Addr'], r['SeriesID'], season, episode))
            r = client.request(r)

        r = client.parseDOM(r, 'ul', attrs={'id': 'HosterList'})[0]
        r = re.compile('(<li.+?/li>)', re.DOTALL).findall(r)
        r = [(client.parseDOM(i, 'li', attrs={'id': 'Hoster_\d+'}, ret='rel'), client.parseDOM(i, 'li', attrs={'id': 'Hoster_\d+'})) for i in r]
        r = [(client.replaceHTMLCodes(i[0][0]), i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Pull the hoster name and its mirror count ('n/m') from the label.
        r = [(i[0], re.findall('class="Named"[^>]*>([^<]+).*?(\d+)/(\d+)', i[1])) for i in r]
        r = [(i[0], i[1][0][0].lower().rsplit('.', 1)[0], i[1][0][1], i[1][0][2]) for i in r if len(i[1]) > 0]
        r = [(i[0], i[1], i[3]) for i in r if i[1] in hostDict]

        for i in r:
            u = urlparse.parse_qs('&id=%s' % i[0])
            u = dict([(x, u[x][0]) if u[x] else (x, '') for x in u])

            # Emit one source per available mirror.
            for x in range(0, int(i[2])):
                url = self.mirror_link % (u['id'], u['Hoster'], x + 1)
                if season and episode: url += "&Season=%s&Episode=%s" % (season, episode)
                try: sources.append({'source': i[1], 'quality': 'SD', 'provider': 'KinoX', 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
                except: pass

        return sources
    except:
        return sources
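
# A minimal sketch of the host-name normalization used above (the host
# names here are only examples): full hosts like 'openload.co' are reduced
# to the base label the page displays, while the full name is kept so a
# matched label can be mapped back.
def base_host_pairs(host_list):
    # 'openload.co' -> ('openload', 'openload.co')
    return [(h.rsplit('.', 1)[0], h) for h in host_list]

# base_host_pairs(['openload.co', 'streamango.com'])
#   -> [('openload', 'openload.co'), ('streamango', 'streamango.com')]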
def __search(self, title, type, year, season=0, episode=False):
    try:
        query = self.search_link % (type, year, urllib.quote_plus(cleantitle.query(title)))
        query = urlparse.urljoin(self.base_link, query)

        t = cleantitle.get(title)

        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-items'})
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = [(client.parseDOM(i, 'a', attrs={'class': 'ml-name'}, ret='href'), client.parseDOM(i, 'a', attrs={'class': 'ml-name'})) for i in r]
        r = [(i[0][0], re.sub('<.+?>|</.+?>', '', i[1][0]).strip()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Split an appended season marker ('... Staffel 2' / '... S 2') off the title.
        r = [(i[0], i[1], re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1].lower())) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(i[2]) == int(season)][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        if episode:
            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)
            r = client.parseDOM(r, 'div', attrs={'class': 'season-list'})
            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[1]) > 0 and int(i[1][0]) == int(episode)]

            url = re.findall('(?://.+?|)(/.+)', r[0][0])[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url == None: return

        # The site lists episodes by absolute number, so translate the
        # season/episode pair via TVMaze first.
        tv_maze = tvmaze.tvMaze()
        num = tv_maze.episodeAbsoluteNumber(tvdb, int(season), int(episode))
        num = str(num)

        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)
        r = r.decode('iso-8859-1').encode('utf-8')
        r = client.parseDOM(r, 'tr', attrs={'class': ''})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'td', attrs={'class': 'epnum'})) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [i[0] for i in r if num == i[1]][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def __search(self, search_link, imdb, title):
    try:
        query = search_link % (urllib.quote_plus(cleantitle.query(title)))
        query = urlparse.urljoin(self.base_link, query)

        t = cleantitle.get(title)

        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'big-list'})
        r = client.parseDOM(r, 'table', attrs={'class': 'row'})
        r = client.parseDOM(r, 'td', attrs={'class': 'list-name'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1])][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        # Verify that the candidate page actually references the requested IMDB id.
        r = client.request(urlparse.urljoin(self.base_link, url))
        r = client.parseDOM(r, 'a', attrs={'href': '[^\'"]+/tt\d+[^\'"]+'}, ret='href')
        r = [re.findall('.+?(tt\d+).*?', i) for i in r]
        r = [i[0] for i in r if len(i) > 0]

        return url if imdb in r else None
    except:
        return
def movie(self, imdb, title, year):
    try:
        if debrid.status() == False: raise Exception()

        t = cleantitle.get(title)

        headers = {'X-Requested-With': 'XMLHttpRequest'}

        query = self.search_link + urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)

        r = client.request(query, headers=headers)
        r = json.loads(r)

        r = [i for i in r if 'category' in i and 'movie' in i['category'].lower()]
        r = [(i['url'], i['label']) for i in r if 'label' in i and 'url' in i]
        # Labels look like 'Title (2016) ...'; keep name and year separately.
        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def __search(self, title):
    try:
        r = {'keyword': cleantitle.getsearch(title)}
        r = urllib.urlencode(r)
        r = client.request(urlparse.urljoin(self.base_link, self.search_link), post=r)

        t = cleantitle.get(title)

        r = json.loads(r)
        r = [(i['link'], re.sub('<.+?>|</.+?>', '', i['title'])) for i in r if 'title' in i and 'link' in i]
        # Strip a trailing 'Movie N: ...' part counter from franchise titles.
        r = [(i[0], i[1], re.findall('(.+?)\s*Movie \d+:.+?$', i[1], re.DOTALL)) for i in r]
        r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1]) for i in r]
        r = [i[0] for i in r if t == cleantitle.get(i[1])][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def __search(self, imdb):
    try:
        r = {'story': imdb, 'do': 'search', 'subaction': 'search'}
        r = urllib.urlencode(r)
        r = client.request(self.base_link, post=r)

        r = client.parseDOM(r, 'div', attrs={'class': 'film-table'})
        r = [client.parseDOM(i, 'a', attrs={'class': ''}, ret='href') for i in r]
        r = [i[0] for i in r if len(i[0]) > 0]

        url = None

        if len(r) > 1:
            # Several candidates: open each one and keep the page whose
            # IMDB rating widget references the requested id.
            for i in r:
                data = client.request(i)
                data = client.parseDOM(data, 'span', attrs={'class': 'imdb-rate'}, ret='onclick')
                data = [d for d in data if ("'%s'" % imdb) in d]
                if len(data) >= 1: url = i
        else:
            url = r[0]

        if url:
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
    except:
        return
def __search(self, title, year, season='0', episode=False):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(title)))
        query = urlparse.urljoin(self.base_link, query)

        t = cleantitle.get(title)
        # Accept the requested year, one year either side, or '0' (undated).
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)
        r = client.parseDOM(r, 'ul', attrs={'class': 'products row'})
        r = client.parseDOM(r, 'div', attrs={'class': 'box-product clearfix'})
        r = client.parseDOM(r, 'h3', attrs={'class': 'title-product'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][0].lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Split the year and an optional season marker out of the label.
        r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1])) for i in r]
        r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
        r = [(i[0], i[1].replace(' hd', ''), i[2], i[3]) for i in r]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and i[2] in y and int(i[3]) == int(season)][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        url = url.replace('-info', '-stream')
        if episode: url = urlparse.urlparse(url).path + '?episode=%s' % int(episode)
        return url
    except:
        return
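
# The +/-1 year window above recurs in several of these searches; a minimal
# sketch of the idea (the helper name is illustrative): scraped sites often
# list a release year off by one, and '0' accepts entries with no date at all.
def candidate_years(year):
    return [str(year), str(int(year) + 1), str(int(year) - 1), '0']

# candidate_years(2016) -> ['2016', '2017', '2015', '0']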
def movie(self, imdb, title, year):
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        t = cleantitle.get(title)

        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'cell_container'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    if url == None: return

    url = '/%s-%01d-sezon-%01d-bolum/' % (url.replace('/', ''), int(season), int(episode))
    url = client.replaceHTMLCodes(url)
    url = url.encode('utf-8')
    return url
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url == None: return

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'tv_episode_item')
        result = client.parseDOM(result, 'div', attrs={'class': 'tv_episode_item'})

        title = cleantitle.get(title)

        # The site prints air dates as 'Month D YYYY', so rebuild that form
        # from the ISO date we were given.
        months = {'01': 'January', '02': 'February', '03': 'March', '04': 'April',
                  '05': 'May', '06': 'June', '07': 'July', '08': 'August',
                  '09': 'September', '10': 'October', '11': 'November', '12': 'December'}
        premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
        premiered = '%s %01d %s' % (months[premiered[1]], int(premiered[2]), premiered[0])

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs={'class': 'tv_num_versions'})) for i in result]
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

        # Prefer a title+date match, then date alone, then the s/e slug.
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

        url = url[0][0]
        url = proxy.parse(url)

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def sky_list(self, num, channel, id):
    try:
        # Find the programme currently airing on this channel.
        url = self.sky_now_link % id
        result = client.request(url, timeout='10')
        result = json.loads(result)
        match = result['listings'][id][0]['url']

        dt1 = (self.uk_datetime).strftime('%Y-%m-%d')
        dt2 = int((self.uk_datetime).strftime('%H'))

        # Bucket the current UK hour into the guide's quarter-day slot.
        if (dt2 < 6): dt2 = 0
        elif (dt2 >= 6 and dt2 < 12): dt2 = 1
        elif (dt2 >= 12 and dt2 < 18): dt2 = 2
        elif (dt2 >= 18): dt2 = 3

        url = self.sky_programme_link % (id, str(dt1), str(dt2))
        result = client.request(url, timeout='10')
        result = json.loads(result)
        result = result['listings'][id]
        result = [i for i in result if i['url'] == match][0]

        year = result['d']
        year = re.findall('[(](\d{4})[)]', year)[0].strip()
        year = year.encode('utf-8')

        title = result['t']
        title = title.replace('(%s)' % year, '').strip()
        title = client.replaceHTMLCodes(title)
        title = title.encode('utf-8')

        self.items.append((title, year, channel, num))
    except:
        pass
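
# The if/elif ladder above is equivalent to integer division by six; a
# minimal sketch (the helper name is illustrative):
def day_quarter(hour):
    # 0-5 -> 0, 6-11 -> 1, 12-17 -> 2, 18-23 -> 3
    return int(hour) // 6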
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        # Keep both forms of every known host: the base label for matching
        # and the full name for the source entry.
        hostDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]
        locDict = [i[0] for i in hostDict]

        result = client.request(url)
        result = result.decode('iso-8859-1').encode('utf-8')

        # Collect the main player iframe plus any alternative links.
        r = client.parseDOM(result, 'div', attrs={'class': 'player-embed'})[0]
        r = client.parseDOM(r, 'iframe', ret='src')[0]
        links = [(r, url)]

        r = client.parseDOM(result, 'div', attrs={'class': 'generic-video-item'})
        r = [(i.split('</div>', 1)[-1].split()[0], client.parseDOM(i, 'a', ret='href', attrs={'rel': '.+?'})) for i in r]
        links += [(i[0], i[1][0]) for i in r if i[1]]

        for i in links:
            try:
                try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(i[0].strip().lower()).netloc)[0]
                except: host = i[0].lower()
                host = host.rsplit('.', 1)[0]

                if not host in locDict: raise Exception()
                host = [x[1] for x in hostDict if x[0] == host][0]
                host = host.encode('utf-8')

                url = i[1]
                url = urlparse.urljoin(self.base_link, url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url == None: return

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'tv_episode_item')
        result = client.parseDOM(result, 'div', attrs={'class': 'tv_episode_item'})

        title = cleantitle.get(title)

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

        # Prefer a title+date match, then date alone, then the s/e slug.
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

        url = client.replaceHTMLCodes(url[0][0])
        url = proxy.parse(url)

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, year):
    try:
        t = cleantitle.get(title)

        y = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        q = self.search_link % (urllib.quote_plus(cleantitle.query(title)), str(int(year) - 1), str(int(year) + 1))
        q = urlparse.urljoin(self.base_link, q)

        r = proxy.request(q, 'movie_table')
        r = client.parseDOM(r, 'div', attrs={'class': 'movie_table'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h1')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
        r = [(i[0], client.parseDOM(i[1], 'a')) for i in r]
        r = [(i[0], i[1][0]) for i in r if i[1]]
        r = [i for i in r if any(x in i[1] for x in y)]
        r = [(proxy.parse(i[0]), i[1]) for i in r]

        # Exact title+year matches first; otherwise probe the remaining
        # (deduplicated) candidates for the IMDB id, trying at most five.
        match = [i[0] for i in r if t == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

        match2 = [i[0] for i in r]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        for i in match2[:5]:
            try:
                if match:
                    url = match[0]
                    break
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'movie25')
                r = re.findall('(tt\d+)', r)
                if imdb in r:
                    url = i
                    break
            except:
                pass

        url = urlparse.urljoin(self.base_link, url)

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def tvshow(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.tvsearch_link % urllib.quote_plus(cleantitle.query(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)

        result = str(proxy.request(query, 'item'))
        # Pull in the second results page if one is advertised.
        if 'page=2' in result or 'page%3D2' in result: result += str(proxy.request(query + '&page=2', 'item'))

        result = client.parseDOM(result, 'div', attrs={'class': 'item'})

        tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]

        r = [(proxy.parse(i[0]), i[1]) for i in result]

        # Exact title+year matches first; otherwise probe the remaining
        # (deduplicated) candidates for the IMDB id, trying at most five.
        match = [i[0] for i in r if tvshowtitle == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

        match2 = [i[0] for i in r]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'tv_episode_item')
                r = re.findall('(tt\d+)', r)
                if imdb in r:
                    url = i
                    break
            except:
                pass

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url == None: return sources

        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
            else:
                url = '%s/movies/%s/' % (self.base_link, cleantitle.geturl(data['title']))
                year = data['year']

            # Follow redirects to the canonical page, then verify the year
            # shown there matches the request.
            url = client.request(url, output='geturl')
            if url == None: raise Exception()

            r = client.request(url)

            y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0]
            y = re.findall('(\d{4})', y)[0]

            if not y == year: raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

        links = client.parseDOM(r, 'iframe', ret='src')

        for link in links:
            try:
                url = link.replace('\/', '/')
                url = client.replaceHTMLCodes(url)
                url = 'http:' + url if url.startswith('//') else url
                url = url.encode('utf-8')

                if not '.php' in url: raise Exception()

                # The embed pages expose streams as 'file: "..."' entries.
                r = client.request(url, timeout='10')
                r = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)

                for i in r:
                    try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                    except: pass
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'ovie')
        result = result.replace('\n', '')

        quality = re.compile('Quality(.+?)<').findall(result)
        quality = quality[0].strip() if quality else 'SD'
        if quality == 'CAM' or quality == 'TS': quality = 'CAM'
        elif quality == 'SCREENER': quality = 'SCR'
        else: quality = 'SD'

        dupes = []

        links = re.findall('\'(http.+?)\'', result) + re.findall('\"(http.+?)\"', result)

        for i in links:
            try:
                url = i
                url = proxy.parse(url)
                # The real target is base64-encoded in the query string.
                url = urlparse.urlparse(url).query
                url = url.decode('base64')
                url = re.findall('((?:http|https)://.+?/.+?)(?:&|$)', url)[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                if url in dupes: raise Exception()
                dupes.append(url)

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def local(title, imdb, lang):
    try:
        # Ask imdb.com for the title page in the requested language and use
        # the localized <title> tag, falling back to the input on failure.
        t = 'http://www.imdb.com/title/%s' % imdb
        t = client.request(t, headers={'Accept-Language': lang})
        t = client.parseDOM(t, 'title')[0]
        t = re.sub('\((?:.+?|)\d{4}.+', '', t).strip()
        t = client.replaceHTMLCodes(t)
        t = t.encode('utf-8')
        return t
    except:
        return title
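
# The regex above strips the '(2010)' / '(TV Series 2011- )' suffix that
# imdb.com appends to its <title> tag. A standalone sketch of the same
# cleanup (the helper name and sample string are illustrative):
import re

def strip_imdb_suffix(page_title):
    return re.sub('\((?:.+?|)\d{4}.+', '', page_title).strip()

# strip_imdb_suffix('Inception (2010) - IMDb') -> 'Inception'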
def movie(self, imdb, title, year):
    try:
        q = self.search_link_2.decode('base64') % urllib.quote_plus(title)

        # The endpoint is flaky, so retry the request up to four times.
        r = None
        for _ in range(4):
            r = client.request(q)
            if not r == None: break

        r = json.loads(r)['results']
        r = [(i['url'], i['titleNoFormatting']) for i in r]
        r = [(i[0], re.findall('(?:^Watch |)(.+? \(\d{4}\))', i[1])) for i in r]
        r = [(urlparse.urljoin(self.base_link, i[0]), i[1][0]) for i in r if i[1]]

        t = cleantitle.get(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        r = [i for i in r if any(x in i[1] for x in years)]

        # Exact title+year matches first; otherwise probe the remaining
        # (deduplicated) candidates for the IMDB id, trying at most five.
        match = [i[0] for i in r if t == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

        match2 = [i[0] for i in r]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'ovie')
                r = re.findall('(tt\d+)', r)
                if imdb in r:
                    url = i
                    break
            except:
                pass

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'link_ite')

        links = client.parseDOM(result, 'table', attrs={'class': 'link_ite.+?'})

        for i in links:
            try:
                url = client.parseDOM(i, 'a', ret='href')
                url = [x for x in url if 'gtfo' in x][-1]
                url = proxy.parse(url)
                # The real target is base64-encoded in the 'gtfo' parameter.
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['gtfo'][0]
                url = base64.b64decode(url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = host.encode('utf-8')

                quality = client.parseDOM(i, 'div', attrs={'class': 'quality'})
                if any(x in ['[CAM]', '[TS]'] for x in quality): quality = 'CAM'
                else: quality = 'SD'
                quality = quality.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def movie(self, imdb, title, year):
    # Try the predictable '/title-year/' URL first.
    try:
        url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(title), year)
        url = client.request(url, output='geturl')
        if url == None: raise Exception()

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass

    # Fall back to the (base64-obfuscated) search endpoint.
    try:
        t = cleantitle.get(title)

        q = '%s %s' % (title, year)
        q = self.search_link.decode('base64') % urllib.quote_plus(q)

        r = client.request(q, error=True)
        r = json.loads(r)['results']
        r = [(i['url'], i['titleNoFormatting']) for i in r]
        r = [(i[0], re.findall('(?:^Watch Movie |^Watch |)(.+?)\((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        r = [(urllib.unquote_plus(i[0]), i[1], i[2]) for i in r]
        r = [(urlparse.urlparse(i[0]).path, i[1], i[2]) for i in r]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        r = re.sub('/watch-movie-|-\d+$', '/', r[0][0].strip())

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        h = {'User-Agent': client.agent()}

        r = client.request(url, headers=h, output='extended')

        # Each episode entry carries its stream parameters in a JSON-like
        # data attribute; flatten that into a query string.
        s = client.parseDOM(r[0], 'ul', attrs={'class': 'episodes'})
        s = client.parseDOM(s, 'a', ret='data.+?')
        s = [client.replaceHTMLCodes(i).replace(':', '=').replace(',', '&').replace('"', '').strip('{').strip('}') for i in s]

        for u in s:
            try:
                url = '/io/1.0/stream?%s' % u
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url)
                r = json.loads(r)

                url = [i['src'] for i in r['streams']]

                for i in url:
                    try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                    except: pass
            except:
                pass

        return sources
    except:
        return sources
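
# A minimal sketch of the data-attribute rewrite above (the sample input is
# illustrative): the page stores stream parameters as a JSON-like attribute,
# which gets flattened into a query string for the /io/1.0/stream call.
def attr_to_query(attr):
    return attr.replace(':', '=').replace(',', '&').replace('"', '').strip('{').strip('}')

# attr_to_query('{"id":123,"server":4}') -> 'id=123&server=4'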
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if (self.user == '' or self.password == ''): raise Exception()

        if url == None: return

        url = '%s/season/%01d/episode/%01d' % (url, int(season), int(episode))
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, year):
    try:
        url = self.search_link % (cleantitle.geturl(title), year)
        url = urlparse.urljoin(self.base_link, url)

        url = client.request(url, output='geturl')

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def parse(url):
    try: url = client.replaceHTMLCodes(url)
    except: pass
    try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
    except: pass
    try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
    except: pass
    return url
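
# Usage sketch for parse(): redirect wrappers carry the real target in a
# 'u' or 'q' query parameter, and anything else passes through unchanged
# (the proxy URL below is hypothetical):
#
#   parse('http://proxy.example/out?u=http%3A%2F%2Fhost.example%2Fvideo')
#     -> 'http://host.example/video'
#   parse('http://host.example/video')
#     -> 'http://host.example/video'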
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'choose_tabs')

        links = client.parseDOM(result, 'tbody')

        for i in links:
            try:
                url = client.parseDOM(i, 'a', ret='href')[0]
                url = proxy.parse(url)
                # The real target is base64-encoded in the 'url' parameter.
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
                url = base64.b64decode(url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = host.encode('utf-8')

                quality = client.parseDOM(i, 'span', ret='class')[0]
                if quality == 'quality_cam' or quality == 'quality_ts': quality = 'CAM'
                elif quality == 'quality_dvd': quality = 'SD'
                else: raise Exception()

                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
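
# A minimal sketch of the unwrapping used above and in the 'gtfo' variant
# earlier (helper name and example link are illustrative): the scraped pages
# hide the real hoster link base64-encoded in a query parameter.
import base64
import urlparse

def decode_redirect(link, param='url'):
    value = urlparse.parse_qs(urlparse.urlparse(link).query)[param][0]
    return base64.b64decode(value)

# decode_redirect('http://site.example/go?url=aHR0cDovL2hvc3QuZXhhbXBsZS92aWRlbw==')
#   -> 'http://host.example/video'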
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        r = proxy.request(url, 'movie25')
        r = r.replace('\n', '')

        quality = re.findall('>Links - Quality(.+?)<', r)
        quality = quality[0].strip() if quality else 'SD'
        if quality == 'CAM' or quality == 'TS': quality = 'CAM'
        elif quality == 'SCREENER': quality = 'SCR'
        else: quality = 'SD'

        links = client.parseDOM(r, 'a', ret='href')
        # Deduplicate while preserving order.
        links = [x for y, x in enumerate(links) if x not in links[:y]]

        for i in links:
            try:
                url = i
                url = proxy.parse(url)
                # The real target is the base64-encoded last path segment.
                url = url.strip('/').split('/')[-1]
                url = url.decode('base64')
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def tvshow(self, imdb, tvdb, tvshowtitle, year):
    try:
        result = cache.get(self.dizigold_tvcache, 120)

        tvshowtitle = cleantitle.get(tvshowtitle)

        result = [i[0] for i in result if tvshowtitle == i[1]][0]

        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return