def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Resolve a TV show to a site-relative URL, restricted to anime.

    Trakt supplies the genres (non-anime shows short-circuit with the
    sentinel '0'); TVMaze supplies the canonical title used to search the
    site. Returns the encoded relative path, '0' for non-anime, or None
    on any failure.
    """
    try:
        # Genre gate via Trakt: only anime/animation shows are searchable here.
        r = 'search/tvdb/%s?type=show&extended=full' % tvdb
        r = json.loads(trakt.getTrakt(r))
        if not r: return '0'
        d = r[0]['show']['genres']
        if not ('anime' in d or 'animation' in d): return '0'

        # Prefer TVMaze's canonical name for the search query.
        tv_maze = tvmaze.tvMaze()
        tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
        tvshowtitle = tvshowtitle['name']

        t = cleantitle.get(tvshowtitle)

        q = self.search_link % (urllib.quote_plus(tvshowtitle))
        q = urlparse.urljoin(self.base_link, q)

        r = client.request(q)
        r = client.parseDOM(r, 'ol', attrs = {'id': 'searchresult'})[0]
        r = client.parseDOM(r, 'h2')
        # Pair each result's href with its anchor text; keep complete pairs.
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in r]  # strip inline markup
        r = [i for i in r if t == cleantitle.get(i[1])]
        r = r[-1][0]  # last exact-title match wins

        url = re.findall('(?://.+?|)(/.+)', r)[0]  # reduce to site-relative path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Find a show's permalink via the site's AJAX search (account required).

    Candidates are verified by fetching each page and matching a
    'Released: YYYY' marker against *year* (only the first two title
    matches are checked). Returns the permalink path or None.
    """
    try:
        # This provider needs credentials; bail out early without them.
        if (self.user == '' or self.password == ''): raise Exception()

        t = cleantitle.get(tvshowtitle)

        u = urlparse.urljoin(self.base_link, self.search_link)
        # Site quirk: search on the text before the last ':' in the title.
        p = {'q': tvshowtitle.rsplit(':', 1)[0], 'limit': '10', 'timestamp': int(time.time() * 1000), 'verifiedCheck': ''}
        p = urllib.urlencode(p)

        r = client.request(u, post=p, XHR=True)
        r = json.loads(r)

        # Keep TV results with a matching cleaned title; cap verification at 2.
        r = [i for i in r if i['meta'].strip().split()[0].lower() == 'tv']
        r = [i['permalink'] for i in r if t == cleantitle.get(i['title'])][:2]
        r = [(i, urlparse.urljoin(self.base_link, i)) for i in r]
        r = [(i[0], client.request(i[1])) for i in r]
        r = [(i[0], i[1]) for i in r if not i[1] == None]
        # Extract the release year from each page and require it to match.
        r = [(i[0], re.sub('\s|<.+?>|</.+?>', '', i[1])) for i in r]
        r = [(i[0], re.findall('eleased:(\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0]) for i in r if i[1]]
        r = [i for i in r if year in i[1]]
        r = r[0][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Locate an episode page under a show URL.

    Matches by (cleaned title + formatted air date), then air date alone,
    then the 'season-S-episode-E' slug as a last resort. Returns the
    site-relative episode path or None.
    """
    try:
        if url == None: return

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'tv_episode_item')
        result = client.parseDOM(result, 'div', attrs={'class': 'tv_episode_item'})

        title = cleantitle.get(title)

        # Re-format the ISO air date into the site's 'Month D YYYY' style.
        premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
        premiered = '%s %01d %s' % (premiered[1].replace('01', 'January').replace('02', 'February').replace('03', 'March').replace('04', 'April').replace('05', 'May').replace('06', 'June').replace('07', 'July').replace('08', 'August').replace('09', 'September').replace('10', 'October').replace('11', 'November').replace('12', 'December'), int(premiered[2]), premiered[0])

        # Collect (href, episode name, version/date) per item; pad missing
        # fields with None so the later comparisons still line up.
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs={'class': 'tv_num_versions'})) for i in result]
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

        # Progressive matching: title+date, date only, then episode slug.
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

        url = url[0][0]
        url = proxy.parse(url)

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, localtitle, year):
    """Find a movie page by searching with the localized title from IMDb.

    Fetches the IMDb title page with an Arabic Accept-Language header so
    the query matches this site's localized listings, then matches the
    cleaned title + year in the search results. Returns the site-relative
    path or None (the final ``except: pass`` makes failure implicit).
    """
    try:
        # Pull the localized title from IMDb and strip the '(YYYY) ...' tail.
        t = 'http://www.imdb.com/title/%s' % imdb
        t = client.request(t, headers={'Accept-Language': 'ar-AR'})
        t = client.parseDOM(t, 'title')[0]
        t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()

        q = self.search_link % urllib.quote_plus(t)
        q = urlparse.urljoin(self.base_link, q)

        r = client.request(q)
        r = client.parseDOM(r, 'div', attrs={'class': 'item'})
        # (href, title, year) triples; keep complete rows only.
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'class': 'tt'}), client.parseDOM(i, 'span', attrs={'class': 'year'})) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        r = [i[0] for i in r if cleantitle.get(t) == cleantitle.get(i[1]) and year == i[2]][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def movie(self, imdb, title, localtitle, year):
    """Find a movie URL, first by guessing the slug, then via search.

    Fast path: build '<slug>-<year>' and verify it resolves through the
    proxy. Slow path: scrape the secondary search endpoint and match the
    quoted '<title> (<year>)' strings on cleaned title + year. Returns a
    site-relative path or None.
    """
    try:
        # Fast path: derive the URL directly from the title slug and year.
        url = self.search_link % (cleantitle.geturl(title), year)
        q = urlparse.urljoin(self.base_link, url)

        r = proxy.geturl(q)
        # If the proxy resolved the guessed URL, it is valid as-is.
        if not r == None: return url

        # Slow path: scrape search results.
        t = cleantitle.get(title)

        q = self.search_link_2 % urllib.quote_plus(cleantitle.query(title))
        q = urlparse.urljoin(self.base_link, q)

        r = client.request(q)
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        # Anchor text embeds quoted "<name>(<year>" strings; parse them out.
        r = [(i[0], re.findall('(?:\'|\")(.+?)(?:\'|\")', i[1])) for i in r]
        r = [(i[0], [re.findall('(.+?)\((\d{4})', x) for x in i[1]]) for i in r]
        r = [(i[0], [x[0] for x in i[1] if x]) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]]

        url = re.findall('(?://.+?|)(/.+)', r[0])[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, localtitle, year):
    """Find a movie page by scraping the site's search results.

    Matches the first result whose cleaned title equals the cleaned query
    title and whose first four-digit year equals *year*. Returns the
    site-relative path (utf-8 encoded) or None on any failure.
    """
    try:
        search_url = urlparse.urljoin(self.base_link, self.search_link)
        search_url = search_url % urllib.quote_plus(title)

        wanted = cleantitle.get(title)

        html = client.request(search_url)
        posts = client.parseDOM(html, 'div', attrs={'id': 'post-\d+'})

        # Keep hrefs of complete rows whose title and year both match.
        hits = []
        for post in posts:
            hrefs = client.parseDOM(post, 'a', ret='href')
            labels = client.parseDOM(post, 'a', ret='title')
            years = re.findall('(\d{4})', post)
            if not (len(hrefs) > 0 and len(labels) > 0 and len(years) > 0):
                continue
            if wanted == cleantitle.get(labels[0]) and year == years[0]:
                hits.append(hrefs[0])

        first = hits[0]

        url = re.findall('(?://.+?|)(/.+)', first)[0]
        url = client.replaceHTMLCodes(url)
        return url.encode('utf-8')
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Find a show page, tolerating a +/-1 year mismatch.

    Scrapes up to two pages of search results, prefers an exact
    'watch<title> (<year>)' match, and otherwise confirms up to five
    candidates by scanning their pages for the show's IMDb id. Returns a
    site-relative path or None.
    """
    try:
        query = self.tvsearch_link % urllib.quote_plus(cleantitle.query(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)

        result = str(proxy.request(query, 'free movies'))
        # Follow the second results page when pagination is present.
        if 'page=2' in result or 'page%3D2' in result: result += str(proxy.request(query + '&page=2', 'free movies'))

        result = client.parseDOM(result, 'div', attrs={'class': 'item'})

        # Site result titles look like 'Watch <name> (<year>)'.
        tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
        # Accept the year either side to cover listing discrepancies.
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]

        r = [(proxy.parse(i[0]), i[1]) for i in result]

        # Exact title+year matches, plus a de-duplicated fallback list.
        match = [i[0] for i in r if tvshowtitle == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

        match2 = [i[0] for i in r]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        # Verify up to five candidates by looking for the IMDb id on-page.
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'free movies')
                r = re.findall('(tt\d+)', r)
                if imdb in r:
                    url = i
                    break
            except:
                pass

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def searchMovie(self, title):
    """Resolve *title* to the href of its first matching search result.

    Returns the raw href of the first 'ml-item' hit whose cleaned title
    equals the cleaned (normalized) query title, or None on any failure.
    """
    try:
        normalized = cleantitle.normalize(title)

        search_url = urlparse.urljoin(
            self.base_link,
            self.search_link_2 % urllib.quote_plus(cleantitle.getsearch(normalized)))

        html = client.request(search_url, timeout='10')
        cards = client.parseDOM(html, 'div', attrs={'class': 'ml-item'})

        pairs = zip(client.parseDOM(cards, 'a', ret='href'),
                    client.parseDOM(cards, 'a', ret='title'))

        wanted = cleantitle.get(normalized)
        for href, label in pairs:
            if wanted == cleantitle.get(label):
                return href

        raise Exception()
    except:
        return
def movie(self, imdb, title, localtitle, year):
    """Find a movie on an Indian-cinema site, searching within its language.

    Reads the film's primary language from IMDb's search links, maps it to
    the site's language section, then matches cleaned title + year in that
    section's results. Returns the numeric site id as a string, or None.
    """
    try:
        langMap = {'hi': 'hindi', 'ta': 'tamil', 'te': 'telugu', 'ml': 'malayalam', 'kn': 'kannada', 'bn': 'bengali', 'mr': 'marathi', 'pa': 'punjabi'}

        # Determine the primary language from the IMDb title page links.
        lang = 'http://www.imdb.com/title/%s/' % imdb
        lang = client.request(lang)
        lang = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', lang)
        lang = [i for i in lang if 'primary_language' in i]
        lang = [urlparse.parse_qs(urlparse.urlparse(i).query) for i in lang]
        lang = [i['primary_language'] for i in lang if 'primary_language' in i]
        lang = langMap[lang[0][0]]  # KeyError on unmapped language -> None

        q = self.search_link % (lang, urllib.quote_plus(title))
        q = urlparse.urljoin(self.base_link, q)

        t = cleantitle.get(title)

        r = client.request(q)
        r = client.parseDOM(r, 'li')
        # (href, heading, info block) per result, reduced to (id, name, year).
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h3'), client.parseDOM(i, 'div', attrs={'class': 'info'})) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
        r = [(re.findall('(\d+)', i[0]), i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0][0], i[1], i[2][0]) for i in r if i[0] and i[2]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

        url = str(r)
        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Find a show via the provider's JSON API (base64-obscured endpoint).

    Accepts a +/-1 year tolerance; when no exact title+year match exists,
    up to five candidates are verified by comparing their IMDb ids.
    Returns a '/json/...'-relative path or None.
    """
    try:
        query = base64.b64decode(self.search_link) % urllib.quote_plus(cleantitle.query(tvshowtitle))

        result = self.request(query)

        tvshowtitle = cleantitle.get(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

        result = [i for i in result if any(x in str(i['year']) for x in years)]

        # NOTE(review): the first 'match' assignment (title-only) is
        # immediately overwritten by the stricter title+year version below,
        # so it has no effect -- looks like leftover code.
        match = [i['href'] for i in result if tvshowtitle == cleantitle.get(i['name'])]
        match = [i['href'] for i in result if tvshowtitle == cleantitle.get(i['name']) and str(year) == str(i['year'])]

        match2 = [i['href'] for i in result]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]  # de-dupe, keep order
        if match2 == []: return

        # Prefer the exact match; otherwise confirm candidates via IMDb id.
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                if imdb in str(self.request(i)[0]['imdb']):
                    url = i
                    break
            except:
                pass

        url = '/' + url.split('/json/')[-1]
        url = url.encode('utf-8')
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Locate an episode under a show page (ISO-dated variant).

    Matches by cleaned title + 'YYYY-MM-DD' air date, then date alone,
    then the 'season-S-episode-E' slug. Returns a site-relative path or
    None.
    """
    try:
        if url == None: return

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'main_body')
        result = client.parseDOM(result, 'div', attrs={'class': 'tv_episode_item'})

        title = cleantitle.get(title)

        # (href, episode name, air date) per item; missing fields -> None.
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

        # Progressive matching: title+date, date only, then episode slug.
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

        url = client.replaceHTMLCodes(url[0][0])
        url = proxy.parse(url)

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def searchMovie(self, title):
    """Resolve *title* to its '/watching.html' page URL on the site.

    Searches by slug, picks the first result whose cleaned <h2> text
    matches the cleaned (normalized) title, and returns the absolute
    watch URL, or None on failure.
    """
    try:
        normalized = cleantitle.normalize(title)

        search_url = urlparse.urljoin(
            self.base_link, self.search_link % cleantitle.geturl(normalized))

        html = client.request(search_url, timeout='10')
        cards = client.parseDOM(html, 'div', attrs={'class': 'ml-item'})

        pairs = zip(client.parseDOM(cards, 'a', ret='href'),
                    client.parseDOM(cards, 'h2'))

        wanted = cleantitle.get(normalized)
        hits = [href for href, heading in pairs if wanted == cleantitle.get(heading)]
        first = hits[0]

        return urlparse.urljoin(self.base_link, '%s/watching.html' % first)
    except:
        return
def dizigold_tvcache(self):
    """Scrape the site index and return [(path, cleaned_title), ...].

    Hrefs are reduced to site-relative paths and HTML numeric entities are
    stripped from the names before cleaning. Returns None on failure.
    """
    try:
        html = client.request(self.base_link)
        listing = client.parseDOM(html, 'div', attrs={'class': 'dizis'})[0]

        shows = []
        for href, label in re.compile('href="(.+?)">(.+?)<').findall(listing):
            path = re.sub('http.+?//.+?/', '/', href)      # make site-relative
            name = re.sub('&#\d*;', '', label)             # drop numeric entities
            shows.append((path, cleantitle.get(name)))

        return shows
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Look up a show via the site's JSON search endpoint.

    Returns the slug (utf-8 encoded) of the first result whose cleaned
    title and year both match, or None on any failure.
    """
    try:
        search_url = urlparse.urljoin(
            self.base_link, self.search_tv_link % (urllib.quote_plus(tvshowtitle)))

        results = json.loads(client.request(search_url, XHR=True))

        wanted = cleantitle.get(tvshowtitle)

        # Materialize all triples first (a malformed entry anywhere fails
        # the lookup, matching the original behaviour).
        triples = [(entry['slug'], entry['title'], entry['year']) for entry in results]

        for slug, name, released in triples:
            if wanted == cleantitle.get(name) and year == str(released):
                return slug.encode('utf-8')

        raise Exception()
    except:
        return
def movie(self, imdb, title, localtitle, year):
    """Find a movie URL, first by slug guess, then via a search API.

    The fallback search endpoint is stored base64-encoded; its results are
    matched on 'Watch <title> (<year>)' patterns. Returns a site-relative
    path or None (both branches end in ``pass``, so failure is implicit).
    """
    try:
        # Fast path: '<base>/<slug>-<year>/' and let the site redirect.
        url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(title), year)
        url = client.request(url, output='geturl')
        if url == None: raise Exception()
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass

    try:
        # Fallback: query the (base64-encoded) search API with 'title year'.
        t = cleantitle.get(title)

        q = '%s %s' % (title, year)
        q = self.search_link.decode('base64') % urllib.quote_plus(q)

        r = client.request(q, error=True)
        r = json.loads(r)['results']
        r = [(i['url'], i['titleNoFormatting']) for i in r]
        # Parse '<name>(<year>' out of each result title; keep matches only.
        r = [(i[0], re.findall('(?:^Watch Movie |^Watch |)(.+?)\((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        r = [(urllib.unquote_plus(i[0]), i[1], i[2]) for i in r]
        r = [(urlparse.urlparse(i[0]).path, i[1], i[2]) for i in r]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        # Normalize '/watch-movie-...-123' links to their canonical form.
        r = re.sub('/watch-movie-|-\d+$', '/', r[0][0].strip())

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Find a TV show via the (base64-encoded) search API.

    Matches on cleaned title; the year is then verified either from the
    URL itself or, failing that, from a 'Date: YYYY' marker on the page.
    Returns a site-relative path or None.
    """
    try:
        t = cleantitle.get(tvshowtitle)

        q = '%s %s' % (tvshowtitle, year)
        q = self.search_link.decode('base64') % urllib.quote_plus(q)

        r = client.request(q)
        r = json.loads(r)['results']
        r = [(i['url'], i['titleNoFormatting']) for i in r]
        # Strip 'Watch ...' prefixes and a trailing 'TV Series' suffix.
        r = [(i[0], re.findall('(?:^Watch Movie |^Watch |)(.+?)$', i[1])) for i in r]
        r = [(i[0], i[1][0].rsplit('TV Series')[0].strip('(')) for i in r if i[1]]
        r = [(urllib.unquote_plus(i[0]), i[1]) for i in r]
        r = [(urlparse.urlparse(i[0]).path, i[1]) for i in r]
        r = [i for i in r if t == cleantitle.get(i[1])]
        r = urlparse.urljoin(self.base_link, r[0][0].strip())

        # Canonicalize movie-style links before year verification.
        if '/watch-movie-' in r: r = re.sub('/watch-movie-|-\d+$', '/', r)

        # Verify the year from the URL, or fetch the page and read it.
        y = re.findall('(\d{4})', r)
        if y:
            y = y[0]
        else:
            y = client.request(r)
            y = re.findall('(?:D|d)ate\s*:\s*(\d{4})', y)[0]
        if not year == y: raise Exception()

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Pick an episode from the provider's JSON show payload.

    Flattens the nested season->episode mapping, keeps only entries with
    links, then matches by title + 'DD/MM/YYYY' air date, date alone, or
    the '_sS_eE' URL fragment. Returns a '/json/...'-relative path or
    None.
    """
    try:
        if url == None: return

        result = self.request(url)
        result = result[0]['episodes'].values()

        # Some payloads nest episodes one level deeper; unwrap where possible.
        for i, v in enumerate(result):
            try: result[i] = v.values()
            except: pass

        result = [i for i in result if type(i) == list]
        result = sum(result, [])  # flatten the per-season lists
        result = [i for i in result if i['hasLinks'] == True]

        title = cleantitle.get(title)
        # Convert ISO 'YYYY-MM-DD' to the provider's 'DD/MM/YYYY'.
        premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
        premiered = '%s/%s/%s' % (premiered[2], premiered[1], premiered[0])

        # Progressive matching: title+date, date only, then URL fragment.
        url = [i for i in result if title == cleantitle.get(i['name']) and premiered == i['release']][:1]
        if len(url) == 0: url = [i for i in result if premiered == i['release']]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if '_s%01d_e%01d' % (int(season), int(episode)) in i['url']]

        url = '/' + url[0]['url'].split('/json/')[-1]
        url = url.encode('utf-8')
        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Find a show's slug via the AJAX term search, falling back to HTML.

    BUG FIX: the original code unconditionally reset ``r = None`` right
    after the JSON parse, which made the AJAX branch dead code and forced
    every lookup through the slower proxied HTML search.  The stray
    assignment is removed so a successful JSON response is actually used;
    a failed parse still falls back to the HTML path exactly as before.

    Returns the final path component of the show URL (utf-8 encoded) or
    None on any failure.
    """
    try:
        t = cleantitle.get(tvshowtitle)

        q = urllib.quote_plus(cleantitle.query(tvshowtitle))
        p = urllib.urlencode({'term': q})

        r = client.request(self.search_link, post=p, XHR=True)
        # Tolerate a non-JSON response: fall through to the HTML search.
        try: r = json.loads(r)
        except: r = None

        if r:
            # AJAX result objects carry the url/title/label directly.
            r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
        else:
            # Fallback: scrape the proxied HTML search results.
            r = proxy.request(self.search_link_2 % q, 'tv shows')
            r = client.parseDOM(r, 'div', attrs={'valign': '.+?'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]

        # Keep entries whose label contains a year; match title + year.
        r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]

        url = r[0][0]
        url = proxy.parse(url)
        url = url.strip('/').split('/')[-1]  # keep the slug only
        url = url.encode('utf-8')
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Locate an episode on a '/serie/<slug>' page via itemprop markup.

    Matches title + air date, then title alone, then date alone; any
    remaining ambiguity is treated as failure. Returns a site-relative
    path or None.
    """
    try:
        if url == None: return

        url = '%s/serie/%s' % (self.base_link, url)

        r = proxy.request(url, 'tv shows')
        r = client.parseDOM(r, 'li', attrs={'itemprop': 'episode'})

        t = cleantitle.get(title)

        # (href, name, date) per episode item; missing fields become None.
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'itemprop': 'name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
        # Names are prefixed (e.g. episode number); keep the last word only.
        r = [(i[0], i[1][0].split(' ')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
        r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
        r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

        # Progressive matching: title+date, title, date; ambiguity -> fail.
        url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if not url: url = [i for i in r if t == cleantitle.get(i[1])]
        if len(url) > 1 or not url: url = [i for i in r if premiered == i[2]]
        if len(url) > 1 or not url: raise Exception()

        url = url[0][0]
        url = proxy.parse(url)

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def searchMovie(self, title, year):
    """Search the site for *title* released in *year*.

    Result headings carry '<name> (<year>)' labels; the first hit whose
    cleaned name contains the cleaned query title and whose year matches
    is returned as a site-relative path (utf-8 encoded), or None.
    """
    try:
        normalized = cleantitle.normalize(title)

        search_url = urlparse.urljoin(
            self.base_link,
            self.moviesearch_link % (cleantitle.geturl(normalized.replace('\'', '-'))))

        html = client.request(search_url)

        wanted = cleantitle.get(normalized)

        hits = []
        for heading in client.parseDOM(html, 'h2', attrs={'class': 'tit'}):
            hrefs = client.parseDOM(heading, 'a', ret='href')
            labels = client.parseDOM(heading, 'a', ret='title')
            if not (len(hrefs) > 0 and len(labels) > 0):
                continue
            parsed = re.findall('(.+?) \((\d{4})', labels[0])
            if not len(parsed) > 0:
                continue
            name, released = parsed[0]
            if wanted in cleantitle.get(name) and year == released:
                hits.append(hrefs[0])

        match = hits[0]

        url = re.findall('(?://.+?|)(/.+)', match)[0]
        url = client.replaceHTMLCodes(url)
        return url.encode('utf-8')
    except:
        return
def movie(self, imdb, title, localtitle, year):
    """Find a movie via the site's JSON autocomplete (debrid users only).

    Returns a site-relative path matched on cleaned title + year, or None.
    """
    try:
        # This source only serves debrid links; skip without an account.
        if debrid.status() == False: raise Exception()

        t = cleantitle.get(title)

        query = self.search_link + urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)

        r = client.request(query, XHR=True)
        r = json.loads(r)

        # Keep movie-category rows; labels look like '<name> (<year>)'.
        r = [i for i in r if 'category' in i and 'movie' in i['category'].lower()]
        r = [(i['url'], i['label']) for i in r if 'label' in i and 'url' in i]
        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, localtitle, year):
    """Find a movie via the (base64-encoded) search API, then by slug guess.

    The first attempt matches '/watch/' result URLs on title + year; the
    fallback builds a dashed slug and checks the year survives the
    redirect. Returns a site-relative path / slug URL, or None.
    """
    try:
        t = cleantitle.get(title)

        q = '%s %s' % (title, year)
        q = self.search_link.decode('base64') % urllib.quote_plus(q)

        r = client.request(q)
        r = json.loads(r)['results']
        r = [(i['url'], i['titleNoFormatting']) for i in r]
        # Drop URL-encoded tails; parse '<name>(<year>' from result titles.
        r = [(i[0].split('%')[0], re.findall('(?:^Watch |)(.+?)(?:\(|)(\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        # Movies only: '/watch/' pages that are not season listings.
        r = [i for i in r if '/watch/' in i[0] and not '-season-' in i[0]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        r = r[0][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass

    try:
        # Fallback: guess a dashed slug and confirm the year via redirect.
        url = re.sub('[^A-Za-z0-9]', '-', title).lower()
        url = self.moviesearch_link % (url, year)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r, output='geturl')
        if not year in r: raise Exception()
        return url
    except:
        return
def movie(self, imdb, title, localtitle, year):
    """Find a movie through the proxied POST search endpoint.

    Returns a site-relative path matched on cleaned title + year, or None
    (the final ``except: pass`` makes failure implicit).
    """
    try:
        t = cleantitle.get(title)

        p = self.post_link % urllib.quote_plus(cleantitle.query(title))
        q = urlparse.urljoin(self.base_link, self.search_link)

        r = proxy.request(q, 'playing top', post=p, XHR=True)
        r = client.parseDOM(r, 'li')
        # (href, anchor text) pairs; labels look like '<name>(<year>'.
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
        r = [(i[0], re.findall('(.+?)\((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        r = [i for i in r if t == cleantitle.get(i[1]) and str(year) == i[2]]

        url = proxy.parse(r[0][0])
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Resolve a TV show to a site-relative URL, restricted to anime.

    Same gatekeeping as the other anime providers: Trakt supplies the
    genres (non-anime returns the sentinel '0'), TVMaze supplies the
    canonical title used for searching. Returns a relative path, '0', or
    None.
    """
    try:
        # Genre gate via Trakt: only anime/animation shows are searchable.
        r = 'search/tvdb/%s?type=show&extended=full' % tvdb
        r = json.loads(trakt.getTrakt(r))
        if not r: return '0'
        d = r[0]['show']['genres']
        if not ('anime' in d or 'animation' in d): return '0'

        # Prefer TVMaze's canonical name for the search query.
        tv_maze = tvmaze.tvMaze()
        tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
        tvshowtitle = tvshowtitle['name']

        t = cleantitle.get(tvshowtitle)

        q = urlparse.urljoin(self.base_link, self.search_link)
        q = q % urllib.quote_plus(tvshowtitle)

        r = client.request(q)
        r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
        r = client.parseDOM(r, 'li')
        # (href, title, last year on the row); keep complete rows only.
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i)) for i in r]
        r = [(i[0][0], i[1][0], i[2][-1]) for i in r if i[0] and i[1] and i[2]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        r = r[0][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Find the show's path using the cached site index.

    Looks the cleaned title up in the onlinedizi_tvcache result (cached
    for 120 minutes) and returns the URL path (utf-8 encoded), or None.
    """
    try:
        shows = cache.get(self.onlinedizi_tvcache, 120)

        wanted = cleantitle.get(tvshowtitle)
        hits = [i[0] for i in shows if wanted == i[1]]
        first = hits[0]

        absolute = urlparse.urljoin(self.base_link, first)
        path = urlparse.urlparse(absolute).path
        path = client.replaceHTMLCodes(path)
        return path.encode('utf-8')
    except:
        return
def onlinedizi_tvcache(self):
    """Scrape the all-series listing; return [(path, cleaned_title), ...].

    Only entries whose (last) href matches the '/diziler/<slug>/' pattern
    are kept. Returns None on failure.
    """
    try:
        html = client.request(self.base_link)
        listing = client.parseDOM(html, 'ul', attrs={'class': 'all-series-list.+?'})[0]

        path_re = re.compile('http.+?//.+?/diziler(/.+?/)')

        shows = []
        for item in client.parseDOM(listing, 'li'):
            hrefs = client.parseDOM(item, 'a', ret='href')
            labels = client.parseDOM(item, 'a')
            if not (len(hrefs) > 0 and len(labels) > 0):
                continue
            paths = path_re.findall(hrefs[-1])  # last anchor is the show link
            if not len(paths) > 0:
                continue
            name = re.sub('&#\d*;', '', labels[0])  # drop numeric entities
            shows.append((paths[0], cleantitle.get(name)))

        return shows
    except:
        return
def sezonlukdizi_tvcache(self):
    """Build a [(path, cleaned_title), ...] index from the search feed.

    Entries are JS-like '{u: "...", d: "..."}' objects holding the URL and
    display name. Returns the list, or None on failure.
    """
    try:
        url = urlparse.urljoin(self.base_link, self.search_link)

        # The endpoint is flaky; retry up to three times.
        for i in range(3):
            result = client.request(url)
            if not result == None: break

        # Each '{...}' object carries u: url and d: display-name fields.
        result = re.compile('{(.+?)}').findall(result)
        result = [(re.findall('u\s*:\s*(?:\'|\")(.+?)(?:\'|\")', i), re.findall('d\s*:\s*(?:\'|\")(.+?)(?:\'|\")', i)) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        # Reduce to the '/diziler/<slug>' path; strip numeric entities.
        result = [(re.compile('/diziler(/.+?)(?://|\.|$)').findall(i[0]), re.sub('&#\d*;', '', i[1])) for i in result]
        result = [(i[0][0] + '/', cleantitle.get(i[1])) for i in result if len(i[0]) > 0]
        return result
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Collect debrid-only hoster links for a movie/episode from an RSS feed.

    *url* is a urlencoded query (title/year or tvshowtitle/season/episode).
    Each feed item's enclosure URLs are filtered by release-name checks
    (title, year/SxxEyy token, quality tags) and by known hosts. Returns a
    list of source dicts; CAM-quality entries are dropped when anything
    better was found.
    """
    try:
        sources = []

        if url == None: return sources

        # Feed links point at premium hosters; useless without debrid.
        if debrid.status() == False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

        # hdlr is the release token each name must carry: SxxEyy or year.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)

        posts = client.parseDOM(r, 'item')

        hostDict = hostprDict + hostDict

        # Gather (name, link) candidates from each feed item.
        items = []

        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]

                u = client.parseDOM(post, 'enclosure', ret='url')
                u = [i for i in u if not 'openload' in i]

                if 'tvshowtitle' in data:
                    # Episode posts bundle several files; append each file
                    # name so the links stay distinguishable.
                    u = [(re.sub('(720p|1080p)', '', t) + ' ' + [x for x in i.strip('//').split('/')][-1], i) for i in u]
                else:
                    u = [(t, i) for i in u]

                items += u
            except:
                pass

        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)

                # The release title (before the year/SxxEyy token) must match.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr: raise Exception()

                # Everything after the token describes the rip (quality etc.).
                fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [i.lower() for i in fmt]

                if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                if any(i in ['extras'] for i in fmt): raise Exception()

                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                else: quality = 'SD'
                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                info = []

                if '3d' in fmt: info.append('3D')

                try:
                    # NOTE(review): items built above are (name, link) pairs,
                    # so item[2] always raises here and no size is attached --
                    # looks like a leftover from a 3-tuple variant; confirm.
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass

                if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                info = ' | '.join(info)

                url = item[1]
                if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                # Hoster is the registrable tail of the netloc; must be known.
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            except:
                pass

        # Prefer non-CAM sources when any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check: sources = check

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect debrid-only hoster links, trying an RSS feed then HTML search.

    The feed path parses <item> entries (requiring a video enclosure); when
    the feed is unavailable the HTML post listing is scraped instead, with
    IMDb-id matching for movies and title/SxxEyy matching for episodes.
    Both paths produce (name, link, size) candidates that pass through the
    same release-name filtering. Returns a list of source dicts; CAM
    entries are dropped when anything better exists.
    """
    try:
        sources = []

        if url == None: return sources

        if debrid.status() == False: raise Exception()

        hostDict = hostprDict + hostDict

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        # Release token each candidate name must carry: SxxEyy or year.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        imdb = data['imdb']
        content = 'episode' if 'tvshowtitle' in data else 'movie'

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        # --- Primary: RSS feed search -------------------------------------
        try:
            feed = True

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            if r == None: feed = False

            posts = client.parseDOM(r, 'item')
            if not posts: feed = False

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]

                    # Require a video enclosure before trusting the post.
                    u = client.parseDOM(post, 'enclosure', ret='url', attrs={'type': 'video.+?'})
                    if not u: raise Exception()

                    # The advertised size lives in the content block.
                    c = client.parseDOM(post, 'content.+?')[0]
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
                    s = s[0] if s else '0'

                    u = client.parseDOM(c, 'a', ret='href')

                    items += [(t, i, s) for i in u]
                except:
                    pass
        except:
            pass

        # --- Fallback: HTML post search (only when the feed failed) -------
        try:
            if feed == True: raise Exception()

            url = self.search_link_2 % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'div', attrs={'class': 'post'})

            items = [] ; dupes = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'a')[0]

                    if content == 'movie':
                        # Movies: match by IMDb id, require 1280-wide video,
                        # and keep at most four posts.
                        x = re.findall('/(tt\d+)', post)[0]
                        if not x == imdb: raise Exception()
                        q = re.findall('<strong>\s*Video\s*:\s*</strong>.+?\s(\d+)', post)[0]
                        if not int(q) == 1280: raise Exception()
                        if len(dupes) > 3: raise Exception()
                        dupes += [x]

                    elif content == 'episode':
                        # Episodes: match the cleaned show title and the
                        # SxxEyy token; keep only the first matching post.
                        x = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)
                        if not cleantitle.get(title) in cleantitle.get(x): raise Exception()
                        y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', t)[-1].upper()
                        if not y == hdlr: raise Exception()
                        if len(dupes) > 0: raise Exception()
                        dupes += [x]

                    u = client.parseDOM(post, 'a', ret='href')[0]

                    # Fetch the post body; links live after the 'more' marker.
                    r = client.request(u).replace('\n', '')
                    u = client.parseDOM(r, 'div', attrs={'class': 'postContent'})[0]
                    u = re.split('id\s*=\s*"more-\d+"', u)[-1]

                    if content == 'episode':
                        # Episode posts list several versions; keep the 720p one.
                        u = re.compile('(?:<strong>|)(.+?)</strong>(.+?)(?:<strong>|$)', re.MULTILINE|re.DOTALL).findall(u)
                        u = [(re.sub('<.+?>|</.+?>|>', '', i[0]), i[1]) for i in u]
                        u = [i for i in u if '720p' in i[0].lower()][0]
                        u, r, t = u[1], u[1], u[0]

                    # One hoster link per paragraph; drop multi-link rows.
                    u = client.parseDOM(u, 'p')
                    u = [client.parseDOM(i, 'a', ret='href') for i in u]
                    u = [i[0] for i in u if len(i) == 1]
                    if not u: raise Exception()

                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', r)
                    s = s[0] if s else '0'

                    items += [(t, i, s) for i in u]
                except:
                    pass
        except:
            pass

        # --- Common filtering over (name, link, size) candidates ----------
        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)

                # The release title (before the token) must match the query title.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr: raise Exception()

                # Everything after the token describes the rip (quality etc.).
                fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [i.lower() for i in fmt]

                if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                if any(i in ['extras'] for i in fmt): raise Exception()

                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                else: quality = 'SD'
                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                info = []

                if '3d' in fmt: info.append('3D')

                try:
                    # Normalize the advertised size to GB for display.
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass

                if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                info = ' | '.join(info)

                url = item[1]
                if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                # Hoster is the registrable tail of the netloc; must be known.
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            except:
                pass

        # Prefer non-CAM sources when any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check: sources = check

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect debrid episode links from the cached series listing.

    Looks up the show/season rows in the ddlseries cache, scrapes each
    quality page for the numbered episode links, unwraps 'protect-links'
    redirect pages and keeps only premium-hoster URLs. Returns a list of
    source dicts (possibly empty).
    """
    try:
        sources = []

        if url == None: return sources

        # Premium-hoster links only; useless without a debrid account.
        if debrid.status() == False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle']
        season = '%01d' % int(data['season'])
        episode = '%02d' % int(data['episode'])

        # Cached rows are indexed as (page_url, show_title, season, quality).
        r = cache.get(self.ddlseries_tvcache, 120)

        r = [(i[0], i[3]) for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and season == i[2]]

        links = []

        for url, quality in r:
            try:
                link = client.request(url)
                vidlinks = client.parseDOM(link, 'span', attrs={'class': 'overtr'})[0]
                # 'Episode N' anchors; keep only the requested episode.
                match = re.compile('href="([^"]+)[^>]*>\s*Episode\s+(\d+)<').findall(vidlinks)
                match = [(i[0], quality) for i in match if episode == i[1]]
                links += match
            except:
                pass

        for url, quality in links:
            try:
                # Unwrap link-protector pages to the real hoster URL.
                if "protect-links" in url:
                    redirect = client.request(url)
                    url = re.findall('<a href="(.*?)" target="_blank">', redirect)
                    url = url[0]

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostprDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': True})
            except:
                pass

        return sources
    except:
        return sources