def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Resolve an anime show's site URL from its TVDB id.

    Returns '0' for non-anime shows, the relative site URL on success,
    or None on any failure (bare except is this scraper's convention).
    """
    try:
        # Look the show up on Trakt to inspect its genres.
        r = 'search/tvdb/%s?type=show&extended=full' % tvdb
        r = json.loads(trakt.getTrakt(r))
        if not r: return '0'
        d = r[0]['show']['genres']
        # This source only hosts anime; bail out early otherwise.
        if not ('anime' in d or 'animation' in d): return '0'
        # Prefer TVMaze's canonical show name for searching.
        tv_maze = tvmaze.tvMaze()
        tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
        tvshowtitle = tvshowtitle['name']
        t = cleantitle.get(tvshowtitle)
        q = self.search_link % (urllib.quote_plus(tvshowtitle))
        q = urlparse.urljoin(self.base_link, q)
        r = client.request(q)
        r = client.parseDOM(r, 'ol', attrs = {'id': 'searchresult'})[0]
        r = client.parseDOM(r, 'h2')
        # (href, anchor-text) pairs for every search hit.
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Strip any markup left inside the anchor text.
        r = [(i[0], re.sub('<.+?>|</.+?>','', i[1])) for i in r]
        r = [i for i in r if t == cleantitle.get(i[1])]
        # Last matching hit wins; IndexError here falls through to the bare except.
        r = r[-1][0]
        # Reduce an absolute URL to its path component.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, localtitle, year):
    """Resolve a movie page URL; direct slug lookup first, then site search."""
    try:
        # Fast path: build the slug URL directly and see if it resolves.
        url = self.search_link % (cleantitle.geturl(title), year)
        q = urlparse.urljoin(self.base_link, url)
        r = proxy.geturl(q)
        # NOTE(review): the relative slug (not the resolved URL) is returned here.
        if not r == None: return url
        # Fallback: scrape the search results and match on title/year.
        t = cleantitle.get(title)
        q = self.search_link_2 % urllib.quote_plus(cleantitle.query(title))
        q = urlparse.urljoin(self.base_link, q)
        r = client.request(q)
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        # Pull quoted "Title (Year" text out of each anchor body.
        r = [(i[0], re.findall('(?:\'|\")(.+?)(?:\'|\")', i[1])) for i in r]
        r = [(i[0], [re.findall('(.+?)\((\d{4})', x) for x in i[1]]) for i in r]
        r = [(i[0], [x[0] for x in i[1] if x]) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        # IndexError on no match falls through to the bare except.
        url = re.findall('(?://.+?|)(/.+)', r[0])[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, localtitle, year):
    """Resolve a movie page URL by searching with IMDb's Arabic title."""
    try:
        # Fetch the Arabic-localised title from IMDb's <title> tag.
        t = 'http://www.imdb.com/title/%s' % imdb
        t = client.request(t, headers={'Accept-Language': 'ar-AR'})
        t = client.parseDOM(t, 'title')[0]
        # Drop the trailing "(YYYY) ..." portion of the page title.
        t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
        q = self.search_link % urllib.quote_plus(t)
        q = urlparse.urljoin(self.base_link, q)
        r = client.request(q)
        r = client.parseDOM(r, 'div', attrs={'class': 'item'})
        # (href, title, year) per result item.
        r = [(client.parseDOM(i, 'a', ret='href'),
              client.parseDOM(i, 'span', attrs={'class': 'tt'}),
              client.parseDOM(i, 'span', attrs={'class': 'year'})) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        # Exact title match plus year; IndexError → bare except → None.
        r = [i[0] for i in r if cleantitle.get(t) == cleantitle.get(i[1]) and year == i[2]][0]
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Resolve a show URL by slug; falls back to IMDb's Spanish title."""
    try:
        url = self.tvsearch_link % cleantitle.geturl(tvshowtitle)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r, limit='1')
        r = client.parseDOM(r, 'title')
        if not r:
            # Slug missed: retry with the localised (es-ES) IMDb title.
            url = 'http://www.imdb.com/title/%s' % imdb
            url = client.request(url, headers={'Accept-Language':'es-ES'})
            url = client.parseDOM(url, 'title')[0]
            url = re.sub('\((?:.+?|)\d{4}.+', '', url).strip()
            url = cleantitle.normalize(url.encode("utf-8"))
            url = self.tvsearch_link % cleantitle.geturl(url)
            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1')
            r = client.parseDOM(r, 'title')
        # Sanity check: the page <title> must mention the expected year.
        if not year in r[0]: raise Exception()
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Resolve an episode page URL by title, air date, or SxE slug."""
    try:
        if url == None: return
        url = urlparse.urljoin(self.base_link, url)
        result = proxy.request(url, 'tv_episode_item')
        result = client.parseDOM(result, 'div', attrs={'class': 'tv_episode_item'})
        title = cleantitle.get(title)
        # Convert "YYYY-MM-DD" to the site's "Month D YYYY" date format.
        premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
        premiered = '%s %01d %s' % (
            premiered[1].replace('01', 'January').replace('02', 'February')
                        .replace('03', 'March').replace('04', 'April')
                        .replace('05', 'May').replace('06', 'June')
                        .replace('07', 'July').replace('08', 'August')
                        .replace('09', 'September').replace('10', 'October')
                        .replace('11', 'November').replace('12', 'December'),
            int(premiered[2]), premiered[0])
        result = [(client.parseDOM(i, 'a', ret='href'),
                   client.parseDOM(i, 'span', attrs={'class': 'tv_episode_name'}),
                   client.parseDOM(i, 'span', attrs={'class': 'tv_num_versions'})) for i in result]
        # Keep every row, substituting None where the name/date span is missing.
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]
        # Match priority: title+date, then date alone, then SxE in the href.
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]
        url = url[0][0]
        url = proxy.parse(url)
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, localtitle, year):
    """Resolve a movie page URL from the site's search results."""
    try:
        query = urlparse.urljoin(self.base_link, self.search_link)
        query = query % urllib.quote_plus(title)
        t = cleantitle.get(title)
        r = client.request(query)
        # WordPress-style post divs; the id attr value is a regex for parseDOM.
        r = client.parseDOM(r, 'div', attrs={'id': 'post-\d+'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('(\d{4})', i)) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        # Exact title match plus year; IndexError → bare except → None.
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Collect playable stream sources; requires account credentials.

    Logs in, extracts the embed iframe, then gathers gvideo/CDN links
    from the decrypted "mplanet" payload and the embed page itself.
    Returns a list of source dicts (possibly empty); the bare except
    preserves the scraper convention of failing silently.
    """
    try:
        sources = []
        if url == None: return sources
        # Credentials are mandatory for this host.
        if (self.user == '' or self.password == ''): raise Exception()
        login = urlparse.urljoin(self.base_link, '/login')
        post = {'username': self.user, 'password': self.password, 'action': 'login'}
        post = urllib.urlencode(post)
        cookie = client.request(login, post=post, XHR=True, output='cookie')
        url = urlparse.urljoin(self.base_link, url)
        result = client.request(url, cookie=cookie)
        # First embed entry holds the player iframe.
        url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
        url = client.parseDOM(url, 'iframe', ret='src')[0]
        url = url.replace('https://', 'http://')
        links = []
        try:
            # Encrypted "mplanet" payload -> decrypt -> Google video links.
            dec = re.findall('mplanet\*(.+)', url)[0]
            dec = dec.rsplit('&')[0]
            # Hard-coded base64-encoded key for _gkdecrypt.
            dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
            dec = directstream.google(dec)
            # FIX: append dicts (not tuples) so the final loop can read
            # i['source']/i['quality']/i['url']; previously the first tuple
            # raised there, discarding every collected link.
            links += [{'source': 'gvideo', 'quality': i['quality'], 'url': i['url']} for i in dec]
        except:
            pass
        result = client.request(url)
        try:
            # Direct http src attributes -> gvideo links.
            url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")', result)
            for i in url:
                try:
                    links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
                except:
                    pass
        except:
            pass
        try:
            # <source> tags / inline src: entries -> first CDN link.
            url = client.parseDOM(result, 'source', ret='src')
            url += re.findall('src\s*:\s*\'(.*?)\'', result)
            url = [i for i in url if '://' in i]
            links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
        except:
            pass
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Resolve a show URL, disambiguating candidates via the IMDb id."""
    try:
        query = self.tvsearch_link % urllib.quote_plus(cleantitle.query(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        result = str(proxy.request(query, 'free movies'))
        # Pull in page 2 of results when pagination is present.
        if 'page=2' in result or 'page%3D2' in result:
            result += str(proxy.request(query + '&page=2', 'free movies'))
        result = client.parseDOM(result, 'div', attrs={'class': 'item'})
        tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
        # Accept the year and its immediate neighbours (release-date skew).
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]
        r = [(proxy.parse(i[0]), i[1]) for i in result]
        # Exact title+year matches take priority over IMDb-probed candidates.
        match = [i[0] for i in r if tvshowtitle == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]
        match2 = [i[0] for i in r]
        # De-duplicate while preserving order.
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                # Otherwise probe each candidate page for the IMDb id.
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'free movies')
                r = re.findall('(tt\d+)', r)
                if imdb in r:
                    url = i
                    break
            except:
                pass
        # NameError here (no candidate matched) falls through to the bare except.
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Collect hoster download links (debrid-only) with quality/size info.

    Parses each <p> block for a "Downloads-Server" host marker, maps it
    to a known hoster, and extracts the link, 1080p/HD quality tag and
    the advertised size (normalised to GB). Returns a list of source
    dicts; failures are silent per the scraper convention.
    """
    try:
        sources = []
        if url == None: return sources
        # This provider is only useful through a debrid service.
        if debrid.status() == False: raise Exception()
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        links = client.parseDOM(r, 'p')
        hostDict = hostprDict + hostDict
        # Map bare host names (no TLD) back to their full domain.
        locDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]
        for link in links:
            try:
                host = re.findall('Downloads-Server(.+?)(?:\'|\")\)', link)[0]
                host = host.strip().lower().split()[-1]
                if host == 'fichier': host = '1fichier'
                host = [x[1] for x in locDict if host == x[0]][0]
                if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                url = client.parseDOM(link, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                r = client.parseDOM(link, 'a')[0]
                fmt = r.strip().lower().split()
                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                # FIX: skip non-HD releases explicitly; previously 'quality'
                # carried over from the prior iteration (or was unbound on
                # the first), mislabelling or accidentally skipping links.
                else: raise Exception()
                try:
                    # Normalise the advertised size to GB for the info label.
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', r)[-1]
                    div = 1 if size.endswith(' GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                    info = '%.2f GB' % size
                except:
                    info = ''
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            except:
                pass
        return sources
    except:
        return sources
def searchMovie(self, title):
    """Search the site for *title* and return the first exact-match URL."""
    try:
        title = cleantitle.normalize(title)
        query = self.search_link_2 % urllib.quote_plus(cleantitle.getsearch(title))
        query = urlparse.urljoin(self.base_link, query)
        html = client.request(query, timeout='10')
        items = client.parseDOM(html, 'div', attrs={'class': 'ml-item'})
        pairs = zip(client.parseDOM(items, 'a', ret='href'),
                    client.parseDOM(items, 'a', ret='title'))
        wanted = cleantitle.get(title)
        matches = [href for href, name in pairs if wanted == cleantitle.get(name)]
        # IndexError on no match is swallowed below, yielding None.
        return matches[0]
    except:
        return
def resolve(self, url):
    """Fetch the page and return the embedded player's iframe source URL."""
    try:
        page = client.request(url)
        # Site serves latin-1; re-encode to UTF-8 before parsing.
        page = page.decode('iso-8859-1').encode('utf-8')
        embed = client.parseDOM(page, 'div', attrs={'class': 'player-embed'})[0]
        src = client.parseDOM(embed, 'iframe', ret='src')[0]
        return src
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links hidden behind base64 'gtfo' redirect params."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = proxy.request(url, 'link_ite')
        # attrs values act as regexes for parseDOM.
        links = client.parseDOM(result, 'table', attrs={'class': 'link_ite.+?'})
        for i in links:
            try:
                url = client.parseDOM(i, 'a', ret='href')
                url = [x for x in url if 'gtfo' in x][-1]
                url = proxy.parse(url)
                # The real target is base64-encoded in the 'gtfo' query arg.
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['gtfo'][0]
                url = base64.b64decode(url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = host.encode('utf-8')
                # Quality labels on the row; CAM/TS tags downgrade to 'CAM'.
                quality = client.parseDOM(i, 'div', attrs={'class': 'quality'})
                if any(x in ['[CAM]', '[TS]'] for x in quality): quality = 'CAM'
                else: quality = 'SD'
                quality = quality.encode('utf-8')
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect gvideo streams from the site's per-episode stream API."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        h = {'User-Agent': client.agent()}
        r = client.request(url, headers=h, output='extended')
        # r[0] is the response body in 'extended' output mode.
        s = client.parseDOM(r[0], 'ul', attrs={'class': 'episodes'})
        # ret is a regex: grab every data-* attribute value.
        s = client.parseDOM(s, 'a', ret='data.+?')
        # Turn the JSON-ish attribute blobs into query strings.
        s = [client.replaceHTMLCodes(i).replace(':', '=').replace(',', '&').replace('"', '').strip('{').strip('}') for i in s]
        for u in s:
            try:
                url = '/io/1.0/stream?%s' % u
                url = urlparse.urljoin(self.base_link, url)
                r = client.request(url)
                r = json.loads(r)
                url = [i['src'] for i in r['streams']]
                for i in url:
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def movie_info(self, url):
    """Return (year, quality) scraped from the site's info endpoint for *url*."""
    try:
        info_url = urlparse.urljoin(self.base_link, self.info_link)
        page = client.request(info_url % url)
        quality = client.parseDOM(page, 'div', attrs={'class': 'jtip-quality'})[0]
        infos = client.parseDOM(page, 'div', attrs={'class': 'jt-info'})
        # The year is the first info entry that is a bare 4-digit number.
        years = [text.strip() for text in infos
                 if text.strip().isdigit() and len(text.strip()) == 4]
        return years[0], quality
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Collect embed/alternative links and map them to known hosters."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        # (bare-name, full-domain) pairs for hoster lookup.
        hostDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]
        locDict = [i[0] for i in hostDict]
        result = client.request(url)
        links = []
        try:
            # Primary player iframe.
            r = client.parseDOM(result, 'div', attrs = {'class': 'player-embed'})[0]
            r = client.parseDOM(r, 'iframe', ret='src')[0]
            links += [(r, url)]
        except:
            pass
        try:
            # Alternative "generic-video-item" rows: (host label, href).
            r = client.parseDOM(result, 'div', attrs = {'class': 'generic-video-item'})
            r = [(i.split('</div>', 1)[-1].split()[0], client.parseDOM(i, 'a', ret='href', attrs = {'rel': '.+?'})) for i in r]
            links += [(i[0], i[1][0]) for i in r if i[1]]
        except:
            pass
        for i in links:
            try:
                try:
                    # Derive the hoster from the URL's netloc when possible...
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(i[0].strip().lower()).netloc)[0]
                except:
                    # ...otherwise fall back to the raw label text.
                    host = i[0].lower()
                host = host.rsplit('.', 1)[0]
                if not host in locDict: raise Exception()
                host = [x[1] for x in hostDict if x[0] == host][0]
                host = host.encode('utf-8')
                url = i[1]
                url = urlparse.urljoin(self.base_link, url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def movie(self, imdb, title, localtitle, year):
    """Resolve an Indian-language movie id using IMDb's primary_language."""
    try:
        # ISO 639-1 code → the site's language path segment.
        langMap = {'hi': 'hindi', 'ta': 'tamil', 'te': 'telugu', 'ml': 'malayalam', 'kn': 'kannada', 'bn': 'bengali', 'mr': 'marathi', 'pa': 'punjabi'}
        lang = 'http://www.imdb.com/title/%s/' % imdb
        lang = client.request(lang)
        # Hunt IMDb's hrefs for the primary_language query parameter.
        lang = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', lang)
        lang = [i for i in lang if 'primary_language' in i]
        lang = [urlparse.parse_qs(urlparse.urlparse(i).query) for i in lang]
        lang = [i['primary_language'] for i in lang if 'primary_language' in i]
        # KeyError for unsupported languages → bare except → None.
        lang = langMap[lang[0][0]]
        q = self.search_link % (lang, urllib.quote_plus(title))
        q = urlparse.urljoin(self.base_link, q)
        t = cleantitle.get(title)
        r = client.request(q)
        r = client.parseDOM(r, 'li')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h3'), client.parseDOM(i, 'div', attrs={'class': 'info'})) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
        # Numeric id from the href; year from the info div.
        r = [(re.findall('(\d+)', i[0]), i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0][0], i[1], i[2][0]) for i in r if i[0] and i[2]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        url = str(r)
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links base64-decoded from the 'url' redirect param."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = proxy.request(url, 'main_body')
        links = client.parseDOM(result, 'tbody')
        for i in links:
            try:
                url = client.parseDOM(i, 'a', ret='href')[0]
                url = proxy.parse(url)
                # The real target is base64-encoded in the 'url' query arg.
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
                url = base64.b64decode(url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = host.encode('utf-8')
                # The span's CSS class encodes rip quality; others are skipped.
                quality = client.parseDOM(i, 'span', ret='class')[0]
                if quality == 'quality_cam' or quality == 'quality_ts': quality = 'CAM'
                elif quality == 'quality_dvd': quality = 'SD'
                else: raise Exception()
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def resolve(self, url):
    """Validate a YouTube video id and return a Kodi youtube-plugin play URL."""
    try:
        # Extract the bare video id from any of the common URL shapes.
        video_id = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]
        page = client.request('http://www.youtube.com/watch?v=%s' % video_id)
        submessage = ''.join(client.parseDOM(page, 'div', attrs={'id': 'unavailable-submessage'}))
        alerts = client.parseDOM(page, 'div', attrs={'id': 'watch7-notification-area'})
        # Any notification area or unavailable text means the video is dead.
        if alerts:
            raise Exception()
        if re.search('[a-zA-Z]', submessage):
            raise Exception()
        return 'plugin://plugin.video.youtube/play/?video_id=%s' % video_id
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Resolve an episode URL by title, ISO air date, or SxE slug."""
    try:
        if url == None: return
        url = urlparse.urljoin(self.base_link, url)
        result = proxy.request(url, 'main_body')
        result = client.parseDOM(result, 'div', attrs={'class': 'tv_episode_item'})
        title = cleantitle.get(title)
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
        # Keep every row, substituting None where the name/date is missing.
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]
        # Match priority: title+date, then date alone, then SxE in the href.
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]
        url = client.replaceHTMLCodes(url[0][0])
        url = proxy.parse(url)
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def searchMovie(self, title):
    """Search the site for *title* and return its watching-page URL."""
    try:
        title = cleantitle.normalize(title)
        query = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
        html = client.request(query, timeout='10')
        items = client.parseDOM(html, 'div', attrs={'class': 'ml-item'})
        pairs = zip(client.parseDOM(items, 'a', ret='href'),
                    client.parseDOM(items, 'h2'))
        wanted = cleantitle.get(title)
        matches = [href for href, name in pairs if wanted == cleantitle.get(name)]
        # IndexError on no match is swallowed below, yielding None.
        return urlparse.urljoin(self.base_link, '%s/watching.html' % matches[0])
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Collect gvideo streams; builds the page URL from metadata if needed."""
    try:
        sources = []
        if url == None: return sources
        if not str(url).startswith('http'):
            # url is a querystring of metadata rather than a page URL.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url == None: raise Exception()
                r = client.request(url)
                # Verify the page's date matches the expected air year.
                y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]
                y = re.findall('(\d{4})', y)[0]
                if not y == year: raise Exception()
            else:
                url = '%s/movies/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                url = client.request(url, output='geturl')
                if url == None: raise Exception()
                r = client.request(url)
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
        # Player config: (file, label) pairs.
        r = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,.+?label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)
        for i in r:
            try:
                if '1080' in i[1]: quality = '1080p'
                elif '720' in i[1]: quality = 'HD'
                else: raise Exception()
                url = i[0].replace('\/', '/')
                url = client.replaceHTMLCodes(url)
                # Only proxied (.php) links are accepted here.
                if not '.php' in i[0]: raise Exception()
                url = url.encode('utf-8')
                sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect gvideo streams from the site's token-keyed player endpoint."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs={'class': 'player_wraper'})
        r = client.parseDOM(r, 'iframe', ret='src')[0]
        r = urlparse.urljoin(url, r)
        r = client.request(r, referer=url)
        # The player endpoint needs three tokens scraped from hidden
        # divs (k2, k1) and a number embedded in the body's style attr.
        a = client.parseDOM(r, 'div', ret='value', attrs={'id': 'k2'})[-1]
        b = client.parseDOM(r, 'div', ret='value', attrs={'id': 'k1'})[-1]
        c = client.parseDOM(r, 'body', ret='style')[0]
        c = re.findall('(\d+)', c)[-1]
        r = '/player/%s?s=%s&e=%s' % (a, b, c)
        r = urlparse.urljoin(url, r)
        r = client.request(r, referer=url)
        r = re.findall('"(?:url|src)"\s*:\s*"(.+?)"', r)
        for i in r:
            try:
                sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect gvideo streams from embedded vidstreaming iframes."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        r = client.parseDOM(r, 'iframe', ret='src')
        for u in r:
            try:
                # NOTE(review): this only rejects iframes that are BOTH
                # non-http AND non-vidstreaming; confirm the filter was not
                # meant to require both conditions instead.
                if not u.startswith('http') and not 'vidstreaming' in u: raise Exception()
                url = client.request(u)
                url = client.parseDOM(url, 'source', ret='src')
                for i in url:
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def onlinedizi_tvcache(self):
    """Scrape the full series index and return (path, cleaned-title) pairs."""
    try:
        html = client.request(self.base_link)
        listing = client.parseDOM(html, 'ul', attrs={'class': 'all-series-list.+?'})[0]
        entries = client.parseDOM(listing, 'li')
        pairs = [(client.parseDOM(item, 'a', ret='href'), client.parseDOM(item, 'a'))
                 for item in entries]
        pairs = [(hrefs[-1], names[0]) for hrefs, names in pairs
                 if len(hrefs) > 0 and len(names) > 0]
        # Keep only the "/diziler/<slug>/" path and strip HTML entities.
        path_re = re.compile('http.+?//.+?/diziler(/.+?/)')
        pairs = [(path_re.findall(href), re.sub('&#\d*;', '', name))
                 for href, name in pairs]
        return [(paths[0], cleantitle.get(name)) for paths, name in pairs if len(paths) > 0]
    except:
        return
def episodeAbsoluteNumber(self, thetvdb, season, episode):
    """Map a season/episode pair to TheTVDB's absolute episode number.

    Falls back to returning the original episode value if the lookup
    fails for any reason.
    """
    try:
        # The API key is stored base64-obfuscated in the URL template.
        url = 'http://thetvdb.com/api/%s/series/%s/default/%01d/%01d' % ('MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64'), thetvdb, int(season), int(episode))
        r = client.request(url)
        episode = client.parseDOM(r, 'absolute_number')[0]
        return int(episode)
    except:
        pass
    return episode
def dizigold_tvcache(self):
    """Scrape the series index block and return (path, cleaned-title) pairs."""
    try:
        html = client.request(self.base_link)
        block = client.parseDOM(html, 'div', attrs={'class': 'dizis'})[0]
        anchors = re.compile('href="(.+?)">(.+?)<').findall(block)
        # Reduce absolute hrefs to site-relative paths and normalise titles.
        return [(re.sub('http.+?//.+?/', '/', href),
                 cleantitle.get(re.sub('&#\d*;', '', label)))
                for href, label in anchors]
    except:
        return
def searchMovie(self, title):
    """POST a keyword search and return (href, title) result pairs, or None."""
    try:
        title = cleantitle.normalize(title)
        search_url = urlparse.urljoin(self.base_link, self.search_link)
        payload = urllib.urlencode({'keyword': title})
        response = client.request(search_url, post=payload, XHR=True)
        content = json.loads(response)['content']
        results = zip(client.parseDOM(content, 'a', ret='href', attrs={'class': 'ss-title'}),
                      client.parseDOM(content, 'a', attrs={'class': 'ss-title'}))
        # Empty result sets fall through and yield None.
        if results:
            return results
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Resolve a show slug via the site's search API or HTML fallback."""
    try:
        t = cleantitle.get(tvshowtitle)
        q = urllib.quote_plus(cleantitle.query(tvshowtitle))
        p = urllib.urlencode({'term': q})
        r = client.request(self.search_link, post=p, XHR=True)
        try: r = json.loads(r)
        except: r = None
        # NOTE(review): this unconditional assignment discards the JSON
        # response and always forces the HTML fallback below — possibly a
        # deliberate kill-switch for a broken API, but confirm.
        r = None
        if r:
            r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
        else:
            r = proxy.request(self.search_link_2 % q, 'tv shows')
            # attrs value acts as a regex for parseDOM.
            r = client.parseDOM(r, 'div', attrs={'valign': '.+?'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
            r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
        # Exact title and year match; IndexError → bare except → None.
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        url = r[0][0]
        url = proxy.parse(url)
        # Only the final slug segment of the path is kept.
        url = url.strip('/').split('/')[-1]
        url = url.encode('utf-8')
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Resolve an episode URL from a series page via itemprop markup."""
    try:
        if url == None: return
        url = '%s/serie/%s' % (self.base_link, url)
        r = proxy.request(url, 'tv shows')
        r = client.parseDOM(r, 'li', attrs={'itemprop': 'episode'})
        t = cleantitle.get(title)
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'itemprop': 'name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
        # Keep rows, substituting None where name/date is missing; the
        # name span's last whitespace-separated word is the episode title.
        r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
        r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
        r = [(i[0][0], i[1], i[2]) for i in r if i[0]]
        # Match priority: title+date, then title alone, then date alone;
        # ambiguity (0 or >1 survivors) at the end aborts the lookup.
        url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if not url: url = [i for i in r if t == cleantitle.get(i[1])]
        if len(url) > 1 or not url: url = [i for i in r if premiered == i[2]]
        if len(url) > 1 or not url: raise Exception()
        url = url[0][0]
        url = proxy.parse(url)
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def searchMovie(self, title, year):
    """Search the site and return the movie path matching title and year."""
    try:
        title = cleantitle.normalize(title)
        # Apostrophes become dashes in this site's slugs.
        url = urlparse.urljoin(self.base_link, self.moviesearch_link % (cleantitle.geturl(title.replace('\'', '-'))))
        r = client.request(url)
        t = cleantitle.get(title)
        r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Split "Title (Year" out of the anchor title attribute.
        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        # NOTE(review): substring containment ('t in ...') rather than exact
        # equality — the broader match may be deliberate; confirm.
        r = [i[0] for i in r if t in cleantitle.get(i[1]) and year == i[2]][0]
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        return url.encode('utf-8')
    except:
        return