# -*- coding: utf-8 -*-
# Python 2 Kodi scraper methods. The helper imports below are assumed to come
# from the surrounding add-on package (Exodus/Covenant-style layout); each
# method belongs to a scraper class that defines self.base_link,
# self.search_link, etc.
import re
import base64
import urllib
import urlparse
import traceback

import cfscrape

from resources.lib.modules import client
from resources.lib.modules import cleantitle
from resources.lib.modules import source_utils
from resources.lib.modules import log_utils
from resources.lib.modules import dom_parser2


def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return
        scraper = cfscrape.create_scraper()

        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['premiered'], url['season'], url['episode'] = premiered, season, episode

        try:
            # Search the site for the show's season page and keep the first
            # result whose cleaned title matches exactly.
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
            search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
            r = scraper.get(search_url).content
            r = client.parseDOM(r, 'div', attrs={'id': 'movie-featured'})
            r = [(client.parseDOM(i, 'a', ret='href'), re.findall('<b><i>(.+?)</i>', i)) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
            url = r[0][0]
        except:
            pass

        # Pull the episode list from the season page and return the link
        # whose label matches the episode number.
        data = scraper.get(url).content
        data = client.parseDOM(data, 'div', attrs={'id': 'details'})
        data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
        url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
        return url[0][1]
    except:
        return
def links(self, url):
    urls = []
    try:
        if url is None:
            return
        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs={'class': 'entry'})
        r = client.parseDOM(r, 'a', ret='href')
        r1 = [i for i in r if 'money' in i][0]
        r = client.request(r1)
        r = client.parseDOM(r, 'div', attrs={'id': 'post-\d+'})[0]

        if 'enter the password' in r:
            # Password-protected post: submit the form once to collect the
            # cookie, then re-request the page with it. The post password is
            # masked in the source ('******').
            plink = client.parseDOM(r, 'form', ret='action')[0]
            post = {'post_password': '******', 'Submit': 'Submit'}
            send_post = client.request(plink, post=post, output='cookie')
            link = client.request(r1, cookie=send_post)
        else:
            link = client.request(r1)

        link = re.findall('<strong>Single(.+?)</tr', link, re.DOTALL)[0]
        link = client.parseDOM(link, 'a', ret='href')
        link = [i.split('=')[-1] for i in link]
        urls.extend(link)
        return urls
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        scraper = cfscrape.create_scraper()
        r = scraper.get(url).content

        try:
            # Some pages hide the player behind a Base64-encoded
            # document.write(); decode it and grab the iframe source.
            v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
            b64 = base64.b64decode(v)
            url = client.parseDOM(b64, 'iframe', ret='src')[0]
            try:
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'en',
                    'url': url.replace('\/', '/'),
                    'direct': False,
                    'debridonly': False
                })
            except:
                pass
        except:
            pass

        # Plain hoster rows.
        r = client.parseDOM(r, 'div', attrs={'class': 'server_line'})
        r = [(client.parseDOM(i, 'a', ret='href')[0],
              client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
        for i in r:
            try:
                host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                url = i[0]
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                if 'other' in host:
                    continue
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'en',
                    'url': url.replace('\/', '/'),
                    'direct': False,
                    'debridonly': False
                })
            except:
                pass
        return sources
    except Exception:
        return sources
def resolve(self, url):
    if self.base_link in url:
        # The player page wraps the real embed in a Base64-encoded
        # document.write(); decode it to get the iframe URL.
        scraper = cfscrape.create_scraper()
        url = scraper.get(url).content
        v = re.findall('document.write\(Base64.decode\("(.+?)"\)', url)[0]
        b64 = base64.b64decode(v)
        url = client.parseDOM(b64, 'iframe', ret='src')[0]
    return url
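# The Base64 unwrap above appears in both sources() and resolve(); a shared
# helper could look like the sketch below. _decode_base64_iframe is a
# hypothetical name, not part of the original module.
def _decode_base64_iframe(html):
    """Return the iframe src hidden in a Base64 document.write(), or None."""
    try:
        payload = re.findall('document.write\(Base64.decode\("(.+?)"\)', html)[0]
        return client.parseDOM(base64.b64decode(payload), 'iframe', ret='src')[0]
    except:
        return None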
def episodeAbsoluteNumber(self, thetvdb, season, episode):
    try:
        # Look up the absolute episode number via the TVDB XML API
        # (the API key is stored Base64-encoded).
        url = 'http://thetvdb.com/api/%s/series/%s/default/%01d/%01d' % (
            'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64'), thetvdb, int(season), int(episode))
        return int(client.parseDOM(client.request(url), 'absolute_number')[0])
    except:
        pass
    return episode
def getTVShowTranslation(self, thetvdb, lang):
    try:
        url = 'http://thetvdb.com/api/%s/series/%s/%s.xml' % (
            'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64'), thetvdb, lang)
        r = client.request(url)
        title = client.parseDOM(r, 'SeriesName')[0]
        title = client.replaceHTMLCodes(title)
        title = title.encode('utf-8')
        return title
    except:
        pass
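# Note on the two TVDB calls above: 'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64')
# is the Python 2 spelling of a Base64 decode. The equivalent call through the
# base64 module (a sketch, should this ever move off str.decode) would be:
#
#   api_key = base64.b64decode('MUQ2MkYyRjkwMDMwQzQ0NA==')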
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['title'].replace(':', '').lower()
        year = data['year']

        query = '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = urlparse.urljoin(self.base_link, self.post_link)
        post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(query)

        r = client.request(url, post=post)
        r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
        r = [dom_parser2.parse_dom(i, 'div', attrs={'class': 'news-title'}) for i in r if data['imdb'] in i]
        r = [dom_parser2.parse_dom(i[0], 'a', req='href') for i in r if i]
        r = [(i[0].attrs['href'], i[0].content) for i in r if i]

        hostDict = hostprDict + hostDict

        for item in r:
            try:
                name = item[1]
                y = re.findall('\((\d{4})\)', name)[0]
                if not y == year:
                    raise Exception()

                s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', name)
                s = s[0] if s else '0'

                data = client.request(item[0])
                data = dom_parser2.parse_dom(data, 'div', attrs={'id': 'r-content'})
                data = re.findall('\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>', data[0].content, re.DOTALL)
                u = [(i[0], i[1], s) for i in data if i]

                for name, url, size in u:
                    try:
                        # Membership tests below check each token against the
                        # release name; the original iterated over the name's
                        # characters, so the SCR/CAM branches could never match.
                        if '4K' in name:
                            quality = '4K'
                        elif '1080p' in name:
                            quality = '1080p'
                        elif '720p' in name:
                            quality = '720p'
                        elif any(i in name.lower() for i in ['dvdscr', 'r5', 'r6']):
                            quality = 'SCR'
                        elif any(i in name.lower() for i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts']):
                            quality = 'CAM'
                        else:
                            quality = '720p'

                        info = []
                        if '3D' in name or '.3D.' in url:
                            info.append('3D')
                            quality = '1080p'
                        if any(i in name.lower() for i in ['hevc', 'h265', 'x265']):
                            info.append('HEVC')

                        try:
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', size)[-1]
                            div = 1 if size.endswith(('Gb', 'GiB', 'GB')) else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                            size = '%.2f GB' % size
                            info.append(size)
                        except:
                            pass

                        info = ' | '.join(info)

                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')
                        if any(x in url for x in ['.rar', '.zip', '.iso', 'turk']):
                            continue

                        if 'ftp' in url:
                            host = 'COV'
                            direct = True
                        else:
                            direct = False
                            host = 'turbobit.net'
                        #if not host in hostDict: continue
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': direct,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('UltraHD - Exception: \n' + str(failure))
        return sources
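# The size-string handling in sources() above reappears, with small
# variations, in several of the sources() methods below. A hedged sketch of a
# shared helper (hypothetical name; not part of the original module):
def _format_size(text):
    """Return the last 'NNN GB/MB'-style token in text as 'X.XX GB', or ''."""
    try:
        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', text)[-1]
        div = 1 if size.endswith(('Gb', 'GiB', 'GB')) else 1024
        return '%.2f GB' % (float(re.sub('[^0-9\.]', '', size)) / div)
    except:
        return ''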
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources

        hostDict = hostprDict + hostDict

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        # Note: the episode handler embeds the year (e.g. '2017S01E02'),
        # per this scraper's matching scheme.
        hdlr = '%sS%02dE%02d' % (data['year'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s %s S%02dE%02d' % (data['tvshowtitle'], data['year'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        items = []
        try:
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = self.scraper.get(url).content
            posts = client.parseDOM(r, 'div', attrs={'class': 'post'})

            dupes = []
            for post in posts:
                try:
                    t = client.parseDOM(post, 'a')[0]
                    t = re.sub('<.+?>|</.+?>', '', t)

                    x = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)
                    if not cleantitle.get(title) in cleantitle.get(x):
                        raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', t)[-1].upper()
                    if not y == hdlr:
                        raise Exception()

                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', t.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]
                    #if not any(i in ['1080p', '720p'] for i in fmt): raise Exception()

                    # Stop after three matching posts.
                    if len(dupes) > 2:
                        raise Exception()
                    dupes += [x]

                    u = client.parseDOM(post, 'a', ret='href')[0]
                    r = self.scraper.get(u).content
                    u = client.parseDOM(r, 'a', ret='href')
                    u = [(i.strip('/').split('/')[-1], i) for i in u]
                    items += u
                except:
                    pass
        except:
            pass

        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)

                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    raise Exception()

                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr:
                    raise Exception()

                quality, info = source_utils.get_release_quality(name, item[1])

                url = item[1]
                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) title = data['tvshowtitle'] if 'tvshowtitle' in data else data[ 'title'] hdlr = 'S%02dE%02d' % (int(data['season']), int( data['episode'])) if 'tvshowtitle' in data else data['year'] query = '%s S%02dE%02d' % ( data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % ( data['title'], data['year']) query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query) url = self.search_link % urllib.quote_plus(query) url = urlparse.urljoin(self.base_link, url) r = self.scraper.get(url).content posts = client.parseDOM(r, 'item') hostDict = hostprDict + hostDict items = [] for post in posts: try: t = client.parseDOM(post, 'title')[0] c = client.parseDOM(post, 'content.+?')[0] u = client.parseDOM(c, 'p') u = [client.parseDOM(i, 'a', ret='href') for i in u] u = [i[0] for i in u if len(i) == 1] if not u: raise Exception() if 'tvshowtitle' in data: u = [(re.sub('(720p|1080p)', '', t) + ' ' + [x for x in i.strip('//').split('/')][-1], i) for i in u] else: u = [(t, i) for i in u] items += u except: pass for item in items: try: name = item[0] name = client.replaceHTMLCodes(name) t = re.sub( '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name) if not cleantitle.get(t) == cleantitle.get(title): raise Exception() y = re.findall( '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper() if not y == hdlr: raise Exception() fmt = re.sub( '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper()) fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt) fmt = [i.lower() for i in fmt] if any( i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception() if any(i in ['extras'] for i in fmt): raise Exception() if '1080p' in fmt: quality = '1080p' elif '720p' in fmt: quality = 'HD' else: quality = 'SD' if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR' elif any(i in [ 'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts' ] for i in fmt): quality = 'CAM' info = [] if '3d' in fmt: info.append('3D') try: size = re.findall( '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1] div = 1 if size.endswith(('GB', 'GiB')) else 1024 size = float(re.sub('[^0-9|/.|/,]', '', size)) / div size = '%.2f GB' % size info.append(size) except: pass if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC') info = ' | '.join(info) url = item[1] if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception() url = client.replaceHTMLCodes(url) url = url.encode('utf-8') host = re.findall( '([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0] if not host in hostDict: raise Exception() host = client.replaceHTMLCodes(host) host = host.encode('utf-8') sources.append({ 'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True }) except: pass check = [i for i in sources if not i['quality'] == 'CAM'] if check: sources = check return sources except: return sources
def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) title = data['tvshowtitle'] if 'tvshowtitle' in data else data[ 'title'] hdlr = 'S%02dE%02d' % (int(data['season']), int( data['episode'])) if 'tvshowtitle' in data else data['year'] query = '%s S%02dE%02d' % ( data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % ( data['title'], data['year']) query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query) s = client.request(self.base_link) s = re.findall('\'(http.+?)\'', s) + re.findall('\"(http.+?)\"', s) s = [ i for i in s if urlparse.urlparse(self.base_link).netloc in i and len(i.strip('/').split('/')) > 3 ] s = s[0] if s else urlparse.urljoin(self.base_link, 'posts') s = s.strip('/') url = s + self.search_link % urllib.quote_plus(query) r = client.request(url) r = client.parseDOM(r, 'h2', attrs={'class': 'post-title .+?'}) l = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title')) r = [(i[0], i[1], re.sub('(\.|\(|\[|\s)(\d{4}|3D)(\.|\)|\]|\s|)(.+|)', '', i[1]), re.findall('[\.|\(|\[|\s](\d{4}|)([\.|\)|\]|\s|].+)', i[1])) for i in l] r = [(i[0], i[1], i[2], i[3][0][0], i[3][0][1]) for i in r if i[3]] r = [(i[0], i[1], i[2], i[3], re.split('\.|\(|\)|\[|\]|\s|\-', i[4])) for i in r] r = [ i for i in r if cleantitle.get(title) == cleantitle.get(i[2]) and data['year'] == i[3] ] r = [ i for i in r if not any(x in i[4] for x in [ 'HDCAM', 'CAM', 'DVDR', 'DVDRip', 'DVDSCR', 'HDTS', 'TS', '3D' ]) ] r = [i for i in r if '1080p' in i[4] ][:1] + [i for i in r if '720p' in i[4]][:1] if 'tvshowtitle' in data: posts = [(i[1], i[0]) for i in l] else: posts = [(i[1], i[0]) for i in l] hostDict = hostprDict + hostDict items = [] for post in posts: try: t = post[0] u = client.request(post[1]) u = re.findall('"(http.+?)"', u) + re.findall( '"(http.+?)"', u) u = [i for i in u if not '/embed/' in i] u = [i for i in u if not 'youtube' in i] items += [(t, i) for i in u] except: pass for item in items: try: name = item[0] name = client.replaceHTMLCodes(name) t = re.sub( '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name) if not cleantitle.get(t) == cleantitle.get(title): raise Exception() y = re.findall( '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper() if not y == hdlr: raise Exception() fmt = re.sub( '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper()) fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt) fmt = [i.lower() for i in fmt] print fmt if any( i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception() if any(i in ['extras'] for i in fmt): raise Exception() if '1080p' in fmt: quality = '1080p' elif '720p' in fmt: quality = '720p' else: quality = '720p' if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR' elif any(i in [ 'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts' ] for i in fmt): quality = 'CAM' info = [] if '3d' in fmt: info.append('3D') try: size = re.findall( '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1] div = 1 if size.endswith(('GB', 'GiB')) else 1024 size = float(re.sub('[^0-9|/.|/,]', '', size)) / div size = '%.2f GB' % size info.append(size) except: pass if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC') info = ' | '.join(info) url = item[1] if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception() url = 
client.replaceHTMLCodes(url) url = url.encode('utf-8') host = re.findall( '([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0] if not host in hostDict: raise Exception() host = client.replaceHTMLCodes(host) host = host.encode('utf-8') sources.append({ 'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True }) except: pass check = [i for i in sources if not i['quality'] == 'CAM'] if check: sources = check return sources except: return
def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) title = data['tvshowtitle'] if 'tvshowtitle' in data else data[ 'title'] hdlr = 'S%02dE%02d' % (int(data['season']), int( data['episode'])) if 'tvshowtitle' in data else data['year'] query = '%s S%02dE%02d' % ( data['tvshowtitle'], int(data['season']), int(data['episode'])) \ if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year']) url = self.search_link % urllib.quote_plus(query) url = urlparse.urljoin(self.base_link, url).replace('-', '+') r = client.request(url) if r == None and 'tvshowtitle' in data: season = re.search('S(.*?)E', hdlr) season = season.group(1) url = title r = client.request(url) for loopCount in range(0, 2): if loopCount == 1 or (r == None and 'tvshowtitle' in data): r = client.request(url) posts = client.parseDOM( r, "div", attrs={"class": "postpage_movie_download"}) hostDict = hostprDict + hostDict items = [] for post in posts: try: u = client.parseDOM(post, 'a', ret='href') for i in u: try: name = str(i) items.append(name) print items except: pass except: pass if len(items) > 0: break for item in items: try: info = [] i = str(item) r = client.request(i) u = client.parseDOM(r, "div", attrs={"class": "multilink_lnks"}) for t in u: r = client.parseDOM(t, 'a', ret='href') for url in r: if '1080p' in url: quality = '1080p' elif '1080' in url: quality = '1080p' elif '720p' in url: quality = '720p' elif '720' in url: quality = '720p' elif 'HD' in url: quality = '720p' else: quality = 'SD' info = ' | '.join(info) valid, host = source_utils.is_host_valid( url, hostDict) sources.append({ 'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True }) except: pass check = [i for i in sources if not i['quality'] == 'CAM'] if check: sources = check return sources except: return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        scraper = cfscrape.create_scraper()
        if url is None:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('[\\\\:;*?"<>|/ \+\']+', '-', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = scraper.get(url).content
        r = client.parseDOM(r, 'div', attrs={'class': 'entry-content'})[0]
        r = re.sub('shareaholic-canvas.+', '', r, flags=re.DOTALL)

        # Anchor links and their labels.
        a_txt = client.parseDOM(r, 'a', attrs={'href': '.+?'})
        a_url = client.parseDOM(r, 'a', ret='href')
        r = re.sub('<a .+?</a>', '', r, flags=re.DOTALL)
        r = re.sub('<img .+?>', '', r, flags=re.DOTALL)

        # Links inside <pre> blocks, tagged with the size found in each block.
        size = ''
        pre_txt = []
        pre_url = []
        pres = client.parseDOM(r, 'pre', attrs={'style': '.+?'})
        for pre in pres:
            try:
                size = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', pre)[0]
            except:
                pass
            url0 = re.findall('https?://[^ <"\'\s]+', pre, re.DOTALL)
            pre_url = pre_url + url0
            pre_txt = pre_txt + [size] * len(url0)
        r = re.sub('<pre .+?</pre>', '', r, flags=re.DOTALL)

        # Raw links in the remaining markup.
        size = ''
        if not 'tvshowtitle' in data:
            try:
                size = ' ' + re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', r)[0]
            except:
                pass
        raw_url = re.findall('https?://[^ <"\'\s]+', r, re.DOTALL)
        raw_txt = [size] * len(raw_url)

        pairs = zip(a_url + pre_url + raw_url, a_txt + pre_txt + raw_txt)

        # Build the combined host list once instead of per pair.
        hostDict = hostDict + hostprDict

        for pair in pairs:
            try:
                url = str(pair[0])
                info = re.sub('<.+?>', '', pair[1])

                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                if not query.lower() in re.sub('[\\\\:;*?"<>|/ \+\'\.]+', '-', url + info).lower():
                    raise Exception()

                size0 = info + ' ' + size
                try:
                    size0 = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', size0)[0]
                    div = 1 if size0.endswith(('GB', 'GiB')) else 1024
                    size0 = float(re.sub('[^0-9\.]', '', size0)) / div
                    size0 = '%.2f GB' % size0
                except:
                    size0 = ''

                quality, info = source_utils.get_release_quality(url, info)
                info.append(size0)
                info = ' | '.join(info)

                url = url.encode('utf-8')
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)
        posts = client.parseDOM(r, 'item')

        hostDict = hostprDict + hostDict

        items = []
        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]
                u = client.parseDOM(post, 'link')[0]
                s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', t)
                s = s[0] if s else '0'
                items += [(t, u, s)]
            except:
                pass

        urls = []
        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)

                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    raise Exception()

                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr:
                    raise Exception()

                quality, info = source_utils.get_release_quality(name, item[1])
                if any(x in quality for x in ['CAM', 'SD']):
                    continue

                try:
                    size = re.sub('i', '', item[2])
                    div = 1 if size.endswith('GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)

                # Expand the post link into the individual hoster links.
                url = item[1]
                links = self.links(url)
                urls += [(i, quality, info) for i in links]
            except:
                pass

        for item in urls:
            if 'earn-money' in item[0]:
                continue
            if any(x in item[0] for x in ['.rar', '.zip', '.iso']):
                continue
            url = client.replaceHTMLCodes(item[0])
            url = url.encode('utf-8')

            valid, host = source_utils.is_host_valid(url, hostDict)
            if not valid:
                continue
            host = client.replaceHTMLCodes(host)
            host = host.encode('utf-8')

            sources.append({
                'source': host,
                'quality': item[1],
                'language': 'en',
                'url': url,
                'info': item[2],
                'direct': False,
                'debridonly': True
            })
        return sources
    except:
        return sources
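# For reference, the `url` argument each sources() method above receives is a
# querystring built elsewhere in the add-on. A hedged example of its shape
# (field values illustrative only):
#
#   url = urllib.urlencode({'imdb': 'tt0000000', 'title': 'Some Movie',
#                           'tvshowtitle': 'Some Show', 'year': '2017',
#                           'season': '1', 'episode': '2'})
#   # urlparse.parse_qs(url) then yields the `data` dict unpacked above.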