def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Resolve a show page URL to the relative URL of one episode.

    Matches by episode title + air date first, then air date alone, then
    by the /season-X-episode-Y/ slug. Returns a utf-8 encoded
    site-relative path, or None on any failure.
    """
    try:
        if url == None: return

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'main_body')
        # One container div per listed episode.
        result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

        title = cleantitle.get(title)

        # For each item: ([hrefs], [episode name spans], [YYYY-MM-DD air dates]).
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
        # Flatten the optional name/date lists to scalars (None when absent).
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        # Drop items without a link; keep the first href.
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

        # Preferred match: cleaned title AND premiere date both agree.
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        # Fallback: premiere date alone.
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        # Last resort (no match, or date ambiguous): match the URL slug.
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

        url = client.replaceHTMLCodes(url[0][0])
        url = proxy.parse(url)
        # Strip scheme+host, keeping only the site-relative path.
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Collect sources from a detail page: the embedded iframe player plus
    one entry per row of the hoster table.

    Fixes vs. original: (1) a single malformed table row no longer aborts
    the whole table — per-row parsing is inside the try; (2) the failure
    path returns the partial ``sources`` list instead of None, matching
    the sibling ``sources`` implementations.
    """
    sources = []
    try:
        url = urlparse.urljoin(self.base_link, url)
        r = cache.get(client.request, 1, url)

        # Primary stream: iframe whose data-src carries the target after '='.
        try:
            v = client.parseDOM(r, 'iframe', ret='data-src')[0]
            url = v.split('=')[1]
            try:
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': False, 'debridonly': False})
            except:
                pass
        except:
            pass

        # Hoster table: first <td> holds the host label, first anchor the link.
        rows = client.parseDOM(r, 'tbody')
        rows = client.parseDOM(rows, 'tr')
        for row in rows:
            try:
                host = re.findall('<td>(.+?)</td>', row)[0]
                link = client.parseDOM(row, 'a', ret='href')[0]
                url = urlparse.urljoin(self.base_link, link)
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                # 'other' rows are catch-all placeholders, not real hosts.
                if 'other' in host: continue
                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links for the given page; each qualifying row in a
    <tbody> block becomes one source dict. Returns the (possibly empty)
    list on any failure."""
    sources = []
    try:
        if url == None:
            return sources

        page = proxy.request(urlparse.urljoin(self.base_link, url), 'main_body')

        for block in client.parseDOM(page, 'tbody'):
            try:
                # The real target is base64-encoded in the 'url' query parameter.
                link = client.parseDOM(block, 'a', ret='href')[0]
                link = proxy.parse(link)
                link = urlparse.parse_qs(urlparse.urlparse(link).query)['url'][0]
                link = client.replaceHTMLCodes(base64.b64decode(link))
                link = link.encode('utf-8')

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                if not host in hostDict:
                    continue
                host = host.encode('utf-8')

                # The span's CSS class doubles as the quality label.
                label = client.parseDOM(block, 'span', ret='class')[0]
                quality, info = source_utils.get_release_quality(label, link)

                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Find the site's page for a show via its TVDb id; returns a utf-8
    site-relative URL, or None when no title+year match is found."""
    try:
        # Prefer TVMaze's canonical name for the show over the passed title.
        show = tvmaze.tvMaze().showLookup('thetvdb', tvdb)
        tvshowtitle = show['name']

        wanted = cleantitle.get(tvshowtitle)

        query = urlparse.urljoin(self.base_link, self.search_link)
        query = query % urllib.quote_plus(tvshowtitle)
        html = client.request(query)

        items = client.parseDOM(client.parseDOM(html, 'ul', attrs={'class': 'items'}), 'li')

        # (hrefs, titles, 4-digit years) per result item.
        candidates = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i)) for i in items]
        candidates = [(c[0][0], c[1][0], c[2][-1]) for c in candidates if c[0] and c[1] and c[2]]
        # Keep entries whose cleaned title and year both match.
        candidates = [c for c in candidates if wanted == cleantitle.get(c[1]) and year == c[2]]

        href = candidates[0][0]
        # Reduce to the site-relative path.
        path = re.findall('(?://.+?|)(/.+)', href)[0]
        path = client.replaceHTMLCodes(path)
        return path.encode('utf-8')
    except:
        return
def strip_domain(url):
    """Reduce *url* to its site-relative path (utf-8 bytes), decoding HTML
    entities; logs the exception and returns None on failure."""
    try:
        # Only strip when the value looks like an absolute URL or a path.
        if url.lower().startswith('http') or url.startswith('/'):
            url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        return url.encode('utf-8')
    except:
        log_exception()
def getTVShowTranslation(self, thetvdb, lang):
    """Fetch the localized series name for *thetvdb* in language *lang*
    from TheTVDB's XML API; returns utf-8 bytes, or None on failure."""
    try:
        # API key is stored base64-obfuscated in the source.
        api_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64')
        url = 'http://thetvdb.com/api/%s/series/%s/%s.xml' % (api_key, thetvdb, lang)

        response = client.request(url)

        name = client.parseDOM(response, 'SeriesName')[0]
        name = client.replaceHTMLCodes(name)
        return name.encode('utf-8')
    except:
        pass
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Build the show URL's /season/N/episode/M path for one episode.

    Requires account credentials; returns utf-8 bytes or None.
    """
    try:
        # This provider is useless without a configured account.
        if self.user == '' or self.password == '':
            raise Exception()
        if url == None:
            return

        episode_url = '%s/season/%01d/episode/%01d' % (url, int(season), int(episode))
        episode_url = client.replaceHTMLCodes(episode_url)
        return episode_url.encode('utf-8')
    except:
        return
def parse(url):
    """Best-effort unwrap of a proxied link: decode HTML entities, then
    pull the real target out of the 'u' and 'q' query parameters in turn.
    Each step is optional; the input is returned unchanged if none apply."""
    try:
        url = client.replaceHTMLCodes(url)
    except:
        pass
    # Apply each unwrapping key to the (possibly already unwrapped) value.
    for key in ('u', 'q'):
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)[key][0]
        except:
            pass
    return url
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Search the site for a show and return its relative URL (utf-8), or None.

    Strategy: scrape the per-session search key, run the title search
    (including page 2 when paginated), filter by title/year, then fall
    back to probing candidate pages for the IMDb id.
    """
    try:
        # The search form requires a per-session key scraped from the site.
        key = urlparse.urljoin(self.base_link, self.key_link)
        key = proxy.request(key, 'main_body')
        key = client.parseDOM(key, 'input', ret='value', attrs = {'name': 'key'})[0]

        query = self.tvsearch_link % (urllib.quote_plus(cleantitle.query(tvshowtitle)), key)
        query = urlparse.urljoin(self.base_link, query)

        result = str(proxy.request(query, 'main_body'))
        # Pull in the second results page when pagination is present.
        if 'page=2' in result or 'page%3D2' in result: result += str(proxy.request(query + '&page=2', 'main_body'))

        result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})

        # Site titles are of the form 'Watch <name> (<year>)'.
        tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
        # Accept the requested year plus/minus one.
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]

        r = [(proxy.parse(i[0]), i[1]) for i in result]

        # Exact title + exact year matches take priority.
        match = [i[0] for i in r if tvshowtitle == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

        # All candidates, de-duplicated while preserving order.
        match2 = [i[0] for i in r]
        match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        # Otherwise probe up to five candidate pages for the IMDb id.
        for i in match2[:5]:
            try:
                if len(match) > 0: url = match[0] ; break
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'main_body')
                r = re.findall('(tt\d+)', r)
                if imdb in r: url = i ; break
            except:
                pass

        # Reduce to the site-relative path.
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    """Search the (account-gated) site's JSON endpoint for a movie and
    verify the release year on its detail page; returns a utf-8 relative
    URL or None."""
    try:
        if self.user == '' or self.password == '':
            raise Exception()

        wanted = cleantitle.get(title)

        search_url = urlparse.urljoin(self.base_link, self.search_link)
        post = urllib.urlencode({'q': title.rsplit(':', 1)[0], 'limit': '10', 'timestamp': int(time.time() * 1000), 'verifiedCheck': ''})
        results = json.loads(client.request(search_url, post=post, XHR=True))

        # Movie-type entries whose cleaned title matches; probe at most two.
        movies = [i for i in results if i['meta'].strip().split()[0].lower() == 'movie']
        permalinks = [i['permalink'] for i in movies if wanted == cleantitle.get(i['title'])][:2]

        for permalink in permalinks:
            page = client.request(urlparse.urljoin(self.base_link, permalink))
            if page == None:
                continue
            # Flatten markup/whitespace, then confirm the release year.
            flat = re.sub('\s|<.+?>|</.+?>', '', page)
            found = re.findall('eleased:(\d{4})', flat)
            if found and year in found[0]:
                url = re.findall('(?://.+?|)(/.+)', permalink)[0]
                url = client.replaceHTMLCodes(url)
                return url.encode('utf-8')
        return
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Resolve a movie/episode query string into playable sources.

    ``url`` is a urlencoded query (title/year or tvshowtitle/season/episode
    plus aliases). vidnode.net links are followed through their redirect
    chain and emitted as direct CDN/gvideo entries; other links become
    hoster entries when the host is in hostDict.

    Fix vs. original: a failed fetch inside the vidnode loop used to
    ``continue`` with the same ``link``, retrying the identical failing
    URL forever; it now breaks out of the chain.
    """
    try:
        sources = []
        if url == None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # NOTE(review): eval() on plugin-supplied data; aliases should be a
        # literal list, so ast.literal_eval would be the safer choice here.
        aliases = eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        if url == None: raise Exception()

        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        if 'tvshowtitle' in data:
            ep = data['episode']
            links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
        else:
            links = client.parseDOM(r, 'a', ret='player-data')

        for link in links:
            if 'vidnode.net' in link:
                try:
                    # Follow the vidnode redirect chain, harvesting
                    # file/label pairs from every hop.
                    files = []
                    while True:
                        try:
                            try:
                                r = client.request(link)
                            except:
                                # FIX: was `continue` — `link` cannot change
                                # after a failed fetch, so retrying loops
                                # forever. Stop following the chain instead.
                                break
                            files.extend(re.findall("{file: \'(.+?)\',label: \'(.+?)\'.+?}", r))
                            link = re.findall('window\.location = \"(.+?)\";', r)[0]
                            if not 'vidnode' in link: break
                        except Exception:
                            break
                    for i in files:
                        try:
                            url = i[0]
                            quality = i[1]
                            host = 'CDN'
                            if 'google' in url: host = 'gvideo'
                            # googleusercontent links need proxying to play.
                            if 'lh3.googleusercontent.com' in url:
                                url = directstream.googleproxy(url)
                            sources.append({'source': host, 'quality': source_utils.label_to_quality(quality), 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                        except:
                            pass
                except:
                    pass
            else:
                try:
                    host = urlparse.urlparse(link.strip().lower()).netloc
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
                except:
                    pass

        return sources
    except:
        return sources
def resolve(regex):
    """Evaluate a <regex>-based resolver payload.

    Returns the resolved URL, or a rebuilt '<item>...' listing when the
    matched regex declares a 'listrepeat' template, or None on failure.
    """
    try:
        # Untouched copy of the <regex> section (CDATA still intact), used
        # later when expanding listrepeat templates.
        vanilla = re.compile('(<regex>.+)', re.MULTILINE | re.DOTALL).findall(regex)[0]

        # URL-quote CDATA bodies so the naive tag parsing below cannot
        # break on markup nested inside them.
        cddata = re.compile('<\!\[CDATA\[(.+?)\]\]>', re.MULTILINE | re.DOTALL).findall(regex)
        for i in cddata:
            regex = regex.replace('<![CDATA[' + i + ']]>', urllib.quote_plus(i))

        regexs = re.compile('(<regex>.+)', re.MULTILINE | re.DOTALL).findall(regex)[0]
        regexs = re.compile('<regex>(.+?)</regex>', re.MULTILINE | re.DOTALL).findall(regexs)
        # Each <regex> block becomes {tag name: unquoted tag body}.
        regexs = [ re.compile('<(.+?)>(.*?)</.+?>', re.MULTILINE | re.DOTALL).findall(i) for i in regexs ]
        regexs = [ dict([(client.replaceHTMLCodes(x[0]), client.replaceHTMLCodes(urllib.unquote_plus(x[1]))) for x in i]) for i in regexs ]
        # Index the definitions by their <name> for lookup during parsing.
        regexs = [(i['name'], i) for i in regexs]
        regexs = dict(regexs)

        # The target URL is whatever precedes the first <regex> tag.
        url = regex.split('<regex>', 1)[0].strip()
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        r = getRegexParsed(regexs, url)

        try:
            # listrepeat mode: expand the template once per captured tuple.
            ln = ''
            ret = r[1]
            listrepeat = r[2]['listrepeat']
            regexname = r[2]['name']

            for obj in ret:
                try:
                    # Substitute [name.paramN] placeholders in the template...
                    item = listrepeat
                    for i in range(len(obj) + 1):
                        item = item.replace('[%s.param%s]' % (regexname, str(i)), obj[i - 1])

                    # ...and in the original payload, minus the consumed block.
                    item2 = vanilla
                    for i in range(len(obj) + 1):
                        item2 = item2.replace('[%s.param%s]' % (regexname, str(i)), obj[i - 1])

                    item2 = re.compile('(<regex>.+?</regex>)', re.MULTILINE | re.DOTALL).findall(item2)
                    item2 = [ x for x in item2 if not '<name>%s</name>' % regexname in x ]
                    item2 = ''.join(item2)

                    ln += '\n<item>%s\n%s</item>\n' % (item, item2)
                except:
                    pass

            return ln
        except:
            pass

        # Plain mode: r is (resolved_url, True) on success.
        if r[1] == True:
            return r[0]
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Debrid-only scraper: search by title+year, open matching detail
    pages, and emit one source per release link.

    Fix vs. original: the SCR/CAM/HEVC checks were written as
    ``any(i in ['dvdscr', ...] for i in name)``, which iterates the
    *characters* of ``name`` — a single character never equals a
    multi-character token, so those branches could never match. They now
    test each token for substring membership in the release name.
    """
    try:
        sources = []
        if url == None: return sources
        if debrid.status() is False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['title'].replace(':', '').lower()
        year = data['year']

        query = '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = urlparse.urljoin(self.base_link, self.post_link)
        post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(query)

        r = client.request(url, post=post)
        r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
        # Keep only results mentioning the wanted IMDb id.
        r = [(dom_parser2.parse_dom(i, 'div', attrs={'class': 'news-title'})) for i in r if data['imdb'] in i]
        r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r if i]
        r = [(i[0].attrs['href'], i[0].content) for i in r if i]

        hostDict = hostprDict + hostDict

        for item in r:
            try:
                name = item[1]
                # Release year in the post title must match.
                y = re.findall('\((\d{4})\)', name)[0]
                if not y == year: raise Exception()

                s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', name)
                s = s[0] if s else '0'

                data = client.request(item[0])
                data = dom_parser2.parse_dom(data, 'div', attrs={'id': 'r-content'})
                data = re.findall('\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>', data[0].content, re.DOTALL)
                u = [(i[0], i[1], s) for i in data if i]

                for name, url, size in u:
                    try:
                        if '4K' in name:
                            quality = '4K'
                        elif '1080p' in name:
                            quality = '1080p'
                        elif '720p' in name:
                            quality = '720p'
                        # FIX: token-in-name, not character-in-token-list.
                        elif any(x in name for x in ['dvdscr', 'r5', 'r6']):
                            quality = 'SCR'
                        elif any(x in name for x in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts']):
                            quality = 'CAM'
                        else:
                            quality = '720p'

                        info = []
                        if '3D' in name or '.3D.' in url:
                            info.append('3D'); quality = '1080p'
                        # FIX: same inverted-membership bug as above.
                        if any(x in name for x in ['hevc', 'h265', 'x265']): info.append('HEVC')

                        try:
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', size)[-1]
                            div = 1 if size.endswith(('Gb', 'GiB', 'GB')) else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                            size = '%.2f GB' % size
                            info.append(size)
                        except:
                            pass

                        info = ' | '.join(info)

                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')
                        if any(x in url for x in ['.rar', '.zip', '.iso', 'turk']): continue

                        if 'ftp' in url:
                            host = 'COV'; direct = True
                        else:
                            direct = False; host = 'turbobit.net'
                        #if not host in hostDict: continue
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': direct, 'debridonly': True})
                    except:
                        pass
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """List direct sources for a movie/episode search behind Cloudflare.

    Fix vs. original: the per-post loop rebound ``data`` (the parsed query
    dict) to the anchor DOM node, so the later ``data['season']`` /
    ``data['episode']`` lookups in the episode branch always failed and
    show links were silently dropped. The anchor now has its own name.
    """
    try:
        sources = []
        if url is None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        # Year for movies, 'Season N' for shows — compared to the post title.
        hdlr = 'Season %d' % int(data['season']) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        # The site sits behind Cloudflare; use a scraper session.
        self.scraper = cfscrape.create_scraper()
        r = self.scraper.get(url).content

        posts = client.parseDOM(r, 'li')
        for post in posts:
            try:
                # FIX: anchor kept in its own variable instead of clobbering `data`.
                a = dom_parser2.parse_dom(post, 'a', req='href')[0]
                t = re.findall('title=.+?>\s*(.+?)$', a.content, re.DOTALL)[0]
                # Title with year/SxxExx/3D tail stripped, for comparison.
                t2 = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)
                y = re.findall('[\.|\(|\[|\s](S\d*E\d*|Season\s*\d*|\d{4})[\.|\)|\]|\s]', t)[-1]
                if not (cleantitle.get_simple(t2.replace('720p / 1080p', '')) == cleantitle.get(title) and y == hdlr): raise Exception()

                link = client.parseDOM(post, 'a', ret='href')[0]
                if not 'Episodes' in post:
                    u = self.movie_links(link)
                else:
                    sep = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
                    u = self.show_links(link, sep)

                for item in u:
                    quality, info = source_utils.get_release_quality(item[0][0], None)
                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', item[0][1])[-1]
                        div = 1 if size.endswith(' GB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass
                    info = ' | '.join(info)

                    url = item[0][0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    sources.append({'source': 'popcorn', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': True, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape the site's search mirrors for entries whose detail link
    contains the wanted IMDb id; emit direct 'DL' sources with size info.

    Fixes vs. original: leftover debug ``print`` loop removed; ``link`` is
    initialised so a fruitless search yields an empty result instead of a
    NameError; the early failure path returns the (empty) sources list
    rather than None; the three copy-pasted mirror attempts are one loop.
    """
    try:
        sources = []
        if url is None: return sources

        data = url
        imdb = data['imdb']

        link = []
        try:
            # Try each mirror in turn until one yields an IMDb-id match.
            for search in (self.search_link, self.search_link2, self.search_link3):
                query = urlparse.urljoin(self.base_link, search)
                result = requests.get(query).text
                m = re.findall('Movie Size:(.+?)<.+?href="(.+?)".+?href="(.+?)"\s*onMouse', result, re.DOTALL)
                m = [(i[0], i[1], i[2]) for i in m if imdb in i[1]]
                if m:
                    link = m
                    break
        except:
            traceback.print_exc()
            return sources

        for item in link:
            try:
                quality, info = source_utils.get_release_quality(item[2], None)
                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[0])[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    traceback.print_exc()
                info = ' | '.join(info)

                url = item[2]
                # Archives are not streamable.
                if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                sources.append({'source': 'DL', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': True, 'debridonly': False})
            except:
                traceback.print_exc()

        return sources
    except:
        traceback.print_exc()
        return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only RLSBB scraper: build a slugged search URL, fall back to
    a season-pack page for shows, and emit one source per matching link.
    Returns the (possibly empty) sources list on any failure."""
    try:
        sources = []
        if url == None: return sources
        if debrid.status() == False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        # S01E02 marker for shows, year for movies.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        query = query.replace("&", "and")
        # NOTE(review): this replace looks like a no-op; presumably it was
        # meant to collapse double spaces before the dash substitution — confirm.
        query = query.replace(" ", " ")
        query = query.replace(" ", "-")

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        # The joined search URL is immediately overridden with a direct slug.
        url = "http://rlsbb.ru/" + query
        if 'tvshowtitle' not in data: url = url + "-1080p"

        r = client.request(url)

        # check for season pack
        if r == None and 'tvshowtitle' in data:
            season = re.search('S(.*?)E', hdlr)
            season = season.group(1)
            query = title
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
            query = query + "-S" + season
            query = query.replace("&", "and")
            # NOTE(review): same presumably-intended double-space collapse.
            query = query.replace(" ", " ")
            query = query.replace(" ", "-")
            url = "http://rlsbb.ru/" + query
            r = client.request(url)

        posts = client.parseDOM(r, "div", attrs={"class": "content"})
        hostDict = hostprDict + hostDict

        # Collect every anchor whose text contains the episode/year marker.
        items = []
        for post in posts:
            try:
                u = client.parseDOM(post, 'a', ret='href')
                for i in u:
                    try:
                        name = str(i)
                        if hdlr in name.upper(): items.append(name)
                    except:
                        pass
            except:
                pass

        seen_urls = set()

        for item in items:
            try:
                info = []
                url = str(item)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                # Skip duplicate links.
                if url in seen_urls: continue
                seen_urls.add(url)

                # clean the URL
                host = url.replace("\\", "")
                host2 = host.strip('"')
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(host2.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                if any(x in host2 for x in ['.rar', '.zip', '.iso']): raise Exception()

                if '720p' in host2:
                    quality = 'HD'
                elif '1080p' in host2:
                    quality = '1080p'
                else:
                    quality = 'SD'

                info = ' | '.join(info)
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': host2, 'info': info, 'direct': False, 'debridonly': True})
            except:
                pass

        # Prefer non-CAM copies when any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check: sources = check

        return sources
    except:
        print("Unexpected error in RLSBB Source Script:")
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only scraper: parse RSS <item> posts, collect the anchors
    under the 'Download Links' heading, validate title and year/episode,
    and emit hoster sources.

    Fix vs. original: the appended dict hardcoded ``'info': 'HEVC'``,
    discarding the computed quality/size info string; it now passes
    ``info`` (get_release_quality already tags HEVC releases).
    """
    try:
        sources = []
        if url == None: return sources
        if debrid.status() == False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)
        posts = client.parseDOM(r, 'item')

        hostDict = hostprDict + hostDict

        items = []
        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]
                c = client.parseDOM(post, 'content.+?')
                # Only anchors under the 'Download Links' heading are wanted.
                u = c[0].split('<h1 ')
                u = [i for i in u if 'Download Links' in i]
                u = client.parseDOM(u, 'a', ret='href')
                try:
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c[0])[0]
                except:
                    s = '0'
                items += [(t, i, s) for i in u]
            except:
                pass

        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)

                # Title must match once year/episode/3D markers are stripped.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr: raise Exception()

                quality, info = source_utils.get_release_quality(name, item[1])
                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)

                url = item[1]
                if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                # FIX: pass the computed info string, not a hardcoded label.
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            except:
                pass

        # Prefer non-CAM copies when any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check: sources = check

        return sources
    except:
        return sources