# Standard-library imports used by the provider methods below.
import re
import traceback
import urllib
import urlparse

import cfscrape

# NOTE (assumption): client, dom_parser2, cleantitle, cache and log_utils are
# the add-on's shared scraper helper modules; they are assumed to be importable
# from the add-on's module package in each provider file.


def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources

        r = client.request(url)

        # The page advertises the stream quality in a <p> tag; anything other
        # than "HD" is treated as SD.
        quality = re.findall('>(\w+)</p', r)
        if quality[0] == 'HD':
            quality = '720p'
        else:
            quality = 'SD'

        r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
        r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

        for i in r[0]:
            # Pack the host link and its data-* attributes into a query string
            # so the resolver can rebuild the request later.
            url = {
                'url': i.attrs['href'],
                'data-film': i.attrs['data-film'],
                'data-server': i.attrs['data-server'],
                'data-name': i.attrs['data-name']
            }
            url = urllib.urlencode(url)
            sources.append({
                'source': i.content,
                'quality': quality,
                'language': 'en',
                'url': url,
                'direct': False,
                'debridonly': False
            })

        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        r = client.request(url)
        r = dom_parser2.parse_dom(r, 'p', {'class': 'server_play'})
        r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
        # Derive the host name from the embed link, e.g. ".../openload.html".
        r = [(i[0].attrs['href'], re.search('/(\w+).html', i[0].attrs['href'])) for i in r if i]
        r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]

        for i in r:
            try:
                host = i[1]
                if str(host) in str(hostDict):
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': i[0].replace('\/', '/'),
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                pass

        return sources
    except Exception:
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return

        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])

        # Build the show/season slug used by the site's search URL.
        clean_title = cleantitle.geturl(url['tvshowtitle']) + '-s%02d' % int(season)
        url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, url['year'])))
        r = client.request(url)
        r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
        r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

        # Pick the link whose label matches the requested episode number.
        for i in r[0]:
            if i.content == 'Episode %s' % episode:
                url = i.attrs['href']
        return url
    except Exception:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title).replace('-', '+')
        url = urlparse.urljoin(self.base_link, (self.search_link % clean_title))
        r = self.scraper.get(url).content
        r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
        r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
        # Pair each result link with the release year shown on its card.
        r = [(i[0].attrs['href'], re.search('Release:\s*(\d+)', i[0].content)) for i in r if i]
        r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
        # Keep only results whose release year matches the requested title.
        r = [(i[0], i[1]) for i in r if i[1] == year]

        if r[0]:
            url = r[0][0]
            return url
        else:
            return
    except Exception:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title)
        search_url = urlparse.urljoin(
            self.base_link,
            self.search_link % clean_title.replace('-', '+'))
        r = cache.get(client.request, 1, search_url)
        r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
        # Collect (link, title, year) for every search result.
        r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
              re.findall('status-year">(\d{4})</div', i.content, re.DOTALL)[0])
             for i in r if i]
        r = [(i[0][0].attrs['href'],
              re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0],
              i[1])
             for i in r if i]
        # Keep the result whose normalised title and year both match.
        r = [(i[0], i[1], i[2]) for i in r
             if (cleantitle.get(i[1]) == cleantitle.get(title) and i[2] == year)]
        url = r[0][0]
        return url
    except Exception:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return

        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['premiered'], url['season'], url['episode'] = premiered, season, episode

        try:
            # Resolve the season page by searching for "<show>-season-<n>".
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
            search_url = urlparse.urljoin(
                self.base_link,
                self.search_link % clean_title.replace('-', '+'))
            r = cache.get(client.request, 1, search_url)
            r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
            r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
                  dom_parser2.parse_dom(i, 'div', attrs={'class': 'status'})[0])
                 for i in r if i]
            r = [(i[0][0].attrs['href'],
                  re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0],
                  re.findall('(\d+)', i[1].content)[0])
                 for i in r if i]
            # Keep the result whose show title and season number both match.
            r = [(i[0], i[1].split(':')[0], i[2]) for i in r
                 if (cleantitle.get(i[1].split(':')[0]) == cleantitle.get(url['tvshowtitle'])
                     and i[2] == str(int(season)))]
            url = r[0][0]
        except Exception:
            pass

        # On the season page, pick the link labelled with the episode number.
        data = client.request(url)
        data = client.parseDOM(data, 'div', attrs={'id': 'details'})
        data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
        url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
        return url[0][1]
    except Exception:
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['title'].replace(':', '').lower()
        year = data['year']

        query = '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = urlparse.urljoin(self.base_link, self.post_link)
        post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(query)

        r = client.request(url, post=post)
        r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
        # Keep only results that mention the requested IMDb id.
        r = [dom_parser2.parse_dom(i, 'div', attrs={'class': 'news-title'}) for i in r if data['imdb'] in i]
        r = [dom_parser2.parse_dom(i[0], 'a', req='href') for i in r if i]
        r = [(i[0].attrs['href'], i[0].content) for i in r if i]

        hostDict = hostprDict + hostDict

        for item in r:
            try:
                name = item[1]

                y = re.findall('\((\d{4})\)', name)[0]
                if not y == year:
                    raise Exception()

                s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', name)
                s = s[0] if s else '0'

                data = client.request(item[0])
                data = dom_parser2.parse_dom(data, 'div', attrs={'id': 'r-content'})
                data = re.findall('\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>', data[0].content, re.DOTALL)
                u = [(i[0], i[1], s) for i in data if i]

                for name, url, size in u:
                    try:
                        # Derive the quality from the release name; the SCR/CAM
                        # checks test substrings of the name, not single characters.
                        if '4K' in name:
                            quality = '4K'
                        elif '1080p' in name:
                            quality = '1080p'
                        elif '720p' in name:
                            quality = '720p'
                        elif any(i in name.lower() for i in ['dvdscr', 'r5', 'r6']):
                            quality = 'SCR'
                        elif any(i in name.lower() for i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts']):
                            quality = 'CAM'
                        else:
                            quality = '720p'

                        info = []
                        if '3D' in name or '.3D.' in url:
                            info.append('3D')
                            quality = '1080p'
                        if any(i in name.lower() for i in ['hevc', 'h265', 'x265']):
                            info.append('HEVC')

                        try:
                            # Normalise the reported size to GB.
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', size)[-1]
                            div = 1 if size.endswith(('Gb', 'GiB', 'GB')) else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                            size = '%.2f GB' % size
                            info.append(size)
                        except Exception:
                            pass

                        info = ' | '.join(info)

                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')
                        if any(x in url for x in ['.rar', '.zip', '.iso', 'turk']):
                            continue

                        if 'ftp' in url:
                            host = 'COV'
                            direct = True
                        else:
                            direct = False
                            host = 'turbobit.net'
                        # if not host in hostDict: continue

                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': direct,
                            'debridonly': False
                        })
                    except Exception:
                        pass
            except Exception:
                pass

        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('UltraHD - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        # hdlr is the token a release must contain: SxxExx for episodes, the year for movies.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        scraper = cfscrape.create_scraper()
        r = scraper.get(url).content

        items = dom_parser2.parse_dom(r, 'h2')
        items = [dom_parser2.parse_dom(i.content, 'a', req=['href', 'rel', 'title', 'data-wpel-link']) for i in items]
        items = [(i[0].content, i[0].attrs['href']) for i in items]

        hostDict = hostprDict + hostDict

        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)

                scraper = cfscrape.create_scraper()
                r = scraper.get(item[1]).content
                links = dom_parser2.parse_dom(r, 'a', req=['href', 'rel', 'data-wpel-link', 'target'])
                links = [i.attrs['href'] for i in links]

                for url in links:
                    try:
                        if hdlr in name:
                            # Strip the title/year/episode tag, then split the rest into tokens.
                            fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                            fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                            fmt = [i.lower() for i in fmt]

                            if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt):
                                raise Exception()
                            if any(i in ['extras'] for i in fmt):
                                raise Exception()

                            if '1080p' in fmt:
                                quality = '1080p'
                            elif '720p' in fmt:
                                quality = '720p'
                            else:
                                quality = 'SD'
                            if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
                                quality = 'SCR'
                            elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt):
                                quality = 'CAM'

                            info = []
                            if '3d' in fmt:
                                info.append('3D')

                            try:
                                # Pull the release size from the name and normalise it to GB.
                                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', name)[-1]
                                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except Exception:
                                pass

                            if any(i in ['hevc', 'h265', 'x265'] for i in fmt):
                                info.append('HEVC')

                            info = ' | '.join(info)

                            if not any(x in url for x in ['.rar', '.zip', '.iso']):
                                url = client.replaceHTMLCodes(url)
                                url = url.encode('utf-8')
                                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                if host in hostDict:
                                    host = client.replaceHTMLCodes(host)
                                    host = host.encode('utf-8')
                                    sources.append({
                                        'source': host,
                                        'quality': quality,
                                        'language': 'en',
                                        'url': url,
                                        'info': info,
                                        'direct': False,
                                        'debridonly': True
                                    })
                    except Exception:
                        pass
            except Exception:
                pass

        # Prefer non-CAM sources when any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except Exception:
        return sources
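
# Minimal usage sketch (not part of the original providers): the add-on
# framework is assumed to call movie()/episode() first to resolve a page URL,
# then pass that URL to sources() together with its host lists. The provider
# class name "source", the empty host lists and the sample IMDb id/title below
# are illustrative placeholders only.
if __name__ == '__main__':
    provider = source()  # hypothetical instantiation of one provider class
    page_url = provider.movie('tt0133093', 'The Matrix', 'The Matrix', [], '1999')
    if page_url:
        for item in provider.sources(page_url, [], []):
            print('%s | %s | %s' % (item['source'], item['quality'], item['url']))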