def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        r = self.scraper.get(url).content
        # The page flags the release in a <p> tag; only "HD" maps to 720p.
        quality = re.findall('>(\w+)</p', r)
        quality = '720p' if quality and quality[0] == 'HD' else 'SD'
        r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
        r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
        for i in r[0]:
            # Pack the link metadata into a query string for later resolution.
            url = urllib.urlencode({
                'url': i.attrs['href'],
                'data-film': i.attrs['data-film'],
                'data-server': i.attrs['data-server'],
                'data-name': i.attrs['data-name']
            })
            sources.append({
                'source': i.content,
                'quality': quality,
                'language': 'en',
                'url': url,
                'direct': False,
                'debridonly': False
            })
        return sources
    except Exception:
        return sources
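# Illustrative sketch (not part of the scraper): the urllib.urlencode()
# packing above is the counterpart of the urlparse.parse_qs() unpacking
# used by the episode()/sources() methods below. Python 2 stdlib only;
# the values here are made up.
import urllib

packed = urllib.urlencode({'url': '/watch/1', 'data-server': '14'})
# packed -> 'url=%2Fwatch%2F1&data-server=14' (key order depends on the dict)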
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        query = self.tvsearch_link % urllib.quote_plus(cleantitle.query(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query.lower())
        result = self.scraper.get(query).content
        result = client.parseDOM(result, 'div', attrs={'class': 'index_item.+?'})
        result = [dom_parser2.parse_dom(i, 'a', req=['href', 'title'])[0]
                  for i in result if i]
        # Strip year/SxxExx/3D tags from each listing title before comparing.
        result = [i.attrs['href'] for i in result
                  if cleantitle.get(tvshowtitle) == cleantitle.get(
                      re.sub('(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                             '', i.attrs['title'], flags=re.I))][0]
        url = client.replaceHTMLCodes(result)
        url = url.encode('utf-8')
        return url
    except Exception:
        return
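# Illustrative sketch: what the title-cleaning re.sub() above does to a
# typical listing title (the example string is made up).
import re

cleaned = re.sub('(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                 '', 'Some Show (2019) S01E01 HDTV', flags=re.I)
# cleaned -> 'Some Show ' : the year tag and everything after it is dropped;
# both sides of the comparison go through cleantitle.get(), so leftover
# whitespace does not affect the match.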
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return
        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        clean_title = cleantitle.geturl(url['tvshowtitle']) + '-s%02d' % int(season)
        url = urlparse.urljoin(self.base_link,
                               self.search_link % (clean_title, url['year']))
        r = self.scraper.get(url).content
        r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
        r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
        # Return the link whose anchor text is exactly "Episode <n>".
        for i in r[0]:
            if i.content == 'Episode %s' % episode:
                return i.attrs['href']
    except Exception:
        return
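# Illustrative sketch: the parse_qs() + dict() idiom above, which several of
# these methods share, flattens each single-element value list to a string.
# Python 2 urlparse (urllib.parse on Python 3); the example data is made up.
import urlparse

data = urlparse.parse_qs('tvshowtitle=Some+Show&year=2019')
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
# data -> {'tvshowtitle': 'Some Show', 'year': '2019'}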
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title).replace('-', '+')
        url = urlparse.urljoin(self.base_link, self.search_link % clean_title)
        r = self.scraper.get(url).content
        r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
        r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
        r = [(i[0].attrs['href'], re.search('Release:\s*(\d+)', i[0].content))
             for i in r if i]
        r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
        # Keep only results whose release year matches.
        r = [i for i in r if i[1] == year]
        if r:
            return r[0][0]
        return
    except Exception:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return
        r = client.request(url)
        r = dom_parser2.parse_dom(r, 'div', {'class': 'el-item'})
        r = [(dom_parser2.parse_dom(i, 'div', {'class': 'season'}),
              dom_parser2.parse_dom(i, 'div', {'class': 'episode'}),
              dom_parser2.parse_dom(i, 'a', req='href'))
             for i in r if i]
        # Keep the link whose season and episode labels both match exactly.
        r = [i[2][0].attrs['href'] for i in r
             if i[0][0].content == 'Season %01d' % int(season)
             and i[1][0].content == 'Episode %01d' % int(episode)]
        if r:
            return r[0]
        return
    except Exception:
        return
def resolve(self, url):
    # Links wrapped by the "hideurl" redirector need one extra hop to
    # recover the direct hoster link.
    if 'hideurl' in url:
        data = self.scraper.get(url).content
        data = client.parseDOM(data, 'div', attrs={'class': 'row'})
        url = [dom_parser2.parse_dom(i, 'a', req='href')[0] for i in data]
        url = [i.attrs['href'] for i in url if 'direct me' in i.content][0]
        return url
    return url
def resolve(self, url):
    try:
        r = client.request(url)
        r = dom_parser2.parse_dom(r, 'a',
                                  req=['href', 'data-episodeid', 'data-linkid'])[0]
        url = r.attrs['href']
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title)
        search_url = urlparse.urljoin(self.base_link,
                                      self.search_link % clean_title.replace('-', '+'))
        r = self.scraper.get(search_url).content
        r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
        r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
              re.findall('status-year">(\d{4})</div', i.content, re.DOTALL)[0])
             for i in r if i]
        r = [(i[0][0].attrs['href'],
              re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0],
              i[1])
             for i in r if i]
        # Require an exact title match plus a matching year.
        r = [i for i in r
             if cleantitle.get(i[1]) == cleantitle.get(title) and i[2] == year]
        url = r[0][0]
        return url
    except Exception:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return
        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['premiered'], url['season'], url['episode'] = premiered, season, episode
        try:
            # First resolve the season page from a title + season search.
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
            search_url = urlparse.urljoin(self.base_link,
                                          self.search_link % clean_title.replace('-', '+'))
            r = self.scraper.get(search_url).content
            r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
            r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
                  dom_parser2.parse_dom(i, 'div', attrs={'class': 'status'})[0])
                 for i in r if i]
            r = [(i[0][0].attrs['href'],
                  re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0],
                  re.findall('(\d+)', i[1].content)[0])
                 for i in r if i]
            r = [(i[0], i[1].split(':')[0], i[2]) for i in r
                 if cleantitle.get(i[1].split(':')[0]) == cleantitle.get(url['tvshowtitle'])
                 and i[2] == str(int(season))]
            url = r[0][0]
        except Exception:
            pass
        # Then pick the matching episode link off the season page.
        data = self.scraper.get(url).content
        data = client.parseDOM(data, 'div', attrs={'id': 'details'})
        data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
        url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
        return url[0][1]
    except Exception:
        return
def _get_sources(self, item, hostDict):
    try:
        quality, info = source_utils.get_release_quality(item[0], item[1])
        size = item[2] if item[2] != '0' else item[0]
        try:
            # Normalise the reported size to GB (MB/MiB figures are divided by 1024).
            size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', size)[-1]
            div = 1 if size.endswith(('GB', 'GiB')) else 1024
            size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
            size = '%.2f GB' % size
            info.append(size)
        except Exception:
            pass
        data = self.scraper.get(item[1]).content
        try:
            r = client.parseDOM(data, 'li', attrs={'class': 'elemento'})
            r = [(dom_parser2.parse_dom(i, 'a', req='href')[0],
                  dom_parser2.parse_dom(i, 'img', req='alt')[0],
                  dom_parser2.parse_dom(i, 'span', {'class': 'd'})[0])
                 for i in r]
            # Protocol-relative links get an explicit http: prefix.
            urls = [('http:' + i[0].attrs['href'] if not i[0].attrs['href'].startswith('http')
                     else i[0].attrs['href'],
                     i[1].attrs['alt'], i[2].content)
                    for i in r if i[0] and i[1]]
            for url, host, qual in urls:
                try:
                    if any(x in url for x in ['.rar', '.zip', '.iso', ':Upcoming']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid:
                        continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    quality, info = source_utils.get_release_quality(qual, quality)
                    info.append('HEVC')
                    info = ' | '.join(info)
                    self._sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except Exception:
                    pass
        except Exception:
            pass
    except BaseException:
        return
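# Illustrative sketch: the size normalisation above as a standalone helper.
# The name _normalise_size is hypothetical, not part of the addon; it returns
# None when no size token is found instead of relying on a bare except.
import re

def _normalise_size(text):
    matches = re.findall(r'((?:\d+[.,])*\d+\s*(?:GB|GiB|MB|MiB))', text)
    if not matches:
        return None
    token = matches[-1]
    div = 1 if token.endswith(('GB', 'GiB')) else 1024  # MB/MiB -> GB
    value = float(re.sub(r'[^0-9.]', '', token.replace(',', '.')))
    return '%.2f GB' % (value / div)

# _normalise_size('Some.Movie.2019.1080p [700 MB]') -> '0.68 GB'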
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = self.searchMovie(data['title'], data['year'])
        if url is None:
            return sources
        r = client.request(url)
        data = client.parseDOM(r, 'div', attrs={'class': 'playex'})[0]
        frames = client.parseDOM(data, 'iframe', ret='src')
        frames += re.compile('''<iframe\s*src=['"](.+?)['"]''', re.DOTALL).findall(data)
        quality = client.parseDOM(r, 'span', attrs={'class': 'qualityx'})[0]
        for frame in frames:
            url = frame.split('=')[1] if frame.startswith('<') else frame
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            valid, host = source_utils.is_host_valid(url, hostDict)
            if valid:
                quality, info = source_utils.get_release_quality(quality, url)
                info = ' | '.join(info)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            elif url.endswith('mp4'):
                # Direct MP4: append the User-Agent header hint for playback.
                url += '|User-Agent=%s' % urllib.quote_plus(client.agent())
                sources.append({
                    'source': 'MP4',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': True,
                    'debridonly': False
                })
            elif 'mystream' in url:
                data = client.request(url)
                links = dom_parser2.parse_dom(data, 'source', req=['src', 'label'])
                for link in links:
                    label = link.attrs['label']
                    url = link.attrs['src'] + '|User-Agent=%s' % urllib.quote_plus(client.agent())
                    sources.append({
                        'source': 'MYSTREAM',
                        'quality': label,
                        'language': 'en',
                        'url': url,
                        'direct': True,
                        'debridonly': False
                    })
            else:
                continue
        return sources
    except Exception:
        return sources
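# Illustrative sketch: the '|User-Agent=...' suffix built above is Kodi's
# convention for attaching HTTP headers to a playable URL. Python 2 urllib;
# the link and agent string are made up.
import urllib

link = 'http://example.com/video.mp4'
link += '|User-Agent=%s' % urllib.quote_plus('Mozilla/5.0')
# link -> 'http://example.com/video.mp4|User-Agent=Mozilla%2F5.0'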
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s' % data['title']
        url = self.search_link % urllib.quote_plus(query).lower()
        url = urlparse.urljoin(self.base_link, url)
        headers = {'Referer': url}
        r = self.scraper.get(url, headers=headers).content
        items = dom_parser2.parse_dom(r, 'h2')
        items = [dom_parser2.parse_dom(i.content, 'a',
                                       req=['href', 'rel', 'data-wpel-link'])
                 for i in items]
        items = [(i[0].content, i[0].attrs['href']) for i in items]
        hostDict = hostprDict + hostDict
        for item in items:
            try:
                name = client.replaceHTMLCodes(item[0])
                query = query.lower().replace(' ', '-')
                if query not in item[1]:
                    continue
                url = item[1]
                headers = {'Referer': url}
                r = self.scraper.get(url, headers=headers).content
                links = dom_parser2.parse_dom(r, 'a',
                                              req=['href', 'rel', 'data-wpel-link'])
                links = [i.attrs['href'] for i in links]
                for url in links:
                    try:
                        if hdlr in name.upper() and cleantitle.get(title) in cleantitle.get(name):
                            # Split the release name into tags and reject subbed,
                            # dubbed and extras releases.
                            fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                            fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                            fmt = [i.lower() for i in fmt]
                            if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt):
                                raise Exception()
                            if any(i in ['extras'] for i in fmt):
                                raise Exception()
                            quality, info = source_utils.get_release_quality(name, url)
                            try:
                                # Pull a size token like '1.4 GB' out of the release name.
                                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', name)[-1]
                                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except Exception:
                                pass
                            info = ' | '.join(info)
                            if not any(x in url for x in ['.rar', '.zip', '.iso']):
                                url = client.replaceHTMLCodes(url)
                                url = url.encode('utf-8')
                                host = re.findall('([\w]+[.][\w]+)$',
                                                  urlparse.urlparse(url.strip().lower()).netloc)[0]
                                if host in hostDict:
                                    host = client.replaceHTMLCodes(host)
                                    host = host.encode('utf-8')
                                    sources.append({
                                        'source': host,
                                        'quality': quality,
                                        'language': 'en',
                                        'url': url,
                                        'info': info,
                                        'direct': False,
                                        'debridonly': True
                                    })
                    except Exception:
                        pass
            except Exception:
                pass
        # Prefer non-CAM sources when any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        hostDict = hostprDict + hostDict
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s' % data['title']
        # Strip characters the search endpoint cannot handle.
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        headers = {'Referer': self.base_link}
        r = self.scraper.get(url, headers=headers).content
        search_results = dom_parser2.parse_dom(r, 'h2')
        search_results = [dom_parser2.parse_dom(i.content, 'a', req=['href'])
                          for i in search_results]
        search_results = [(i[0].content, i[0].attrs['href']) for i in search_results]
        items = []
        for search_result in search_results:
            try:
                headers = {'Referer': url}
                r = self.scraper.get(search_result[1], headers=headers).content
                links = dom_parser2.parse_dom(r, 'a', req=['href', 'rel'])
                links = [i.attrs['href'] for i in links]
                for url in links:
                    try:
                        if hdlr in url.upper() and cleantitle.get(title) in cleantitle.get(url):
                            items.append(url)
                    except Exception:
                        pass
            except Exception:
                pass
        seen_urls = set()
        for item in items:
            try:
                url = str(item)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                if url in seen_urls:
                    continue
                seen_urls.add(url)
                # Drop samples, trailers, archives and subtitle files.
                if any(x in url for x in ['.part', 'extras', 'subs', 'dubbed', 'dub',
                                          'MULTISUBS', 'sample', 'youtube', 'trailer']) \
                        or any(url.endswith(x) for x in ['.rar', '.zip', '.iso', '.sub', '.idx', '.srt']):
                    raise Exception()
                quality, info = source_utils.get_release_quality(url, url)
                host = re.findall('([\w]+[.][\w]+)$',
                                  urlparse.urlparse(url.strip().lower()).netloc)[0]
                if host in hostDict:
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
            except Exception:
                pass
        return sources
    except Exception:
        return sources
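# Illustrative sketch: the host-extraction idiom used above (the last two
# labels of the netloc) as a standalone function. The name _host_of is
# hypothetical, not part of the addon.
import re
import urlparse

def _host_of(link):
    netloc = urlparse.urlparse(link.strip().lower()).netloc
    m = re.findall(r'([\w]+[.][\w]+)$', netloc)
    return m[0] if m else None

# _host_of('https://www.example.com/file/abc') -> 'example.com'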
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        results_limit = 30
        # At most one link per page from each of these hosters.
        host_limits = {'vshare': 1, 'openload': 1, 'speedvid': 1, 'vidoza': 1,
                       'vidlox': 1, 'mango': 1, 'streamplay': 1, 'vidtodo': 1,
                       'clipwatch': 1, 'vidcloud': 1, 'vev': 1, 'flix555': 1}
        if url is None:
            return sources
        r = client.request(url)
        r = dom_parser2.parse_dom(r, 'div', {'class': 'll-item'})
        r = [(dom_parser2.parse_dom(i, 'a', req='href'),
              dom_parser2.parse_dom(i, 'div', {'class': 'notes'}))
             for i in r if i]
        r = [(i[0][0].attrs['href'], i[0][0].content,
              i[1][0].content if i[1] else 'None') for i in r]
        for i in r:
            try:
                url = i[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                valid, host = source_utils.is_host_valid(i[1], hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                # Enforce the per-host caps declared above.
                matched = [k for k in host_limits if k in host]
                if any(host_limits[k] < 1 for k in matched):
                    continue
                for k in matched:
                    host_limits[k] -= 1
                quality, info = source_utils.get_release_quality(i[2], i[2])
                info = ' | '.join(info)
                # Stop adding sources once the overall cap is reached.
                if results_limit < 1:
                    continue
                results_limit -= 1
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            except Exception:
                pass
        return sources
    except Exception:
        return sources