def sources(self, url, hostDict, hostprDict):
    """Scrape the Ganool search results for a movie and collect hoster links.

    url is a query string holding 'title' and 'year'; returns a list of
    source dicts (possibly empty). Exceptions are logged, never raised.
    """
    sources = []
    try:
        if url is None:
            return sources
        params = parse_qs(url)
        params = dict((k, params[k][0]) if params[k] else (k, '') for k in params)
        q = '%s' % cleantitle.get_gan_url(params['title'])
        search_url = self.base_link + self.search_link % q
        html = ensure_text(cfScraper.get(search_url).content)
        result_re = re.compile('<a href="(.+?)" class="ml-mask jt" title="(.+?)">\s+<span class=".+?">(.+?)</span>')
        # exact "Title (Year)" string the result caption must contain
        wanted = '%s (%s)' % (params['title'], params['year'])
        for page_url, check, qual in result_re.findall(html):
            if wanted not in check:
                continue
            # the slug after '-hd' keys the download page
            key = page_url.split('-hd')[1]
            dl_url = 'https://fmovies.tw/moviedownload.php?q=%s' % key
            dl_html = ensure_text(cfScraper.get(dl_url).content)
            for link in re.compile('<a rel=".+?" href="(.+?)" target=".+?">').findall(dl_html):
                if '.rar' in link:
                    continue  # skip archive downloads
                valid, host = source_utils.is_host_valid(link, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': '720p', 'language': 'en',
                                    'url': link, 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('Ganool Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    # Scrape tvmovieflix: find "Report Broken" anchors to recover the internal
    # link ids, load the matching embed page (/e/ for shows, /m/ for movies),
    # then harvest the iframe targets as hoster sources.
    try:
        sources = []
        if url == None:
            return sources
        hostDict = hostDict + hostprDict
        r = cfScraper.get(url).content
        match = re.compile(
            '<a href="http://www.tvmovieflix.com/report-.+?/(.+?)" target="_blank"><span class="a">Report Broken</span></a></li>',
            re.DOTALL | re.M).findall(r)
        for link in match:
            # shows use the /e/ embed path, movies the /m/ path
            if "/show/" in url:
                surl = "http://www.tvmovieflix.com/e/" + link
            else:
                surl = "http://www.tvmovieflix.com/m/" + link
            i = cfScraper.get(surl).content
            # NOTE(review): 'match' and 'link' are reused/shadowed in these nested
            # loops; the outer iteration still works because findall() already
            # materialized the outer list, but be careful when editing.
            match = re.compile('<IFRAME.+?SRC="(.+?)"', re.DOTALL | re.IGNORECASE).findall(i)
            for link in match:
                if "realtalksociety.com" in link:
                    # this host exposes a direct <source> mp4 — playable without resolving
                    r = requests.get(link).content
                    match = re.compile(
                        '<source src="(.+?)" type="video/mp4">',
                        re.DOTALL | re.IGNORECASE).findall(r)
                    for url in match:
                        valid, host = source_utils.is_host_valid(
                            url, hostDict)
                        quality, info = source_utils.get_release_quality(
                            url, url)
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'info': info,
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })
                else:
                    # generic external hoster iframe — leave resolving to the player
                    valid, host = source_utils.is_host_valid(
                        link, hostDict)
                    quality, info = source_utils.get_release_quality(
                        link, link)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'info': info,
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
        return sources
    except:
        # best-effort scraper: swallow errors, return what was collected
        return sources
def sources(self, url, hostDict, hostprDict):
    # Collect data-video player links from the page; vidcloud links are
    # expanded one level (their page carries further data-video links).
    try:
        sources = []
        hostDict = hostprDict + hostDict
        r = cfScraper.get(url).content
        # quality/info end up holding the LAST quality tag on the page and are
        # applied to every source found below — presumably the page lists one
        # quality per title; verify against the site markup.
        qual = re.compile('class="quality">(.+?)</span>').findall(r)
        for i in qual:
            info = i
            if '1080' in i:
                quality = '1080p'
            elif '720' in i:
                quality = '720p'
            else:
                quality = 'SD'
        u = re.compile('data-video="(.+?)"').findall(r)
        for url in u:
            if not url.startswith('http'):
                url = "https:" + url
            if 'vidcloud' in url:
                # vidcloud is an intermediate page: fetch it and pull the real links
                r = cfScraper.get(url).content
                t = re.compile('data-video="(.+?)"').findall(r)
                for url in t:
                    if not url.startswith('http'):
                        url = "https:" + url
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid and 'vidcloud' not in url:
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'info': info,
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
            # NOTE(review): 'url' was rebound by the inner vidcloud loop above, so
            # this trailing append can re-add the last expanded link (a possible
            # duplicate) rather than the original outer link — confirm intent.
            valid, host = source_utils.is_host_valid(url, hostDict)
            if valid:
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'info': info,
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
        return sources
    except:
        return sources
def searchMovie(self, title, year, aliases):
    """Resolve a movie title/year to the site's watch-page URL.

    Prefers a result whose title matches an alias AND carries the right
    year; falls back to the first alias-only match. Returns None on failure.
    """
    try:
        search_url = urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
        html = ensure_text(cfScraper.get(search_url).content, errors='ignore')
        items = client.parseDOM(html, 'div', attrs={'class': 'ml-item'})
        pairs = zip(client.parseDOM(items, 'a', ret='href'),
                    client.parseDOM(items, 'a', ret='title'))
        results = [(href, name, re.findall('\((\d{4})', name)) for href, name in pairs]
        url = None
        try:
            # first pass: require the year extracted from the result title to match
            dated = [(href, name, years[0]) for href, name, years in results if len(years) > 0]
            url = [href for href, name, y in dated
                   if self.matchAlias(name, aliases) and (year == y)][0]
        except:
            url = None
        if url == None:
            # second pass: any alias match, year ignored
            url = [href for href, name, _ in results if self.matchAlias(name, aliases)][0]
        return urljoin(self.base_link, '%s/watching.html' % url)
    except:
        log_utils.log('123movies2 exception', 1)
        return
def searchMovie(self, title, year, aliases, headers):
    """Resolve a movie title/year to its watch-page URL (alias-aware).

    headers is accepted for interface compatibility but the request is
    made via cfScraper. Returns None when nothing matches.
    """
    try:
        title = cleantitle.normalize(title)
        search_url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
        html = cfScraper.get(search_url).content
        items = client.parseDOM(html, 'div', attrs={'class': 'ml-item'})
        pairs = zip(client.parseDOM(items, 'a', ret='href'),
                    client.parseDOM(items, 'a', ret='oldtitle'))
        results = [(href, name, re.findall('\((\d{4})', name)) for href, name in pairs]
        url = None
        try:
            # prefer results whose embedded (year) agrees with the requested year
            dated = [(href, name, years[0]) for href, name, years in results if len(years) > 0]
            url = [href for href, name, y in dated
                   if self.matchAlias(name, aliases) and (year == y)][0]
        except:
            url = None
        if url is None:
            # fall back to the first alias match regardless of year
            url = [href for href, name, _ in results if self.matchAlias(name, aliases)][0]
        return urlparse.urljoin(self.base_link, '%s/watching.html' % url)
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Scrape the servers-list block of a watch page into source dicts.

    Each anchor's data attributes are packed into a query string that
    resolve() later unpacks. Fixes: the except path now returns the
    (possibly empty) list instead of None — every sibling sources()
    returns a list and callers iterate the result — and a page with no
    quality tag no longer raises IndexError (it defaults to SD).
    """
    sources = []
    try:
        hostDict = hostprDict + hostDict
        if url == None:
            return sources
        r = cfScraper.get(url).content
        # the page marks releases "HD" or not; map to a coarse quality label
        quality = re.findall(">(\w+)<\/p", r)
        quality = "720p" if quality and quality[0] == "HD" else "SD"
        r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
        r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
        for i in r[0]:
            # pack the player parameters; resolve() consumes this query string
            url = {'url': i.attrs['href'],
                   'data-film': i.attrs['data-film'],
                   'data-server': i.attrs['data-server'],
                   'data-name': i.attrs['data-name']}
            url = urllib.urlencode(url)
            valid, host = source_utils.is_host_valid(i.content, hostDict)
            if valid:
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        # best-effort scraper: swallow errors but keep the return type stable
        return sources
def sources(self, url, hostDict, hostprDict):
    """Harvest hoster links from every movie_version_link span on the page."""
    sources = []
    try:
        if url is None:
            return sources
        hosts = hostprDict + hostDict
        page = cfScraper.get(url).content
        spans = client.parseDOM(page, "span", attrs={"class": "movie_version_link"})
        for span in spans:
            for link in client.parseDOM(span, 'a', ret='data-href'):
                # cheap substring dedupe against links already collected
                if link in str(sources):
                    continue
                quality, info = source_utils.get_release_quality(link, link)
                valid, host = source_utils.is_host_valid(link, hosts)
                if valid:
                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'info': info, 'url': link, 'direct': False,
                                    'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Follow each Download link in the page's table and collect final hoster URLs."""
    sources = []
    try:
        if url == None:
            return sources
        hosts = hostDict + hostprDict
        page = ensure_text(cfScraper.get(url).content, errors='replace')
        # the download table body holds one Download anchor per release
        table = re.compile('<tbody>(.+?)</tbody>', re.DOTALL).findall(page)[0]
        download_links = re.compile("<a href=\'(.+?)\' target=\'_blank\'>Download</a>", re.DOTALL).findall(table)
        for dl in download_links:
            link_page = ensure_text(cfScraper.get(dl).content, errors='replace')
            for final in re.compile('<a id="link" rel="nofollow" href="(.+?)" class="btn"', re.DOTALL).findall(link_page):
                valid, host = source_utils.is_host_valid(final, hosts)
                if not valid:
                    continue
                quality, info = source_utils.get_release_quality(final, final)
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': final, 'info': info, 'direct': False,
                                'debridonly': False})
        return sources
    except:
        return sources
def resolve(self, url):
    """Exchange a packed server-selection query string for a playable URL.

    Performs the three-step ipplugins handshake (plugin post, player
    lookup, api status check), retrying the player lookup once with n=1
    when the api reports a failed status. Returns None on any error.
    """
    try:
        params = urlparse.parse_qs(url)
        params = dict((k, params[k][0]) for k in params)
        post = {'ipplugins': 1,
                'ip_film': params['data-film'],
                'ip_server': params['data-server'],
                'ip_name': params['data-name'],
                'fix': "0"}
        cfScraper.headers.update({'Referer': params['url'], 'X-Requested-With': 'XMLHttpRequest'})
        p1 = json.loads(cfScraper.post(
            'http://123123movies.net/ip.file/swf/plugins/ipplugins.php', data=post).content)
        p2 = json.loads(cfScraper.get(
            'http://123123movies.net/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0' % (
                p1['s'], params['data-server'])).content)
        p3 = json.loads(cfScraper.get(
            'http://123123movies.net/ip.file/swf/ipplayer/api.php?hash=%s' % (p2['hash'])).content)
        # a failed status means the n=0 player slot was bad — retry once with n=1
        if p3['status'] == False:
            p2 = json.loads(cfScraper.get(
                'http://123123movies.net/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1' % (
                    p1['s'], params['data-server'])).content)
        return "https:%s" % p2["data"].replace("\/", "/")
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    """Search the site by IMDB id and return the first result whose title matches."""
    try:
        query = cleantitle.getsearch(imdb)
        search_url = urlparse.urljoin(self.base_link, self.search_link)
        search_url = search_url % (query.replace(':', ' ').replace(' ', '+'))
        html = cfScraper.get(search_url).content
        posts = re.compile(
            '<div class="post_thumb".+?href="(.+?)"><h2 class="thumb_title">(.+?)</h2>',
            re.DOTALL).findall(html)
        for link, name in posts:
            # compare normalized titles so punctuation/case differences don't matter
            if cleantitle.get(title) in cleantitle.get(name):
                return link
        return
    except Exception:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Locate the page URL for a specific episode of a show season."""
    try:
        if url == None:
            return
        data = urlparse.parse_qs(url)
        data = dict([(k, data[k][0]) if data[k] else (k, '') for k in data])
        # season pages are addressed as <title-slug>-sNN
        slug = cleantitle.geturl(data['tvshowtitle']) + '-s%02d' % int(season)
        page_url = urlparse.urljoin(self.base_link, (self.search_link % (slug, data['year'])))
        html = cfScraper.get(page_url).content
        blocks = dom_parser2.parse_dom(html, 'div', {'id': 'ip_episode'})
        anchors = [dom_parser2.parse_dom(b, 'a', req=['href']) for b in blocks if b]
        wanted = 'Episode %s' % episode
        for a in anchors[0]:
            if a.content == wanted:
                return a.attrs['href']
    except:
        return
def searchShow(self, title, season, aliases, headers):
    """Find the watch-page URL for a given show season via the site search.

    headers is accepted for interface compatibility; the request goes
    through cfScraper. Returns None when no result matches.
    """
    try:
        title = cleantitle.normalize(title)
        query = '%s Season %01d' % (title, int(season))
        search_url = urlparse.urljoin(
            self.base_link, self.search_link % cleantitle.geturl(query))
        html = cfScraper.get(search_url).content
        items = client.parseDOM(html, 'div', attrs={'class': 'ml-item'})
        pairs = zip(client.parseDOM(items, 'a', ret='href'),
                    client.parseDOM(items, 'a', ret='title'))
        # split each result title into (show name, season number)
        parsed = [(href, name, re.findall('(.*?)\s+-\s+Season\s+(\d)', name))
                  for href, name in pairs]
        parsed = [(href, name, found[0]) for href, name, found in parsed if len(found) > 0]
        url = [href for href, name, (show, snum) in parsed
               if self.matchAlias(show, aliases) and snum == season][0]
        return urlparse.urljoin(self.base_link, '%s/watching.html' % url)
    except:
        return
def searchShow(self, title, season, aliases):
    """Find the watch-page URL for a show season via the site search.

    Returns None (after logging) when the search or matching fails.
    """
    try:
        query = '%s Season %01d' % (title, int(season))
        search_url = urljoin(self.base_link, self.search_link % cleantitle.geturl(query))
        html = ensure_text(cfScraper.get(search_url).content, errors='ignore')
        items = client.parseDOM(html, 'div', attrs={'class': 'ml-item'})
        pairs = zip(client.parseDOM(items, 'a', ret='href'),
                    client.parseDOM(items, 'a', ret='title'))
        # split each result title into (show name, season number)
        parsed = [(href, name, re.findall('(.*?)\s+-\s+Season\s+(\d)', name))
                  for href, name in pairs]
        parsed = [(href, name, found[0]) for href, name, found in parsed if len(found) > 0]
        url = [href for href, name, (show, snum) in parsed
               if self.matchAlias(show, aliases) and snum == season][0]
        return urljoin(self.base_link, '%s/watching.html' % url)
    except:
        log_utils.log('123movies1 exception', 1)
        return
def sources(self, url, hostDict, hostprDict):
    # Build/locate the watch page (show episode or movie), then harvest the
    # player-data links: google-video redirector links are emitted as direct
    # sources, everything else as external hoster links.
    try:
        sources = []
        if url is None:
            return sources
        hostDict = hostDict + hostprDict
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # aliases arrive serialized as a Python literal — eval on internal data only
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            ep = data['episode']
            url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                self.base_link, cleantitle.geturl(
                    data['tvshowtitle']), int(data['season']), ep)
            # r = client.request(url, headers=headers, timeout='10', output='geturl')
            r = cfScraper.get(url).content
            # NOTE(review): this None-check looks dead — 'url' was just built above
            # and is never reassigned by the fetch (the original code assigned the
            # redirected URL via output='geturl'); confirm searchShow fallback still fires.
            if url is None:
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
            if url is None:
                # fall back to the conventional movie watch-page URL
                url = '%s/film/%s/watching.html?ep=0' % (
                    self.base_link, cleantitle.geturl(data['title']))
        if url is None:
            raise Exception()
        # r = client.request(url, headers=headers, timeout='10')
        r = cfScraper.get(url).content
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        if 'tvshowtitle' in data:
            ep = data['episode']
            links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
        else:
            links = client.parseDOM(r, 'a', ret='player-data')
        for link in links:
            link = "https:" + link if not link.startswith('http') else link
            if '123movieshd' in link or 'seriesonline' in link:
                # these embeds expose google-video redirector URLs → direct sources
                # r = client.request(link, headers=headers, timeout='10')
                r = cfScraper.get(link).content
                r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                for i in r:
                    sources.append({
                        'source': 'gvideo',
                        'quality': directstream.googletag(i)[0]['quality'],
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
            else:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if valid:
                    quality, info = source_utils.get_release_quality(
                        link, link)
                    # load.php links are site-internal players, not hosters — skip
                    if 'load.php' not in link:
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'info': info,
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Resolve the watch page for a show episode or movie, then collect the
    # player-data hoster links whose domains appear in hostDict.
    sources = []
    try:
        if url == None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # aliases arrive serialized as a Python literal — eval on internal data only
        aliases = eval(data['aliases'])
        if 'tvshowtitle' in data:
            ep = data['episode']
            url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                self.base_link, cleantitle.geturl(
                    data['tvshowtitle']), int(data['season']), ep)
            r = client.request(url, timeout='10', output='geturl')
            # NOTE(review): this check looks dead — the geturl result went into 'r',
            # not 'url', so 'url' can't be None here; confirm whether the searchShow
            # fallback was meant to trigger on a failed redirect.
            if url == None:
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases)
        else:
            url = self.searchMovie(data['title'], data['year'], aliases)
        if url == None:
            raise Exception()
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        if 'tvshowtitle' in data:
            ep = data['episode']
            links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
        else:
            links = client.parseDOM(r, 'a', ret='player-data')
        for link in links:
            try:
                if link.startswith('//'):
                    link = 'https:' + link
                # host = last two labels of the link's domain (e.g. "example.com")
                host = re.findall('([\w]+[.][\w]+)$', urlparse(link.strip().lower()).netloc)[0]
                if not host in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')  # py2-era byte-string normalization
                # load.php links are site-internal players, not hosters — skip
                if 'load.php' not in link:
                    sources.append({
                        'source': host,
                        'quality': '720p',
                        'language': 'en',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
            except:
                pass  # per-link best effort: a bad link must not abort the rest
        return sources
    except:
        log_utils.log('123movies0 exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    # ExtraMovies: two scrape paths — episode pages (post-item results, SxxExx
    # links labelled "CDN") and movie pages (thumbnail results whose
    # download.php links are base64-encoded hoster URLs).
    try:
        sources = []
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        url = urljoin(
            self.base_link,
            self.search_link % quote_plus(cleantitle.query(title)))
        headers = {'User-Agent': self.User_Agent}
        if 'tvshowtitle' in data:
            html = cfScraper.get(url, headers=headers).content
            html = ensure_str(html)
            match = re.compile(
                'class="post-item.+?href="(.+?)" title="(.+?)"',
                re.DOTALL).findall(html)
            for url, item_name in match:
                # loose containment match on the normalized titles
                if cleantitle.getsearch(title).lower(
                ) in cleantitle.getsearch(item_name).lower():
                    season_url = '%02d' % int(data['season'])
                    episode_url = '%02d' % int(data['episode'])
                    sea_epi = 'S%sE%s' % (season_url, episode_url)
                    result = cfScraper.get(url, headers=headers, timeout=10).content
                    Regex = re.compile('href="(.+?)"',
                                       re.DOTALL).findall(result)
                    for ep_url in Regex:
                        if sea_epi in ep_url:
                            # derive quality from markers in the link itself
                            if '1080p' in ep_url:
                                qual = '1080p'
                            elif '720p' in ep_url:
                                qual = '720p'
                            elif '480p' in ep_url:
                                qual = '480p'
                            else:
                                qual = 'SD'
                            sources.append({
                                'source': 'CDN',
                                'quality': qual,
                                'language': 'en',
                                'url': ep_url,
                                'direct': False,
                                'debridonly': False
                            })
        else:
            html = requests.get(url, headers=headers).text
            match = re.compile(
                '<div class="thumbnail".+?href="(.+?)" title="(.+?)"',
                re.DOTALL).findall(html)
            for url, item_name in match:
                if cleantitle.getsearch(title).lower(
                ) in cleantitle.getsearch(item_name).lower():
                    if '1080' in url:
                        quality = '1080p'
                    elif '720' in url:
                        quality = '720p'
                    else:
                        quality = 'SD'
                    result = requests.get(url, headers=headers, timeout=10).text
                    Regex = re.compile('href="/download.php.+?link=(.+?)"',
                                       re.DOTALL).findall(result)
                    for link in Regex:
                        if 'server=' not in link:
                            # download.php links usually carry the target URL base64-encoded
                            try:
                                link = base64.b64decode(link)
                                link = ensure_str(link)
                            except Exception:
                                pass  # some links are plain — use them as-is
                            try:
                                host = link.split('//')[1].replace(
                                    'www.', '')
                                host = host.split('/')[0].lower()
                            except Exception:
                                pass  # NOTE(review): 'host' may be stale/undefined here if split fails
                            _hostDict = hostDict + hostprDict
                            valid, host = source_utils.is_host_valid(
                                host, _hostDict)
                            if not valid:
                                continue
                            # if not self.filter_host(host):
                            #     continue
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': False,
                                'debridonly': False
                            })
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('ExtraMovies - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    # Extract iframe embeds from the page; seehd embeds are unwrapped one
    # level, 24hd embeds are resolved via a POST to the player API into
    # direct file links, anything else is passed through as a hoster link.
    try:
        sources = []
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
        }
        first_url = url
        r = cfScraper.get(first_url).content
        # capture scheme, host and path of every iframe separately
        links = re.compile('<iframe.+?src="(.+?)://(.+?)/(.+?)"',
                           re.DOTALL).findall(r)
        for http, host, url in links:
            host = host.replace('www.', '')
            url = '%s://%s/%s' % (http, host, url)
            if 'seehd' in url:
                # seehd wraps the real embed in another iframe — unwrap it
                r = cfScraper.get(url).content
                extra_link = re.compile('<center><iframe.+?src="(.+?)"',
                                        re.DOTALL).findall(r)[0]
                valid, host = source_utils.is_host_valid(
                    extra_link, hostDict)
                sources.append({
                    'source': host,
                    'quality': '720p',
                    'language': 'en',
                    'url': extra_link,
                    'direct': False,
                    'debridonly': False
                })
            elif '24hd' in url:
                # the video id follows 'v/' in the embed URL
                url = url.split('v/')[1]
                post_link = urlparse.urljoin(self.hdclub_link, url)
                payload = {'r': first_url, 'd': 'www.24hd.club'}
                post_data = requests.post(post_link, headers=headers, data=payload)
                response = post_data.content
                link = re.compile('"file":"(.+?)","label":"(.+?)"',
                                  re.DOTALL).findall(response)
                for link, quality in link:
                    link = link.replace('\/', '/')  # unescape JSON slashes
                    # NOTE(review): 480p is deliberately(?) reported as SD — confirm
                    if '1080p' in quality:
                        quality = '1080p'
                    elif '720p' in quality:
                        quality = '720p'
                    elif '480p' in quality:
                        quality = 'SD'
                    else:
                        quality = 'SD'
                    sources.append({
                        'source': 'Direct',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'direct': True,
                        'debridonly': False
                    })
            else:
                # unknown embed: hand the reconstructed URL to the resolvers
                sources.append({
                    'source': host,
                    'quality': '720p',
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
        return sources
    except Exception:
        return sources