def movie(self, imdb, title, year):
    """Search the provider for `title` (`year`) and queue up to six HD links.

    Populates and returns self.elysium_url as a list of [href, quality, info]
    entries; returns None when debrid is disabled or the scrape fails
    (callers treat None and [] the same).
    """
    self.elysium_url = []
    try:
        # Debrid-only provider: bail out immediately when debrid is off.
        if not debridstatus == 'true':
            raise Exception()
        cleanmovie = cleantitle_get(title)
        title = cleantitle_query(title)
        titlecheck = cleanmovie + year
        query = self.movie_link % (urllib.quote_plus(title), year)
        query = urlparse.urljoin(self.base_link, query)
        link = OPEN_URL(query).text
        match = re.compile('<a class="title" href="(.+?)">(.+?)</a>').findall(link)
        for h, t in match:
            print("RAPIDMOVIES", h, t)
            h = h.encode('utf-8')
            t = t.encode('utf-8')
            check = cleantitle_get_2(t)
            print("RAPIDMOVIES check", check)
            # Make site-relative result links absolute.
            if h.startswith("/"):
                h = self.base_link + h
            if year in t and titlecheck in check:
                info = get_size(t)
                quality = quality_tag(t)
                # Keep only HD entries, capped at six via the shared counter.
                if "1080" in quality or "HD" in quality:
                    self.count += 1
                    if not self.count > 6:
                        print("RAPIDMOVIES PASSED", t, quality, info)
                        self.elysium_url.append([h, quality, info])
        return self.elysium_url
    except Exception:
        return
def sources(self, url, hostDict, hostprDict):
    """Resolve the pages queued in self.elysium_url into debrid-only sources.

    Returns a list of source dicts; on failure returns whatever was
    collected so far (possibly []).
    """
    sources = []
    try:
        # Queued entries are [page_url, title, type]; only the URL is used here.
        for page_url, _title, _type in self.elysium_url:
            req = OPEN_URL(page_url).content
            soup = BeautifulSoup(req)
            containers = soup.findAll('div', attrs={'class': 'post_content'})
            pattern = '<h.+?>(.*?)</h(.*?)</ul>'
            for container in containers:
                html = re.compile(pattern, re.DOTALL).findall(str(container))
                for titles, block in html:
                    quality = quality_tag(titles)
                    info = ''
                    if "hevc" in titles.lower():
                        info = "HEVC"
                    # NOTE(review): this overwrites the "HEVC" flag set above —
                    # preserved as-is; possibly an upstream bug.
                    info = get_size(block)
                    links = re.compile('href="([^"]+)').findall(block)
                    for href in links:
                        # Only keep links on premium (debrid-resolvable) hosts.
                        if any(value in href for value in hostprDict):
                            try:
                                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
                            except Exception:
                                host = 'Videomega'
                            link = client.replaceHTMLCodes(href)
                            link = link.encode('utf-8')
                            sources.append({'source': host, 'quality': quality, 'provider': 'Myvideolink', 'url': link, 'info': info, 'direct': False, 'debridonly': True})
        return sources
    except Exception:
        return sources
def movie(self, imdb, title, year):
    """Search the provider for `title` (`year`) and queue up to six HD links.

    Populates and returns self.zen_url as a list of [href, quality, info]
    entries; returns None when debrid is disabled or the scrape fails.
    """
    self.zen_url = []
    try:
        # Debrid-only provider: bail out immediately when debrid is off.
        if not debridstatus == 'true':
            raise Exception()
        cleanmovie = cleantitle_get(title)
        title = cleantitle_query(title)
        titlecheck = cleanmovie + year
        query = self.movie_link % (urllib.quote_plus(title), year)
        query = urlparse.urljoin(self.base_link, query)
        link = OPEN_URL(query).text
        match = re.compile('<a class="title" href="(.+?)">(.+?)</a>').findall(link)
        for h, t in match:
            print("RAPIDMOVIES", h, t)
            h = h.encode('utf-8')
            t = t.encode('utf-8')
            check = cleantitle_get_2(t)
            print("RAPIDMOVIES check", check)
            # Make site-relative result links absolute.
            if h.startswith("/"):
                h = self.base_link + h
            if year in t and titlecheck in check:
                info = get_size(t)
                quality = quality_tag(t)
                # Keep only HD entries, capped at six via the shared counter.
                if "1080" in quality or "HD" in quality:
                    self.count += 1
                    if not self.count > 6:
                        print("RAPIDMOVIES PASSED", t, quality, info)
                        self.zen_url.append([h, quality, info])
        return self.zen_url
    except Exception:
        return
def sources(self, url, hostDict, hostprDict):
    """Flatten the [size, quality, urls] entries queued in self.elysium_url
    into debrid-only source dicts.

    Per-link failures are skipped; returns the collected list (possibly []).
    """
    sources = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        for size, q, urls in self.elysium_url:
            for link in urls:
                try:
                    print("RELEASEBB SOURCES", size, q, link)
                    link = link.encode('utf-8')
                    # 'getbyurl' means quality must be derived from the URL itself.
                    if q == 'getbyurl':
                        quality = quality_tag(link)
                    else:
                        quality = q
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                    # Only premium (debrid-resolvable) hosts are accepted.
                    if not any(value in link for value in hostprDict):
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'provider': 'Releasebb', 'url': link, 'info': size, 'direct': False, 'debridonly': True})
                except Exception:
                    pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Extract hoster links from the [title, html] pairs queued in
    self.elysium_url and return them as debrid-only source dicts.
    """
    sources = []
    try:
        for title, page in self.elysium_url:
            quality = quality_tag(title)
            # Scan every href in the stored page fragment.
            for match in re.finditer('href="([^"]+)', page):
                href = match.group(1)
                try:
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
                except Exception:
                    host = "none"
                # Only premium (debrid-resolvable) hosts are accepted.
                if any(value in href for value in hostprDict):
                    href = href.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'provider': 'tvhd', 'url': href, 'direct': False, 'debridonly': True})
        return sources
    except Exception:
        return sources
def movie(self, imdb, title, year):
    """Query the Releasebb JSON search endpoint for `title` (`year`).

    Populates and returns self.elysium_url with [size, quality, urls]
    entries (urls = all <a href> targets from the post body); returns None
    when debrid is disabled or the request fails. Per-post errors are skipped.
    """
    try:
        if not debridstatus == 'true':
            raise Exception()
        self.elysium_url = []
        query = cleantitle_query(title)
        cleanmovie = cleantitle_get(title)
        query = "%s+%s" % (urllib.quote_plus(query), year)
        query = self.search_link % query
        query = urlparse.urljoin(self.search_base_link, query)
        r = client.request(query, headers=self.search_header_link, referer=query)
        posts = []
        dupes = []
        print("RELEASEBB QUERY", r)
        # The response embeds a JSON object at the end of the payload.
        try:
            posts += json.loads(re.findall('({.+?})$', r)[0])['results']
        except Exception:
            pass
        for post in posts:
            try:
                name = post['post_title'].encode('utf-8')
                url = post['post_name'].encode('utf-8')
                # Skip posts already queued in this run.
                if url in dupes:
                    raise Exception()
                dupes.append(url)
                print("RELEASEBB 2", name, url)
                # Strip year/episode/3D tags to get the bare title for logging.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleanmovie in cleantitle_get(name) or not year in name:
                    raise Exception()
                print("RELEASEBB 3 PASSED", t)
                content = post['post_content']
                url = list(client.parseDOM(content, 'a', ret='href'))
                size = get_size(content)
                quality = quality_tag(name)
                self.elysium_url.append([size, quality, url])
            except Exception:
                pass
        print("RELEASEBB PASSED", self.elysium_url)
        return self.elysium_url
    except Exception:
        return
def movies_info(self, url, year):
    """Fetch a detail page and return (year, quality) for it.

    Returns ('0', quality) when none of the page's info fields mention
    `year`, or None when the page cannot be fetched/parsed.
    """
    try:
        r = OPEN_URL(url).content
        q = client.parseDOM(r, 'div', attrs={'class': 'jtip-quality'})[0]
        q = quality_tag(q)
        info_fields = client.parseDOM(r, 'div', attrs={'class': 'jt-info'})
        # The page matches only if one of its info fields mentions the year.
        y = year if any(year in items for items in info_fields) else '0'
        return (y, q)
    except Exception:
        return
def movies_info(self, url, year):
    """Fetch a detail page and return (year, quality) for it.

    Returns ('0', quality) when none of the page's info fields mention
    `year`, or None when the page cannot be fetched/parsed.
    """
    try:
        r = OPEN_URL(url).content
        q = client.parseDOM(r, 'div', attrs={'class': 'jtip-quality'})[0]
        q = quality_tag(q)
        info_fields = client.parseDOM(r, 'div', attrs={'class': 'jt-info'})
        # The page matches only if one of its info fields mentions the year.
        y = year if any(year in items for items in info_fields) else '0'
        return (y, q)
    except Exception:
        return
def sources(self, url, hostDict, hostprDict):
    """Resolve the pages queued in self.elysium_url into debrid-only sources.

    Returns a list of source dicts; on failure returns whatever was
    collected so far (possibly []).
    """
    sources = []
    try:
        # Queued entries are [page_url, title, type]; only the URL is used here.
        for page_url, _title, _type in self.elysium_url:
            req = OPEN_URL(page_url).content
            soup = BeautifulSoup(req)
            containers = soup.findAll('div', attrs={'class': 'post_content'})
            pattern = '<h.+?>(.*?)</h(.*?)</ul>'
            for container in containers:
                html = re.compile(pattern, re.DOTALL).findall(str(container))
                for titles, block in html:
                    quality = quality_tag(titles)
                    info = ''
                    if "hevc" in titles.lower():
                        info = "HEVC"
                    # NOTE(review): this overwrites the "HEVC" flag set above —
                    # preserved as-is; possibly an upstream bug.
                    info = get_size(block)
                    links = re.compile('href="([^"]+)').findall(block)
                    for href in links:
                        # Only keep links on premium (debrid-resolvable) hosts.
                        if any(value in href for value in hostprDict):
                            try:
                                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
                            except Exception:
                                host = 'Videomega'
                            link = client.replaceHTMLCodes(href)
                            link = link.encode('utf-8')
                            sources.append({'source': host, 'quality': quality, 'provider': 'Myvideolink', 'url': link, 'info': info, 'direct': False, 'debridonly': True})
        return sources
    except Exception:
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Search the DDL index (keyed on the title's first character) for a
    season page matching this episode.

    Populates and returns self.zen_url with [url, quality, episode] entries;
    returns None when debrid is disabled or the scrape fails.
    """
    self.zen_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.getsearch(title)
        # Site search pages are indexed by the first character of the title.
        title_init = title[0]
        print("DDL TITLE INIT", title_init)
        cleanmovie = cleantitle.get(title)
        data['season'], data['episode'] = season, episode
        episodecheck = str("%02d" % int(data['episode']))
        query = self.search_link % title_init
        query = urlparse.urljoin(self.base_link, query)
        link = client.request(query)
        r = client.parseDOM(link, 'div', attrs={'class': 'downpara-list'})
        r = r[0]
        if r:
            match = re.compile('<a href="([^"]+)[^>]*>(.*?)</a>').findall(r)
            for match_url, match_title in match:
                try:
                    r_url = match_url.encode('utf-8')
                    r_title = match_title.encode('utf-8')
                    # "Season N" in the link text identifies the season page.
                    seasonid = re.findall("(?:S|s)eason (\d*)", r_title)[0]
                    seasonid = seasonid.encode('utf-8')
                    # Skip "(Pack)" entries — those bundle a whole season.
                    if seasonid == season and cleanmovie in cleantitle.get(r_title) and "(Pack)" not in r_title:
                        quality = quality_tag(r_title)
                        print("PASSED DDLSTV", r_url, quality, episodecheck)
                        self.zen_url.append([r_url, quality, episodecheck])
                except Exception:
                    pass
        return self.zen_url
    except Exception:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Search the provider for this show's sNNeNN episode and queue up to
    six HD links.

    Populates and returns self.zen_url with [href, quality, info] entries;
    returns None when debrid is disabled or the scrape fails.
    """
    self.zen_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        data['season'], data['episode'] = season, episode
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        # Lower-cased sNNeNN tag, e.g. "s01e05", used both in the query
        # and for the title match.
        episodecheck = ('S%02dE%02d' % (int(data['season']), int(data['episode']))).lower()
        titlecheck = cleanmovie + episodecheck
        query = self.shows_link % (urllib.quote_plus(title), episodecheck)
        query = urlparse.urljoin(self.base_link, query)
        link = OPEN_URL(query).text
        match = re.compile('<a class="title" href="(.+?)">(.+?)</a>').findall(link)
        for h, t in match:
            print("RAPIDMOVIES", h, t)
            h = h.encode('utf-8')
            t = t.encode('utf-8')
            check = cleantitle_get_2(t)
            print("RAPIDMOVIES check", check)
            # Make site-relative result links absolute.
            if h.startswith("/"):
                h = self.base_link + h
            if titlecheck in check:
                info = get_size(t)
                quality = quality_tag(t)
                # Keep only HD entries, capped at six via the shared counter.
                if "1080" in quality or "HD" in quality:
                    self.count += 1
                    if not self.count > 6:
                        print("RAPIDMOVIES PASSED", t, quality, info)
                        self.zen_url.append([h, quality, info])
        return self.zen_url
    except Exception:
        return
def movie(self, imdb, title, year):
    """Query the Releasebb JSON search endpoint for `title` (`year`).

    Populates and returns self.zen_url with [size, quality, urls] entries
    (urls = all <a href> targets from the post body); returns None when
    debrid is disabled or the request fails. Per-post errors are skipped.
    """
    try:
        if not debridstatus == 'true':
            raise Exception()
        self.zen_url = []
        query = cleantitle_query(title)
        cleanmovie = cleantitle_get(title)
        query = "%s+%s" % (urllib.quote_plus(query), year)
        query = self.search_link % query
        query = urlparse.urljoin(self.search_base_link, query)
        r = client.request(query, headers=self.search_header_link, referer=query)
        posts = []
        dupes = []
        print("RELEASEBB QUERY", r)
        # The response embeds a JSON object at the end of the payload.
        try:
            posts += json.loads(re.findall('({.+?})$', r)[0])['results']
        except Exception:
            pass
        for post in posts:
            try:
                name = post['post_title'].encode('utf-8')
                url = post['post_name'].encode('utf-8')
                # Skip posts already queued in this run.
                if url in dupes:
                    raise Exception()
                dupes.append(url)
                print("RELEASEBB 2", name, url)
                # Strip year/episode/3D tags to get the bare title for logging.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleanmovie in cleantitle_get(name) or not year in name:
                    raise Exception()
                print("RELEASEBB 3 PASSED", t)
                content = post['post_content']
                url = list(client.parseDOM(content, 'a', ret='href'))
                size = get_size(content)
                quality = quality_tag(name)
                self.zen_url.append([size, quality, url])
            except Exception:
                pass
        print("RELEASEBB PASSED", self.zen_url)
        return self.zen_url
    except Exception:
        return
def sources(self, url, hostDict, hostprDict):
    """Scrape the watch page's iframes for gvideo streams and hoster links.

    wp-embed frames are fetched and their file/label pairs emitted as
    direct gvideo sources; other frames are emitted as hoster sources when
    the host appears in hostDict. Returns the collected list (possibly []),
    or None when `url` is None.
    """
    sources = []
    try:
        if url is None:
            return
        try:
            link = OPEN_URL(url, timeout='10')
            html = BeautifulSoup(link.content)
            frames = html.findAll('iframe')
            for u in frames:
                result = u['src'].encode('utf-8')
                print("WONLINE sources", result)
                # Normalize protocol-relative URLs.
                if result.startswith("//"):
                    result = "http:" + result
                if "wp-embed.php" in result:
                    s = OPEN_URL(result, timeout='10').content
                    match = re.compile('file:\s*"(.+?)",label:"(.+?)",').findall(s)
                    for href, quality in match:
                        # Quality is derived from the stream URL, not the label.
                        quality = google_tag(href)
                        print("WONLINE SCRIPTS", href, quality)
                        sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Wonline', 'url': href, 'direct': True, 'debridonly': False})
                else:
                    if "google" in result:
                        quality = google_tag(result)
                    else:
                        quality = quality_tag(result)
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(result.strip().lower()).netloc)[0]
                    except Exception:
                        host = 'none'
                    hoster_url = replaceHTMLCodes(result)
                    hoster_url = hoster_url.encode('utf-8')
                    if host in hostDict:
                        sources.append({'source': host, 'quality': quality, 'provider': 'Wonline', 'url': hoster_url, 'direct': False, 'debridonly': False})
        except Exception:
            pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Flatten the [size, quality, urls] entries queued in self.zen_url
    into debrid-only source dicts.

    Per-link failures are skipped; returns the collected list (possibly []).
    """
    sources = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        for size, q, urls in self.zen_url:
            for link in urls:
                try:
                    print("RELEASEBB SOURCES", size, q, link)
                    link = link.encode('utf-8')
                    # 'getbyurl' means quality must be derived from the URL itself.
                    if q == 'getbyurl':
                        quality = quality_tag(link)
                    else:
                        quality = q
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                    # Only premium (debrid-resolvable) hosts are accepted.
                    if not any(value in link for value in hostprDict):
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'provider': 'Releasebb', 'url': link, 'info': size, 'direct': False, 'debridonly': True})
                except Exception:
                    pass
        return sources
    except Exception:
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Search the provider for this show's sNNeNN episode and queue up to
    six HD links.

    Populates and returns self.elysium_url with [href, quality, info]
    entries; returns None when debrid is disabled or the scrape fails.
    """
    self.elysium_url = []
    try:
        if not debridstatus == 'true':
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        data['season'], data['episode'] = season, episode
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        # Lower-cased sNNeNN tag, e.g. "s01e05", used both in the query
        # and for the title match.
        episodecheck = ('S%02dE%02d' % (int(data['season']), int(data['episode']))).lower()
        titlecheck = cleanmovie + episodecheck
        query = self.shows_link % (urllib.quote_plus(title), episodecheck)
        query = urlparse.urljoin(self.base_link, query)
        link = OPEN_URL(query).text
        match = re.compile('<a class="title" href="(.+?)">(.+?)</a>').findall(link)
        for h, t in match:
            print("RAPIDMOVIES", h, t)
            h = h.encode('utf-8')
            t = t.encode('utf-8')
            check = cleantitle_get_2(t)
            print("RAPIDMOVIES check", check)
            # Make site-relative result links absolute.
            if h.startswith("/"):
                h = self.base_link + h
            if titlecheck in check:
                info = get_size(t)
                quality = quality_tag(t)
                # Keep only HD entries, capped at six via the shared counter.
                if "1080" in quality or "HD" in quality:
                    self.count += 1
                    if not self.count > 6:
                        print("RAPIDMOVIES PASSED", t, quality, info)
                        self.elysium_url.append([h, quality, info])
        return self.elysium_url
    except Exception:
        return
def sources(self, url, hostDict, hostprDict):
    """Fetch each queued [movielink, title] page and collect premium-host
    links from its target='_blank' anchors as debrid-only sources.
    """
    sources = []
    try:
        for movielink, title in self.elysium_url:
            mylink = client.request(movielink)
            info = get_size(title)
            quality = quality_tag(title)
            hrefs = re.compile("<a target='_blank' href='(.+?)'>").findall(mylink)
            for href in hrefs:
                # Only absolute links on premium (debrid-resolvable) hosts.
                if any(value in href for value in hostprDict) and "http" in href:
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
                    except Exception:
                        host = 'Videomega'
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    href = client.replaceHTMLCodes(href)
                    href = href.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'provider': 'Tvrls', 'url': href, 'info': info, 'direct': False, 'debridonly': True})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Fetch each queued [movielink, title] page and collect premium-host
    links from its target='_blank' anchors as debrid-only sources.
    """
    sources = []
    try:
        for movielink, title in self.elysium_url:
            mylink = client.request(movielink)
            info = get_size(title)
            quality = quality_tag(title)
            hrefs = re.compile("<a target='_blank' href='(.+?)'>").findall(mylink)
            for href in hrefs:
                # Only absolute links on premium (debrid-resolvable) hosts.
                if any(value in href for value in hostprDict) and "http" in href:
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
                    except Exception:
                        host = 'Videomega'
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    href = client.replaceHTMLCodes(href)
                    href = href.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'provider': 'Tvrls', 'url': href, 'info': info, 'direct': False, 'debridonly': True})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape the watch page's iframes for gvideo streams and hoster links.

    wp-embed frames are fetched and their file/label pairs emitted as
    direct gvideo sources; other frames are emitted as hoster sources when
    the host appears in hostDict. Returns the collected list (possibly []),
    or None when `url` is None.
    """
    sources = []
    try:
        if url is None:
            return
        try:
            link = OPEN_URL(url, timeout='10')
            html = BeautifulSoup(link.content)
            frames = html.findAll('iframe')
            for u in frames:
                result = u['src'].encode('utf-8')
                print("WONLINE sources", result)
                # Normalize protocol-relative URLs.
                if result.startswith("//"):
                    result = "http:" + result
                if "wp-embed.php" in result:
                    s = OPEN_URL(result, timeout='10').content
                    match = re.compile('file:\s*"(.+?)",label:"(.+?)",').findall(s)
                    for href, quality in match:
                        # Quality is derived from the stream URL, not the label.
                        quality = google_tag(href)
                        print("WONLINE SCRIPTS", href, quality)
                        sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Wonline', 'url': href, 'direct': True, 'debridonly': False})
                else:
                    if "google" in result:
                        quality = google_tag(result)
                    else:
                        quality = quality_tag(result)
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(result.strip().lower()).netloc)[0]
                    except Exception:
                        host = 'none'
                    hoster_url = replaceHTMLCodes(result)
                    hoster_url = hoster_url.encode('utf-8')
                    if host in hostDict:
                        sources.append({'source': host, 'quality': quality, 'provider': 'Wonline', 'url': hoster_url, 'direct': False, 'debridonly': False})
        except Exception:
            pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape the Watchfilm player pages linked from the watch page.

    Three player-page layouts are probed: check1 (abouttext/aboutlink "cdn"
    stream plus an optional `var ff` hoster link), check2 (file/label
    pairs) and check3 (file/res pairs), the latter two yielding direct
    gvideo sources. Returns the collected list (possibly []), or None when
    `url` is None.
    """
    sources = []
    try:
        if url is None:
            return
        try:
            referer = url
            link = OPEN_URL(url, timeout='10')
            print("Watchfilm link", link.content)
            html = link.content
            headers = {'Referer': referer}
            r = re.compile('<a href="(.+?)" target="streamplayer">').findall(html)
            for result in r:
                print("Watchfilm SOURCES", result)
                result = result.encode('utf-8')
                # Normalize protocol-relative URLs.
                if result.startswith("//"):
                    result = "http:" + result
                if "player.watchfilm.to" in result:
                    try:
                        s = OPEN_URL(result, headers=headers).content
                        print("WATCHFILM RESULT", s)
                        # check1 raises IndexError when absent, skipping this player page.
                        check1 = re.findall("abouttext:\s*'(.+?)',", s)[0]
                        check2 = re.compile('file:\s*"(.+?)",label:"(.+?)",').findall(s)
                        check3 = re.compile('file":"(.+?)","res":"(.+?)"').findall(s)
                        if check1:
                            try:
                                print("WATCHFILM FOUND CHECK 1")
                                quality = quality_tag(check1)
                                h = re.findall("aboutlink:\s*'(.+?)',", s)[0]
                                if h:
                                    h = h.encode('utf-8')
                                    sources.append({'source': 'cdn', 'quality': quality, 'provider': 'Watchfilm', 'url': h, 'direct': True, 'debridonly': False})
                            except Exception:
                                pass
                            # NOTE(review): unguarded [0] — a missing `var ff`
                            # aborts to the outer handler; preserved as-is.
                            h2 = re.findall('var ff =\s*"(.+?)";', s)[0]
                            if h2:
                                try:
                                    h2 = h2.encode('utf-8')
                                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(h2.strip().lower()).netloc)[0]
                                    if host in hostDict:
                                        # NOTE(review): `quality` comes from the
                                        # check1 branch above; if that branch failed
                                        # early, the NameError lands in this except.
                                        sources.append({'source': host, 'quality': quality, 'provider': 'Watchfilm', 'url': h2, 'direct': False, 'debridonly': True})
                                except Exception:
                                    pass
                        if check2:
                            print("WATCHFILM FOUND CHECK 2")
                            try:
                                for href, quality in check2:
                                    href = href.encode('utf-8')
                                    quality = quality_tag(quality)
                                    sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Watchfilm', 'url': href, 'direct': True, 'debridonly': False})
                            except Exception:
                                pass
                        if check3:
                            print("WATCHFILM FOUND CHECK 3")
                            try:
                                for href, quality in check3:
                                    href = href.encode('utf-8')
                                    quality = quality_tag(quality)
                                    sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Watchfilm', 'url': href, 'direct': True, 'debridonly': False})
                            except Exception:
                                pass
                    except Exception:
                        pass
        except Exception:
            pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape the watch page's iframes for stream and hoster sources.

    wp-embed/123movies frames go through get_sources()/get_files() as
    direct gvideo streams; raptu.com frames are parsed for file/label
    pairs; anything else is emitted as a hoster source when the host is in
    hostDict. Returns the collected list (possibly []), or None when
    `url` is None.
    """
    sources = []
    try:
        if url is None:
            return
        try:
            link = OPEN_URL(url, timeout='10')
            html = BeautifulSoup(link.content)
            for u in html.findAll('iframe'):
                src = u['src'].encode('utf-8')
                print("WONLINE sources", src)
                # Normalize protocol-relative URLs.
                if src.startswith("//"):
                    src = "http:" + src
                if "wp-embed.php" in src or "player.123movies" in src:
                    try:
                        s = OPEN_URL(src).content
                        match = get_sources(s)
                        for h in match:
                            files = get_files(h)
                            for href in files:
                                # Stream URLs arrive JSON-escaped.
                                href = href.replace('\\', '')
                                quality = google_tag(href)
                                sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Wonline', 'url': href, 'direct': True, 'debridonly': False})
                    except Exception:
                        pass
                elif "raptu.com" in src:
                    try:
                        s = OPEN_URL(src).content
                        match = get_sources(s)
                        for h in match:
                            files = re.compile('"file":"(.+?)","label":"(.+?)",').findall(h)
                            for href, q in files:
                                # Stream URLs arrive JSON-escaped.
                                href = href.replace('\\', '')
                                quality = quality_tag(q)
                                sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Wonline', 'url': href, 'direct': True, 'debridonly': False})
                    except Exception:
                        pass
                else:
                    if "google" in src:
                        quality = google_tag(src)
                    else:
                        quality = quality_tag(src)
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(src.strip().lower()).netloc)[0]
                    except Exception:
                        host = 'none'
                    hoster_url = replaceHTMLCodes(src)
                    hoster_url = hoster_url.encode('utf-8')
                    if host in hostDict:
                        sources.append({'source': host, 'quality': quality, 'provider': 'Wonline', 'url': hoster_url, 'direct': False, 'debridonly': False})
        except Exception:
            pass
        return sources
    except Exception:
        return sources