def get_movie(self, imdb, title, year):
    """Search the site and return the relative URL matching title and year."""
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        # Each result card lives in a "hover-group..." div.
        result = client.parseDOM(result, "div", attrs={"class": "hover-group.+?"})
        title = cleantitle.movie(title)
        # Accept the requested year +/- 1 to tolerate metadata mismatches.
        years = [ '>%s<' % str(year), '>%s<' % str(int(year) + 1), '>%s<' % str(int(year) - 1) ]
        # (movie id, last <h5> text, last <p> text) per card.
        result = [(client.parseDOM(i, "a", ret="data-movieid")[0], client.parseDOM(i, "h5")[-1], client.parseDOM(i, "p")[-1]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        # Strip scheme/host if a full URL came back.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except:
            url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape the playable Sweflix stream URL for a previously matched title."""
    sources = []
    try:
        if url == None:
            return sources
        # The footer page links out to the player ("play/") page.
        page = urlparse.urljoin(self.base_link, self.footer_link % url)
        html = client.source(page)
        hrefs = client.parseDOM(html, "a", ret="href")
        play = [h for h in hrefs if 'play/' in h][0]
        play = urlparse.urljoin(self.base_link, play)
        html = client.source(play)
        # The first <source type="video/..."> element holds the direct stream.
        stream = client.parseDOM(html, "source", ret="src", attrs={"type": "video/.+?"})[0]
        quality = '1080p' if '1080p' in stream else 'HD'
        sources.append({
            'source': 'Sweflix',
            'quality': quality,
            'provider': 'Sweflix',
            'url': stream
        })
        return sources
    except:
        return sources
def get_movie(self, imdb, title, year):
    """Search the site and return the relative URL matching title and year."""
    try:
        query = self.search_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)
        result = cloudflare.source(query)
        result = result.decode("iso-8859-1").encode("utf-8")
        result = client.parseDOM(result, "div", attrs={"class": "movie_table"})
        title = cleantitle.movie(title)
        # Tolerate +/- 1 year in the listing's "(YYYY)" suffix.
        years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]
        # (href, title attribute of the second <a>) per result row.
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[1]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = client.replaceHTMLCodes(result)
        # Unwrap redirector links that carry the target in a "u" query param.
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)["u"][0]
        except:
            pass
        url = urlparse.urlparse(url).path
        url = url.encode("utf-8")
        return url
    except:
        return
def resolve(url):
    """Solve the host's captcha-protected form and return the direct link."""
    try:
        result = client.request(url, close=False)
        post = {}
        # Replay every hidden form field untouched.
        f = client.parseDOM(result, "Form", attrs={"action": ""})
        k = client.parseDOM(f, "input", ret="name", attrs={"type": "hidden"})
        for i in k:
            post.update({i: client.parseDOM(f, "input", ret="value", attrs={"name": i})[0]})
        # Add the solved captcha fields.
        post.update(captcha.request(result))
        post = urllib.urlencode(post)
        request = urllib2.Request(url, post)
        # Retry a few times; a "download2" page is treated as not-ready and
        # triggers another attempt after a short sleep.
        for i in range(0, 5):
            try:
                response = urllib2.urlopen(request, timeout=10)
                result = response.read()
                response.close()
                if "download2" in result:
                    raise Exception()
                url = client.parseDOM(result, "a", ret="href", attrs={"target": ""})[0]
                return url
            except:
                time.sleep(1)
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Return the relative URL for one episode (url carries the show name)."""
    try:
        if url == None: return
        title = url
        # Episodes are matched by their SxxExx tag in the post title.
        hdlr = 'S%02dE%02d' % (int(season), int(episode))
        query = self.search_link % (urllib.quote_plus('%s "%s"' % (title, hdlr)))
        query = urlparse.urljoin(self.tvbase_link, query)
        result = client.source(query)
        result = client.parseDOM(result, "header", attrs = { "class": "post-title" })
        title = cleantitle.tv(title)
        # Keep the first href and first anchor text of each header.
        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        # Split the post title into show name and SxxExx tag.
        result = [(i[0], re.compile('(.+?) (S\d*E\d*)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if title == cleantitle.tv(i[1])]
        result = [i[0] for i in result if hdlr == i[2]][0]
        # Make the URL relative to the TV base link.
        url = result.replace(self.tvbase_link, '')
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def resolve(self, url):
    """Resolve a page path to a playable URL via the first responsive mirror."""
    try:
        url = urlparse.urlparse(url).path
        result = ""
        # Try each mirror until one serves the player container.
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.request(urlparse.urljoin(base_link, url), headers=self.headers)
            if "showvideo" in str(result): break
        result = result.decode("iso-8859-1").encode("utf-8")
        url = client.parseDOM(result, "div", attrs={"id": "showvideo"})[0]
        # Normalize upper-case IFRAME markup so parseDOM can find it.
        url = url.replace("<IFRAME", "<iframe").replace(" SRC=", " src=")
        url = client.parseDOM(url, "iframe", ret="src")[0]
        url = client.replaceHTMLCodes(url)
        # Unwrap redirector links ("u" or "url" query params), then hand off
        # to the generic hoster resolvers.
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)["u"][0]
        except:
            pass
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)["url"][0]
        except:
            pass
        url = resolvers.request(url)
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Look up a show's season page id and return "<id>|<episode>"."""
    try:
        if url == None: return
        query = self.tvbase_link + self.index_link
        # The site search is a POST whose "p" field is a JSON-ish payload
        # with the show name as KeyWord.
        post = urllib.urlencode({'a': 'retrieve', 'c': 'result', 'p': '{"KeyWord":"%s","Page":"1","NextToken":""}' % url})
        result = client.source(query, post=post)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "tr")
        show = cleantitle.tv(url)
        season = '%01d' % int(season)
        episode = '%02d' % int(episode)
        result = [client.parseDOM(i, "h1")[0] for i in result]
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
        # (href, show name with "Season N..." stripped, season number).
        result = [(i[0], re.sub('\sSeason(|\s)\d*.+', '', i[1]), re.compile('\sSeason *(\d*) *').findall(i[1])[0]) for i in result]
        result = [i for i in result if show == cleantitle.tv(i[1])]
        result = [i[0] for i in result if season == i[2]][0]
        # Keep only the id after "v=" and append the zero-padded episode.
        url = result.split('v=', 1)[-1]
        url = '%s|%s' % (url, episode)
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Search mirrors for a show (by name or alt name) and return its path."""
    try:
        query = self.search_link
        post = urllib.urlencode({'searchquery': show, 'searchin': '2'})
        result = ''
        # Try mirrors until the search-results widget is returned.
        links = [self.link_1, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, query), post=post, headers=self.headers)
            if 'widget search-page' in str(result): break
        result = client.parseDOM(result, "div", attrs = { "class": "widget search-page" })[0]
        result = client.parseDOM(result, "td")
        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        # Tolerate +/- 1 year in the "(YYYY)" suffix.
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(client.parseDOM(i, "a", ret="href")[-1], client.parseDOM(i, "a")[-1]) for i in result]
        result = [i for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = client.replaceHTMLCodes(result)
        # Unwrap redirector links that carry the target in a "u" query param.
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except:
            pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Search '<show> "Season S" "Episode E"' and return the match's path."""
    try:
        if url == None: return
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        query = '%s "Season %s" "Episode %s"' % (url, season, episode)
        query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))
        result = cloudflare.source(query)
        result = client.parseDOM(result, "header", attrs = { "class": "entry-header" })
        show = cleantitle.tv(url)
        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        # Split "<Show>: Season S ... Episode E" out of the anchor text.
        result = [(i[0], re.compile('(.+?): Season (\d*).+?Episode (\d*)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1], i[1][0][2]) for i in result if len(i[1]) > 0]
        # Compare numerically so zero-padding differences don't matter.
        result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
        result = [i[0] for i in result if show == cleantitle.tv(i[1])][0]
        url = client.replaceHTMLCodes(result)
        # Unwrap redirector links that carry the target in a "u" query param.
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except:
            pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def resolve(url):
    """Resolve a movdivx link to a direct stream URL."""
    try:
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://www.movdivx.com/%s' % url
        result = client.request(url)
        post = {}
        # Replay the hidden form fields plus the free-download button.
        f = client.parseDOM(result, "Form", attrs = { "action": "" })[0]
        k = client.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
        for i in k:
            post.update({i: client.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
        post.update({'method_free': 'Free Download'})
        post = urllib.urlencode(post)
        result = client.request(url, post=post)
        # Unpack the last eval(...) packed-JS blob on the page.
        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack.unpack(result)
        # Candidate URLs: <embed src> plus any file: "..." entries.
        url = client.parseDOM(result, "embed", ret="src")
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        # Drop subtitle files and force a plain http scheme.
        url = [i for i in url if not i.endswith('.srt')]
        url = 'http://' + url[0].split('://', 1)[-1]
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Match a show (or its alternate title) in the site index by year."""
    try:
        result = client.source(self.base_link)
        # Sign in first when the index page is not served anonymously.
        if not "'index show'" in result:
            cookie = client.source(self.sign_link, post=self.key_link, output='cookie')
            result = client.source(self.base_link, cookie=cookie)
        result = client.parseDOM(result, "div", attrs={"class": "index show"})
        # (name, year value, href) per index entry.
        result = [(client.parseDOM(i, "a", attrs={"class": "name"})[0], client.parseDOM(i, "span", attrs={"class": "value"})[0], client.parseDOM(i, "a", ret="href")[0]) for i in result]
        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [ i[2] for i in result if any(x == cleantitle.tv(i[0]) for x in shows) ][0]
        # Strip scheme/host if a full URL came back.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except:
            url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def resolve(url):
    """Return the unpacked player script for a movdivx link."""
    try:
        # Extract the media id and rebuild a canonical page URL.
        media_id = re.compile('//.+?/([\w]+)').findall(url)[0]
        page = 'http://www.movdivx.com/%s' % media_id
        html = client.request(page)
        # Replay the hidden form fields plus the free-download button.
        form = client.parseDOM(html, "Form", attrs={"action": ""})[0]
        names = client.parseDOM(form, "input", ret="name", attrs={"type": "hidden"})
        post = dict((n, client.parseDOM(form, "input", ret="value", attrs={"name": n})[0]) for n in names)
        post['method_free'] = 'Free Download'
        html = client.request(page, post=urllib.urlencode(post))
        # Unpack the last eval(...) packed-JS blob on the page.
        packed = re.compile('(eval.*?\)\)\))').findall(html)[-1]
        return jsunpack.unpack(packed)
    except:
        return
def get_movie(self, imdb, title, year):
    """Find an Indian-language movie page; uses OMDb to pick the language."""
    try:
        # Only Indian titles are handled; check country/language via OMDb.
        search = 'http://www.omdbapi.com/?i=tt%s' % imdb
        search = client.source(search)
        search = json.loads(search)
        country = [i.strip() for i in search['Country'].split(',')]
        if not 'India' in country: return
        languages = ['hindi', 'tamil', 'telugu', 'malayalam']
        language = [i.strip().lower() for i in search['Language'].split(',')]
        language = [i for i in language if any(x == i for x in languages)][0]
        query = self.search_link % (urllib.quote_plus(title), language)
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        # Keep only the "Movies" search category.
        result = client.parseDOM(result, "div", attrs = { "class": "search-category" })
        result = [i for i in result if 'Movies' in client.parseDOM(i, "p")[0]][0]
        result = client.parseDOM(result, "li")
        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
        # Prefer year-matched entries, but fall back to all if none match.
        r = [i for i in result if any(x in i[1] for x in years)]
        if not len(r) == 0: result = r
        result = [i[0] for i in result if title == cleantitle.movie(i[1])][0]
        # Strip scheme/host if a full URL came back.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except:
            url = result
        url = url.replace('../', '/')
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Search '<show> "Season S" "Episode E"' and return the match's path."""
    try:
        if url == None: return
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        query = '%s "Season %s" "Episode %s"' % (url, season, episode)
        query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))
        result = cloudflare.source(query)
        # Result rows: the "first element" item plus remaining "element" items.
        r = client.parseDOM(result, "li", attrs = { "class": "first element.+?" })
        r += client.parseDOM(result, "li", attrs = { "class": "element.+?" })
        show = cleantitle.tv(url)
        result = [(client.parseDOM(i, "a", ret="href"), re.compile('>(.+?): Season (\d*), Episode (\d*)<').findall(i)) for i in r]
        result = [(i[0][0], i[1][0][0], i[1][0][1], i[1][0][2]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        # Compare numerically so zero-padding differences don't matter.
        result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
        result = [i[0] for i in result if show == cleantitle.tv(i[1])][0]
        # Strip scheme/host if a full URL came back.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except:
            url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def resolve(url):
    """Log in to Movreel and poll for the 'Download Link' anchor."""
    try:
        user = control.setting('movreel_user')
        password = control.setting('movreel_password')
        # NOTE(review): login endpoint looks redacted/placeholder — verify
        # against the original addon source.
        login = '******'
        post = {'op': 'login', 'login': user, 'password': password, 'redirect': url}
        post = urllib.urlencode(post)
        result = client.request(url, close=False)
        result += client.request(login, post=post, close=False)
        post = {}
        # Replay the hidden fields of the last "F1" form, with both the free
        # and premium method fields left blank.
        f = client.parseDOM(result, "Form", attrs = { "name": "F1" })[-1]
        k = client.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
        for i in k:
            post.update({i: client.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
        post.update({'method_free': '', 'method_premium': ''})
        post = urllib.urlencode(post)
        request = urllib2.Request(url, post)
        # Retry a few times; the download anchor may take a moment to appear.
        for i in range(0, 3):
            try:
                response = urllib2.urlopen(request, timeout=10)
                result = response.read()
                response.close()
                url = re.compile('(<a .+?</a>)').findall(result)
                url = [i for i in url if 'Download Link' in i][-1]
                url = client.parseDOM(url, "a", ret="href")[0]
                return url
            except:
                time.sleep(1)
    except:
        return
def resolve(url):
    """Return the unpacked embed script for a 180upload link."""
    try:
        # Extract the media id and rebuild the embed page URL.
        media_id = re.compile('//.+?/([\w]+)').findall(url)[0]
        page = 'http://180upload.com/embed-%s.html' % media_id
        html = client.request(page)
        # Replay the captcha form's hidden fields verbatim.
        form = client.parseDOM(html, "form", attrs={"id": "captchaForm"})[0]
        names = client.parseDOM(form, "input", ret="name", attrs={"type": "hidden"})
        post = dict((n, client.parseDOM(form, "input", ret="value", attrs={"name": n})[0]) for n in names)
        html = client.request(page, post=urllib.urlencode(post))
        # Unpack the last eval(...) packed-JS blob on the page.
        packed = re.compile('(eval.*?\)\)\))').findall(html)[-1]
        return jsunpack.unpack(packed)
    except:
        return
def resolve(url):
    """Build an rtmp command string from the page's flash player config."""
    try:
        result = client.request(url)
        post = {}
        # Replay the hidden form fields plus the watch button.
        f = client.parseDOM(result, "Form", attrs = { "action": "" })
        k = client.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
        for i in k:
            post.update({i: client.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
        post.update({'method_free': 'Watch Free!'})
        post = urllib.urlencode(post)
        result = client.request(url, post=post)
        # Normalize the inline JS so the regexes below match reliably.
        result = result.replace('\\/', '/').replace('\n', '').replace('\'', '"').replace(' ', '')
        swfUrl = re.compile('\.embedSWF\("(.+?)"').findall(result)[0]
        swfUrl = urlparse.urljoin(url, swfUrl)
        streamer = re.compile('flashvars=.+?"file":"(.+?)"').findall(result)[0]
        playpath = re.compile('flashvars=.+?p2pkey:"(.+?)"').findall(result)[0]
        # Assemble an rtmp-style URL with swf verification enabled.
        url = '%s playpath=%s conn=S:%s pageUrl=%s swfUrl=%s swfVfy=true timeout=20' % (streamer, playpath, playpath, url, swfUrl)
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Match a show (or its alternate title) in the site index by year."""
    try:
        html = client.source(self.base_link)
        # Sign in first when the index page is not served anonymously.
        if not "'index show'" in html:
            cookie = client.source(self.sign_link, post=self.key_link, output="cookie")
            html = client.source(self.base_link, cookie=cookie)
        # Collect (name, year value, href) for every index entry.
        entries = []
        for block in client.parseDOM(html, "div", attrs={"class": "index show"}):
            name = client.parseDOM(block, "a", attrs={"class": "name"})[0]
            value = client.parseDOM(block, "span", attrs={"class": "value"})[0]
            href = client.parseDOM(block, "a", ret="href")[0]
            entries.append((name, value, href))
        wanted = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
        entries = [e for e in entries if any(y in e[1] for y in years)]
        match = [e[2] for e in entries if cleantitle.tv(e[0]) in wanted][0]
        # Strip scheme/host if a full URL came back.
        try:
            url = re.compile("//.+?(/.+)").findall(match)[0]
        except:
            url = match
        url = client.replaceHTMLCodes(url)
        url = url.encode("utf-8")
        return url
    except:
        return
def resolve(self, url):
    """Resolve a page path to a playable URL via the first responsive mirror."""
    try:
        url = urlparse.urlparse(url).path
        result = ''
        # Try each mirror until one serves the player container.
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.request(urlparse.urljoin(base_link, url), headers=self.headers)
            if 'showvideo' in str(result): break
        result = result.decode('iso-8859-1').encode('utf-8')
        url = client.parseDOM(result, "div", attrs={"id": "showvideo"})[0]
        # Normalize upper-case IFRAME markup so parseDOM can find it.
        url = url.replace('<IFRAME', '<iframe').replace(' SRC=', ' src=')
        url = client.parseDOM(url, "iframe", ret="src")[0]
        url = client.replaceHTMLCodes(url)
        # Unwrap redirector links ("u" or "url" query params), then hand off
        # to the generic hoster resolvers.
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except:
            pass
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
        except:
            pass
        url = resolvers.request(url)
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search and return the relative URL whose title and "(YYYY)" match."""
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        result = client.parseDOM(result, "div", attrs = { "class": "home_post_cont.+?" })
        title = cleantitle.movie(title)
        # Tolerate +/- 1 year in the "(YYYY)" suffix.
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        # The poster <img title> attribute contains anchor markup holding
        # the display title; decode and parse it out.
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "img", ret="title")[0]) for i in result]
        result = [(i[0], client.replaceHTMLCodes(i[1])) for i in result]
        result = [(i[0], client.parseDOM(i[1], "a")) for i in result]
        result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        # Strip scheme/host if a full URL came back.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except:
            url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search the site and return the relative URL matching title and year."""
    try:
        query = self.search_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)
        result = cloudflare.source(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs={"class": "movie_table"})
        title = cleantitle.movie(title)
        # Tolerate +/- 1 year in the listing's "(YYYY)" suffix.
        years = [ '(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1) ]
        # (href, title attribute of the second <a>) per result row.
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[1]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = client.replaceHTMLCodes(result)
        # Unwrap redirector links that carry the target in a "u" query param.
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except:
            pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def resolve(url):
    """Resolve via the mobile site, retrying until a sources list appears."""
    try:
        result = client.request(url, mobile=True, close=False)
        try:
            # Replay the POST form, downgrading submit buttons to hidden
            # inputs so they are captured too.
            post = {}
            f = client.parseDOM(result, "Form", attrs = { "method": "POST" })[0]
            f = f.replace('"submit"', '"hidden"')
            k = client.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k:
                post.update({i: client.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post = urllib.urlencode(post)
        except:
            post=None
        # Retry until the player's sources:[...] block shows up.
        for i in range(0, 10):
            try:
                result = client.request(url, post=post, mobile=True, close=False)
                result = result.replace('\n','')
                result = re.compile('sources *: *\[.+?\]').findall(result)[-1]
                result = re.compile('file *: *"(http.+?)"').findall(result)
                # Prefer HLS (.m3u8) streams, fall back to other files.
                url = [i for i in result if '.m3u8' in i]
                if len(url) > 0: return url[0]
                url = [i for i in result if not '.m3u8' in i]
                if len(url) > 0: return url[0]
            except:
                time.sleep(1)
    except:
        return
def resolve(self, url):
    """Return the direct video URL with the site's auth cookie appended."""
    try:
        html = client.request(url)
        # Re-request with a session cookie when the player is not present.
        if not "my_video" in html:
            cookie = client.request(self.sign_link, post=self.key_link, output='cookie')
            html = client.request(url, cookie=cookie)
        # Look for webm then mp4; a later hit overwrites an earlier one.
        stream = None
        for mime in ("video/webm", "video/mp4"):
            try:
                stream = client.parseDOM(html, "source", ret="src", attrs={"type": mime})[0]
            except:
                pass
        if stream == None:
            return
        stream = urlparse.urljoin(self.base_link, stream)
        return '%s|Cookie=%s' % (stream, urllib.quote_plus('video=true'))
    except:
        return
def resolve(url):
    """Solve the host's captcha-protected form and return the direct link."""
    try:
        result = client.request(url, close=False)
        post = {}
        # Replay every hidden form field untouched.
        f = client.parseDOM(result, "Form", attrs={"action": ""})
        k = client.parseDOM(f, "input", ret="name", attrs={"type": "hidden"})
        for i in k:
            post.update({ i: client.parseDOM(f, "input", ret="value", attrs={"name": i})[0] })
        # Add the solved captcha fields.
        post.update(captcha.request(result))
        post = urllib.urlencode(post)
        request = urllib2.Request(url, post)
        # Retry a few times; a "download2" page is treated as not-ready and
        # triggers another attempt after a short sleep.
        for i in range(0, 5):
            try:
                response = urllib2.urlopen(request, timeout=10)
                result = response.read()
                response.close()
                if 'download2' in result:
                    raise Exception()
                url = client.parseDOM(result, "a", ret="href", attrs={"target": ""})[0]
                return url
            except:
                time.sleep(1)
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Match a show (or its alternate title) in the site index by year.

    Returns the site-relative URL for the show, or None on any failure.
    (Dead commented-out debug logging with a hardcoded personal path was
    removed; use the addon's regular logging facilities instead.)
    """
    try:
        result = client.source(self.base_link)
        # Sign in first when the index page is not served anonymously.
        if not "'index show'" in result:
            cookie = client.source(self.sign_link, post=self.key_link, output='cookie')
            result = client.source(self.base_link, cookie=cookie)
        result = client.parseDOM(result, "div", attrs = { "class": "index show" })
        # (name, year value, href) per index entry.
        result = [(client.parseDOM(i, "a", attrs = { "class": "name" })[0], client.parseDOM(i, "span", attrs = { "class": "value" })[0], client.parseDOM(i, "a", ret="href")[0]) for i in result]
        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = [str(year), str(int(year)+1), str(int(year)-1)]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [i[2] for i in result if any(x == cleantitle.tv(i[0]) for x in shows)][0]
        # Strip scheme/host if a full URL came back.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except:
            url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search the movie index and return the matching result's path."""
    try:
        query = urlparse.urljoin(self.base_link, self.moviesearch_link + urllib.quote_plus(title))
        result = cloudflare.source(query)
        # Collect result rows from list items and entry headers alike.
        r = client.parseDOM(result, "li", attrs = { "class": "first element.+?" })
        r += client.parseDOM(result, "li", attrs = { "class": "element.+?" })
        r += client.parseDOM(result, "header", attrs = { "class": "entry-header" })
        title = cleantitle.movie(title)
        # Tolerate +/- 1 year in the "(YYYY)" suffix.
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in r]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        # Strip any markup inside the anchor text before comparing titles.
        result = [(i[0], re.sub('<.+?>', '', i[1])) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        # Strip scheme/host if a full URL came back.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except:
            url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search listing-videos results; year is parsed out of the entry title."""
    try:
        # Collapse punctuation/underscores to spaces for the site's search.
        query = self.search_link % (urllib.quote_plus(re.sub(r'[\W_]+', ' ', title)))
        query = urlparse.urljoin(self.base_link, query)
        result = cloudflare.source(query)
        # Normalize typographic dash/apostrophe before parsing.
        result = result.replace('–','-').replace('’','\'')
        result = client.parseDOM(result, "ul", attrs = { "class": "listing-videos.+?" })[0]
        result = client.parseDOM(result, "li", attrs = { "class": ".+?" })
        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0]) for i in result]
        # (href, title with the year suffix stripped, 4-digit years found).
        result = [(i[0], re.sub('\s(\(|)(\d{4})(.+)', '', i[1]), re.compile('(\d{4})').findall(i[1])) for i in result]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        # Strip scheme/host if a full URL came back.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except:
            url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def resolve(url):
    """Submit the free-download form, unpack the JS and fetch the <file> URL."""
    try:
        html = client.request(url)
        # Replay the POST form's hidden fields plus the free-download button.
        form = client.parseDOM(html, "Form", attrs={"method": "POST"})[0]
        names = client.parseDOM(form, "input", ret="name", attrs={"type": "hidden"})
        post = dict((n, client.parseDOM(form, "input", ret="value", attrs={"name": n})[0]) for n in names)
        post['method_free'] = 'Free Download'
        html = client.request(url, post=urllib.urlencode(post))
        # Unpack the first eval(...) packed-JS blob on the page.
        packed = re.compile('(eval.*?\)\)\))').findall(html)[0]
        unpacked_url = jsunpack.unpack(packed)
        # The unpacked URL serves a document whose <file> element holds
        # the final stream URL.
        html = client.request(unpacked_url)
        return client.parseDOM(html, "file")[0]
    except:
        return
def resolve(url):
    """Submit the watch form and return the first reachable m3u8 URL."""
    try:
        result = client.request(url)
        post = {}
        # Replay the hidden form fields plus the watch button.
        f = client.parseDOM(result, "Form", attrs={"action": ""})
        k = client.parseDOM(f, "input", ret="name", attrs={"type": "hidden"})
        for i in k:
            post.update({ i: client.parseDOM(f, "input", ret="value", attrs={"name": i})[0] })
        post.update({'method_free': 'Watch Free!'})
        post = urllib.urlencode(post)
        result = client.request(url, post=post)
        # Scan the inline scripts for file:"...m3u8" entries.
        result = client.parseDOM(result, "script", attrs={"type": ".+?"})
        result = (''.join(result)).replace(' ', '').replace('\'', '"')
        result = re.compile('file:"(http.+?m3u8)"').findall(result)
        # Return the first candidate that actually resolves to a URL.
        for u in result:
            url = client.request(u, output='geturl')
            if not url == None: return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Return the relative URL for one episode (url carries the show name)."""
    try:
        if url == None: return
        title = url
        # Episodes are matched by their SxxExx tag in the post title.
        hdlr = 'S%02dE%02d' % (int(season), int(episode))
        query = self.search_link % (urllib.quote_plus('%s "%s"' % (title, hdlr)))
        query = urlparse.urljoin(self.tvbase_link, query)
        result = client.source(query)
        result = client.parseDOM(result, "header", attrs={"class": "post-title"})
        title = cleantitle.tv(title)
        # Keep the first href and first anchor text of each header.
        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        # Split the post title into show name and SxxExx tag.
        result = [(i[0], re.compile('(.+?) (S\d*E\d*)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if title == cleantitle.tv(i[1])]
        result = [i[0] for i in result if hdlr == i[2]][0]
        # Make the URL relative to the TV base link.
        url = result.replace(self.tvbase_link, '')
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def resolve(self, url):
    """Return the direct video URL with the site's auth cookie appended."""
    try:
        result = client.request(url)
        # Re-request with a session cookie when the player is not present.
        if not "my_video" in result:
            cookie = client.request(self.sign_link, post=self.key_link, output="cookie")
            result = client.request(url, cookie=cookie)
        url = None
        # Try webm then mp4; a later hit overwrites an earlier one.
        try:
            url = client.parseDOM(result, "source", ret="src", attrs={"type": "video/webm"})[0]
        except:
            pass
        try:
            url = client.parseDOM(result, "source", ret="src", attrs={"type": "video/mp4"})[0]
        except:
            pass
        if url == None: return
        url = urlparse.urljoin(self.base_link, url)
        url = "%s|Cookie=%s" % (url, urllib.quote_plus("video=true"))
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search (falling back to a proxy agent) and return the match's path."""
    try:
        query = urlparse.urljoin(self.base_link, self.moviesearch_link + urllib.quote_plus(title))
        result = cloudflare.source(query)
        # Fall back to the proxy agent when the direct request fails.
        if result == None: result = client.source(self.agent_link + urllib.quote_plus(query))
        # Tighten whitespace around tags so the regex below matches.
        result = result.replace('> ', '>').replace(' <', '<')
        r = client.parseDOM(result, "li", attrs = { "class": "first element.+?" })
        r += client.parseDOM(result, "li", attrs = { "class": "element.+?" })
        title = cleantitle.movie(title)
        # Tolerate +/- 1 year in the "(YYYY)" suffix.
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        # Pair each href with the "Title (YYYY)" text found in the row.
        result = [(client.parseDOM(i, "a", ret="href"), re.compile('>(.+?\(\d{4}\))<').findall(i)) for i in r]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], i[1].split('>')[-1]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = client.replaceHTMLCodes(result)
        # Unwrap redirector links that carry the target in a "u" query param.
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except:
            pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect GVideo/VK sources from the first few server links on a page."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url)
        # og:url gives the canonical page; "?link_id=..." anchors whose text
        # mentions "server" select the mirrors.
        u = client.parseDOM(result, "meta", ret="content", attrs={"property": "og:url"})[0]
        links = re.compile('<a href="([?]link_id=.+?)".+?>(.+?)</a>').findall(result)
        links = [u + i[0] for i in links if 'server' in i[1].lower()]
        for u in links[:3]:
            try:
                result = client.source(u)
                # A direct <source type="video/..."> is treated as GVideo.
                url = client.parseDOM(result, "source", ret="src", attrs={"type": "video/.+?"})
                if len(url) > 0:
                    i = googleplus.tag(url[0])[0]
                    sources.append({ 'source': 'GVideo', 'quality': i['quality'], 'provider': 'Clickplay', 'url': i['url'] })
                # Also decrypt the proxied "clickplay" link on the page.
                url = re.compile('proxy[.]link=clickplay[*](.+?)"').findall(result)[-1]
                url = gkplugins.decrypter(198, 128).decrypt(url, base64.urlsafe_b64decode('bW5pcUpUcUJVOFozS1FVZWpTb00='), 'ECB').split('\0')[0]
                # Only Google or VK hosts are accepted.
                if 'google' in url: source = 'GVideo'
                elif 'vk.com' in url: source = 'VK'
                else: raise Exception()
                url = resolvers.request(url)
                for i in url:
                    sources.append({ 'source': source, 'quality': i['quality'], 'provider': 'Clickplay', 'url': i['url'] })
            except:
                pass
        return sources
    except:
        return sources
def resolve(url):
    """Resolve a grifthost embed to a direct stream URL."""
    try:
        # Extract the media id and rebuild a canonical embed URL.
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://grifthost.com/embed-%s.html' % url
        result = client.request(url)
        # Some embeds gate the player behind a POST form; replay it if present.
        try:
            post = {}
            f = client.parseDOM(result, "Form", attrs = { "method": "POST" })[0]
            f = f.replace('"submit"', '"hidden"')
            k = client.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k:
                post.update({i: client.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post = urllib.urlencode(post)
            result = client.request(url, post=post)
        except:
            pass
        # Unpack the last eval(...) packed-JS blob on the page.
        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack.unpack(result)
        # Candidate URLs: <embed src> plus any file: "..." entries.
        url = client.parseDOM(result, "embed", ret="src")
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        # Drop subtitle files and force a plain http scheme.
        url = [i for i in url if not i.endswith('.srt')]
        url = 'http://' + url[0].split('://', 1)[-1]
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Find a movie post, excluding season posts and "mark-8" entries."""
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        result = client.parseDOM(result, "div", attrs = { "id": "post-.+?" })
        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        # (href, title attr, year-status divs, "mark-8" divs) per post.
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0], client.parseDOM(i, "div", attrs = { "class": "status status-year" }), client.parseDOM(i, "div", attrs = { "class": "mark-8" })) for i in result]
        result = [(i[0], i[1], i[2][0], i[3]) for i in result if len(i[2]) > 0]
        # Drop entries whose title ends in "Season N" (TV posts).
        result = [(i[0], i[1], i[2], i[3], re.compile('Season (\d*)$').findall(i[1])) for i in result]
        result = [(i[0], i[1], i[2], i[3]) for i in result if len(i[4]) == 0]
        # Drop entries flagged with a "mark-8" div.
        result = [(i[0], i[1], i[2]) for i in result if len(i[3]) == 0]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        # Strip scheme/host if a full URL came back.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except:
            url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def resolve(url):
    """Resolve a 180upload embed to a direct stream URL."""
    try:
        # Extract the media id and rebuild the embed page URL.
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://180upload.com/embed-%s.html' % url
        result = client.request(url)
        post = {}
        # Replay the captcha form's hidden fields verbatim.
        f = client.parseDOM(result, "form", attrs = { "id": "captchaForm" })[0]
        k = client.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
        for i in k:
            post.update({i: client.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
        post = urllib.urlencode(post)
        result = client.request(url, post=post)
        # Unpack the last eval(...) packed-JS blob on the page.
        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack.unpack(result)
        # Candidate URLs: <embed src> plus any 'file','...' entries.
        url = client.parseDOM(result, "embed", ret="src")
        url += re.compile("'file' *, *'(.+?)'").findall(result)
        # Drop subtitle files and force a plain http scheme.
        url = [i for i in url if not i.endswith('.srt')]
        url = 'http://' + url[0].split('://', 1)[-1]
        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape hoster links for a movie page, trying mirror base links.

    Returns a list of {source, quality, provider, url} dicts; empty on failure.
    """
    try:
        sources = []
        if url is None:
            return sources

        # Try each mirror until one serves a page containing the link list.
        result = ""
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, url), headers=self.headers)
            if "link_name" in str(result):
                break
        result = result.decode("iso-8859-1").encode("utf-8")
        result = result.replace("\n", "")

        # Page-level quality label applies to every link on the page.
        quality = re.compile(">Links - Quality(.+?)<").findall(result)[0]
        quality = quality.strip()
        if quality == "CAM" or quality == "TS":
            quality = "CAM"
        elif quality == "SCREENER":
            quality = "SCR"
        else:
            quality = "SD"

        links = client.parseDOM(result, "div", attrs={"id": "links"})[0]
        links = client.parseDOM(links, "ul")

        for i in links:
            try:
                host = client.parseDOM(i, "li", attrs={"id": "link_name"})[-1]
                # Google-translated pages wrap the original text in a span.
                try:
                    host = client.parseDOM(host, "span", attrs={"class": "google-src-text"})[0]
                except Exception:
                    pass
                host = host.strip().lower()
                if host not in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode("utf-8")

                url = client.parseDOM(i, "a", ret="href")[0]
                url = client.replaceHTMLCodes(url)
                # Unwrap redirector links carrying the target in ?u=.
                try:
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)["u"][0]
                except Exception:
                    pass
                if not url.startswith("http"):
                    url = urlparse.urljoin(self.base_link, url)
                url = url.encode("utf-8")

                sources.append({"source": host, "quality": quality, "provider": "Movie25", "url": url})
            except Exception:
                pass

        return sources
    except Exception:
        return sources
def get_movie(self, imdb, title, year):
    """Search mirrors for a movie and return its relative URL path.

    Prefers an exact cleaned-title match; otherwise probes up to 10
    candidate pages for the IMDB id. Returns a utf-8 path or None.
    """
    try:
        query = self.search_link % urllib.quote_plus(title)

        # Try each mirror until one serves the search results table.
        result = ''
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
            if 'movie_table' in str(result):
                break
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs={"class": "movie_table"})

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        result = [(client.parseDOM(i, "a", ret="href")[0],
                   client.parseDOM(i, "a", ret="title")[1]) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        # Unwrap redirector links carrying the target in ?u=.
        try:
            result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
        except Exception:
            pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

        match = [i[0] for i in result if title == cleantitle.movie(i[1])]

        # Deduplicate candidate paths, preserving order.
        match2 = [i[0] for i in result]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if not match2:
            return

        for i in match2[:10]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                # No exact title match: confirm the candidate via IMDB id.
                result = client.source(base_link + i, headers=self.headers)
                if str('tt' + imdb) in str(result):
                    url = i
                    break
            except Exception:
                pass

        url = url.encode('utf-8')
        return url
    except Exception:
        return
def resolve(url):
    """Resolve a page (fetched as mobile) to its <video><source src> URL.

    Returns the source URL string, or None on any failure.
    """
    try:
        result = client.request(url, mobile=True)
        url = client.parseDOM(result, "video")[0]
        url = client.parseDOM(url, "source", ret="src", attrs={"type": ".+?"})[0]
        return url
    except Exception:
        return
def resolve(url):
    """Resolve a page to the download link inside span#realdownload.

    Returns the href string, or None on any failure.
    """
    try:
        result = client.request(url)
        url = client.parseDOM(result, "span", attrs={"id": "realdownload"})[0]
        url = client.parseDOM(url, "a", ret="href")[0]
        return url
    except Exception:
        return
def resolve(url):
    """Resolve a page to the download link inside span#realdownload.

    NOTE(review): functionally identical to another resolve() in this file;
    presumably these live in different provider modules — confirm before
    deduplicating. Returns the href string, or None on any failure.
    """
    try:
        result = client.request(url)
        url = client.parseDOM(result, "span", attrs={"id": "realdownload"})[0]
        url = client.parseDOM(url, "a", ret="href")[0]
        return url
    except Exception:
        return
def get_movie(self, imdb, title, year):
    """Search for a movie, verify title/year on its page, return its path.

    The candidate page's <title> is parsed to re-check both the cleaned
    title and a year within +/- 1; the canonical link supplies the final
    URL. Returns a utf-8 path, or None on any failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))
        result = client.source(query)

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

        result = client.parseDOM(result, "h3", attrs={"class": ".+?"})
        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
        result = [(i[0][0], i[1][-1]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[0] for x in years) or any(x in i[1] for x in years)]
        result = [i[0] for i in result
                  if title in cleantitle.movie(i[0]) or title in cleantitle.movie(i[1])][0]

        # Search hits point at tag pages; the real page drops the /tag/ prefix.
        result = result.replace('/tag/', '/')
        result = cloudflare.source(result)

        # Verify the page really is the requested title and year.
        r = client.parseDOM(result, "title")[0]
        t = re.sub('(\.|\_|\(|\[|\s)(\d{4}|3D)(\.|\_|\)|\]|\s)(.+)', '', r)
        if not title == cleantitle.movie(t):
            raise Exception()
        y = re.compile('[\.|\_|\(|\[|\s](\d{4})[\.|\_|\)|\]|\s]').findall(r)[0]
        if not any(x == y for x in years):
            raise Exception()

        result = client.parseDOM(result, "link", ret="href", attrs={"rel": "canonical"})[0]
        # Strip the scheme/host, keep only the path portion.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except Exception:
            url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape hoster links for a Vidics movie page.

    Quality is inferred from the format keywords listed in the link blocks.
    Returns a list of {source, quality, provider, url} dicts; empty on failure.
    """
    try:
        sources = []
        if url is None:
            return sources

        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url)
        result = result.decode('iso-8859-1').encode('utf-8')

        links = client.parseDOM(result, "div", attrs={"class": "lang"})[0]
        links = client.parseDOM(links, "div", attrs={"class": "movie_link.+?"})

        # Collect all format keywords across the links to pick one quality.
        fmt = [client.parseDOM(i, "h4")[0] for i in links]
        fmt = [re.findall('\w+', i.lower()) for i in fmt]
        fmt = sum(fmt, [])
        if any(x in ['dvdscr', 'r5', 'r6'] for x in fmt):
            quality = 'SCR'
        elif any(x in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts'] for x in fmt):
            quality = 'CAM'
        else:
            quality = 'SD'

        for i in links:
            try:
                # Host name is the first label of the anchor text's domain.
                host = client.parseDOM(i, "a", attrs={"target": ".+?"})[0]
                host = host.split('.', 1)[0]
                host = host.strip().lower()
                if host not in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                url = client.parseDOM(i, "a", ret="href")[0]
                url = client.replaceHTMLCodes(url)
                url = urlparse.urljoin(self.base_link, url)
                url = url.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'provider': 'Vidics', 'url': url})
            except Exception:
                pass

        return sources
    except Exception:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape hoster links for a WSO movie or TV page.

    URLs containing a /YYYY/MM/ date segment are treated as TV content and
    joined against tvbase_link. Returns a list of source dicts; empty on
    failure.
    """
    try:
        sources = []
        if url is None:
            return sources

        # TV episode URLs carry a /YYYY/MM/ path component.
        content = re.compile('/\d{4}/\d{2}/').findall(url)
        if len(content) > 0:
            url = urlparse.urljoin(self.tvbase_link, url)
        else:
            url = urlparse.urljoin(self.base_link, url)

        result = client.source(url)

        links = client.parseDOM(result, "td", attrs={"class": "even tdhost"})
        links += client.parseDOM(result, "td", attrs={"class": "odd tdhost"})

        # One quality label applies to the whole page.
        q = re.compile('<label>Quality</label>(.+?)<').findall(result)
        if len(q) > 0:
            q = q[0]
        else:
            q = ''
        if q.endswith(('CAM', 'TS')):
            quality = 'CAM'
        else:
            quality = 'SD'

        for i in links:
            try:
                # Take the middle label of the hoster domain (drop sub & TLD).
                host = client.parseDOM(i, "a")[0]
                host = host.split('<', 1)[0]
                host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                host = host.strip().lower()
                if host not in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                url = client.parseDOM(i, "a", ret="href")[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'provider': 'WSO', 'url': url})
            except Exception:
                pass

        return sources
    except Exception:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape hoster links from an Icefilms video page, trying mirrors.

    HD 720p and SD (DVDRip, falling back to Screener, then R5/R6) sections
    are collected separately; each link's onclick id is packed into the
    site's expected query-string payload. Returns a list of source dicts.
    """
    try:
        sources = []
        if url is None:
            return sources

        # Video id "t" from the v= query parameter.
        t = url.split('v=', 1)[-1].rsplit('&', 1)[0]
        url = self.video_link % t

        # Try each mirror until one serves the rip sections.
        result = ''
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, url), headers=self.headers)
            if 'ripdiv' in str(result):
                break
        result = result.decode('iso-8859-1').encode('utf-8')

        # Anti-scraping token required by the download endpoint.
        sec = re.compile('lastChild[.]value="(.+?)"').findall(result)[0]

        links = client.parseDOM(result, "div", attrs={"class": "ripdiv"})

        hd = [i for i in links if '>HD 720p<' in i]
        sd = [i for i in links if '>DVDRip / Standard Def<' in i]
        if len(sd) == 0:
            sd = [i for i in links if '>DVD Screener<' in i]
        if len(sd) == 0:
            sd = [i for i in links if '>R5/R6 DVDRip<' in i]

        if len(hd) > 0:
            hd = hd[0].split('<p>')
        if len(sd) > 0:
            sd = sd[0].split('<p>')

        links = [(i, 'HD') for i in hd] + [(i, 'SD') for i in sd]

        for i in links:
            try:
                quality = i[1]

                host = client.parseDOM(i[0], "a")[-1]
                host = re.sub('\s|<.+?>|</.+?>|.+?#\d*:', '', host)
                host = host.strip().lower()
                # HD and SD links are validated against separate host lists.
                if quality == 'HD' and host not in hosthdDict:
                    raise Exception()
                if quality == 'SD' and host not in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                # The link id lives in the anchor's onclick handler.
                url = client.parseDOM(i[0], "a", ret="onclick")[-1]
                url = re.compile('[(](.+?)[)]').findall(url)[0]
                # Build the payload the site's download endpoint expects;
                # s and m mimic randomised client-side telemetry values.
                url = 'id=%s&t=%s&sec=%s&s=%s&m=%s&cap=&iqs=&url=' % (
                    url, t, sec, random.randrange(5, 50), random.randrange(100, 300) * -1)
                url = url.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'provider': 'Icefilms', 'url': url})
            except Exception:
                pass

        return sources
    except Exception:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape hoster links for a Moviestorm movie page.

    CAM/TS rows are skipped when any better row exists; otherwise all rows
    are used and marked CAM. Returns a list of source dicts; empty on failure.
    """
    try:
        sources = []
        if url is None:
            return sources

        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url)
        result = client.parseDOM(result, "div", attrs={"class": "links"})[0]
        result = client.parseDOM(result, "tr")
        # (quality cell text, last anchor href) per table row.
        result = [(client.parseDOM(i, "td", attrs={"class": "quality_td"})[0],
                   client.parseDOM(i, "a", ret="href")[-1]) for i in result]

        ts_quality = ['CAM', 'TS']
        # Prefer non-CAM/TS rows; fall back to everything if none exist.
        links = [i for i in result if not any(x in i[0] for x in ts_quality)]
        if len(links) == 0:
            links = result

        for i in links:
            try:
                url = i[1]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                # Reduce the exit-redirect URL down to the bare hoster name.
                host = re.sub('.+?/exit/\d*-|[.].+?[.]html|http://(|www[.])|/.+|[.].+$', '', i[1])
                host = host.strip().lower()
                if host not in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                if any(x in i[0] for x in ts_quality):
                    quality = 'CAM'
                else:
                    quality = 'SD'

                sources.append({'source': host, 'quality': quality, 'provider': 'Moviestorm', 'url': url})
            except Exception:
                pass

        return sources
    except Exception:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape Google Video links from an Xmovies8 page.

    Anchors whose label starts with the video width ("1920…" / "1280…") map
    to 1080p / HD respectively; only google-hosted URLs are accepted.
    Returns a list of source dicts; empty on failure.
    """
    try:
        sources = []
        if url is None:
            return sources

        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)

        # (href, label) for every anchor on the page.
        url = re.compile('(<a .+?</a>)').findall(result)
        url = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in url]
        url = [(i[0][0], i[1][0]) for i in url if len(i[0]) > 0 and len(i[1]) > 0]

        try:
            sources.append({'source': 'GVideo', 'quality': '1080p', 'provider': 'Xmovies8',
                            'url': [i[0] for i in url if i[1].startswith('1920') and 'google' in i[0]][0]})
        except Exception:
            pass
        try:
            sources.append({'source': 'GVideo', 'quality': 'HD', 'provider': 'Xmovies8',
                            'url': [i[0] for i in url if i[1].startswith('1280') and 'google' in i[0]][0]})
        except Exception:
            pass

        return sources
    except Exception:
        return sources
def get_movie(self, imdb, title, year):
    """Search mirrors for a movie and return its relative URL path.

    NOTE(review): near-identical to another get_movie in this file
    (double-quote string style aside); presumably separate provider
    modules — confirm before deduplicating. Prefers an exact cleaned-title
    match; otherwise probes up to 10 candidates for the IMDB id.
    """
    try:
        query = self.search_link % urllib.quote_plus(title)

        # Try each mirror until one serves the search results table.
        result = ""
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
            if "movie_table" in str(result):
                break
        result = result.decode("iso-8859-1").encode("utf-8")
        result = client.parseDOM(result, "div", attrs={"class": "movie_table"})

        title = cleantitle.movie(title)
        years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]

        result = [(client.parseDOM(i, "a", ret="href")[0],
                   client.parseDOM(i, "a", ret="title")[1]) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        # Unwrap redirector links carrying the target in ?u=.
        try:
            result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)["u"][0], i[1]) for i in result]
        except Exception:
            pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

        match = [i[0] for i in result if title == cleantitle.movie(i[1])]

        # Deduplicate candidate paths, preserving order.
        match2 = [i[0] for i in result]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if not match2:
            return

        for i in match2[:10]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                # No exact title match: confirm the candidate via IMDB id.
                result = client.source(base_link + i, headers=self.headers)
                if str("tt" + imdb) in str(result):
                    url = i
                    break
            except Exception:
                pass

        url = url.encode("utf-8")
        return url
    except Exception:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape hoster links from a Movie25 page (via the cloudflare fetcher).

    Returns a list of {source, quality, provider, url} dicts; empty on failure.
    """
    try:
        sources = []
        if url is None:
            return sources

        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '')

        # Page-level quality label applies to every link on the page.
        quality = re.compile('>Links - Quality(.+?)<').findall(result)[0]
        quality = quality.strip()
        if quality == 'CAM' or quality == 'TS':
            quality = 'CAM'
        elif quality == 'SCREENER':
            quality = 'SCR'
        else:
            quality = 'SD'

        links = client.parseDOM(result, "div", attrs={"id": "links"})[0]
        links = client.parseDOM(links, "ul")

        for i in links:
            try:
                host = client.parseDOM(i, "li", attrs={"id": "link_name"})[-1]
                host = host.strip().lower()
                if host not in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                url = client.parseDOM(i, "a", ret="href")[0]
                url = client.replaceHTMLCodes(url)
                url = urlparse.urljoin(self.base_link, url)
                url = url.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'provider': 'Movie25', 'url': url})
            except Exception:
                pass

        return sources
    except Exception:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Decrypt a YIFYstream page link into Google Video sources.

    The i= query parameter is AES-ECB decrypted (gkplugins) into a target
    URL, which the generic resolver expands into a quality/url list.
    Returns a list of source dicts; empty on failure.
    """
    try:
        sources = []
        if url is None:
            return sources

        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)
        result = client.parseDOM(result, "a", ret="href")

        # The player link is a .php URL carrying an encrypted i= parameter.
        u = [i for i in result if '.php' in i and 'i=' in i][0]
        u = client.replaceHTMLCodes(u)
        u = urlparse.parse_qs(urlparse.urlparse(u).query)['i'][0]

        # AES-ECB decrypt with the site's static key; payload is
        # NUL-padded, keep everything before the first NUL.
        url = gkplugins.decrypter(198, 128).decrypt(
            u, base64.urlsafe_b64decode('b3F5czkzZEswc2FEY3pRNW9NSTE='), 'ECB').split('\0')[0]

        url = resolvers.request(url)
        # resolvers.request returns a list of {quality, url} dicts on success.
        if not isinstance(url, list):
            raise Exception()

        for i in url:
            sources.append({'source': 'GVideo', 'quality': i['quality'],
                            'provider': 'YIFYstream', 'url': i['url']})

        return sources
    except Exception:
        return sources
def resolve(url):
    """Resolve a page that may need a POST round-trip to reveal its link.

    Re-submits every input of the first POST form (if any), then extracts
    the URL from a JS "var lnkN = 'http...'" assignment. Returns the URL
    string, or None on any failure.
    """
    try:
        result = client.request(url)

        post = {}
        try:
            f = client.parseDOM(result, "form", attrs={"method": "POST"})[0]
        except Exception:
            # No form: POST an empty body below, matching original behaviour.
            f = ''
        k = client.parseDOM(f, "input", ret="name")
        for i in k:
            post.update({i: client.parseDOM(f, "input", ret="value", attrs={"name": i})[0]})
        post = urllib.urlencode(post)
        result = client.request(url, post=post)

        url = re.compile("var\s+lnk\d* *= *'(http.+?)'").findall(result)[0]
        return url
    except Exception:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Resolve the Filepup embed on a Movienight page into a single source.

    Quality is read from the "Quality :" label; anything other than SD/HD
    is treated as CAM. Returns a one-element source list; empty on failure.
    """
    try:
        sources = []
        if url is None:
            return sources

        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url)

        quality = re.compile('Quality *: *(.+)').findall(result)
        quality = 'SD' if len(quality) == 0 else quality[0]
        quality = re.sub('<.+?>', '', quality).strip().upper()
        if quality == 'SD':
            quality = 'SD'
        elif quality == 'HD':
            quality = 'HD'
        else:
            quality = 'CAM'

        url = client.parseDOM(result, "iframe", ret="src")
        url = [i for i in url if 'filepup' in i][0]
        url = filepup.resolve(url)
        if url is None:
            raise Exception()

        sources.append({'source': 'Filepup', 'quality': quality, 'provider': 'Movienight', 'url': url})

        return sources
    except Exception:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Resolve the Videomega embed on an Onlinemovies page into a source.

    Quality is inferred from the "<strong>Quality</strong>" keywords;
    defaults to HD when no CAM/SCR keyword appears. Returns a one-element
    source list; empty on failure.
    """
    try:
        sources = []
        if url is None:
            return sources

        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url)

        fmt = re.compile('<strong>Quality</strong>.+?<strong>(.+?)</strong>').findall(result)
        if len(fmt) > 0:
            fmt = (' '.join((fmt[0].decode("utf-8").lower().strip()).split())).split(' ')
        if any(x in ['dvdscr', 'r5', 'r6'] for x in fmt):
            quality = 'SCR'
        elif any(x in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts'] for x in fmt):
            quality = 'CAM'
        else:
            quality = 'HD'

        # The embed carries the id in either hashkey= or ?ref=.
        result = client.parseDOM(result, "div", attrs={"class": "video-embed"})[0]
        url = re.compile('hashkey=(.+?)[\'|\"]').findall(result)
        url += re.compile('[?]ref=(.+?)[\'|\"]').findall(result)
        url = self.videomega_link % url[0]
        url = videomega.resolve(url)
        if url is None:
            raise Exception()

        sources.append({'source': 'Videomega', 'quality': quality, 'provider': 'Onlinemovies', 'url': url})

        return sources
    except Exception:
        return sources
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Return the relative URL of a specific episode on a show page.

    Signs in (cookie round-trip) when the season tabs are missing, then
    reads the data-href of the anchor targeting "#<season>-<episode>".
    Returns a utf-8 path, or None on any failure.
    """
    try:
        if url is None:
            return

        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url)

        # Season tabs are only rendered for signed-in sessions; sign in
        # and refetch if they are absent.
        if not "menu season-tabs" in result:
            cookie = client.source(self.sign_link, post=self.key_link, output='cookie')
            result = client.source(url, cookie=cookie)

        result = client.parseDOM(
            result, "a", ret="data-href",
            attrs={"href": "#%01d-%01d" % (int(season), int(episode))})[0]

        # Strip the scheme/host, keep only the path portion.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except Exception:
            url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        return