def get_movie(self, imdb, title, year):
    """Search the site for *title* and return the matching movie page path.

    Matches on a cleaned title and accepts the given year +/- 1 (the year
    is matched as a `>YYYY<` text node).  Returns a UTF-8 path string, or
    None on any failure (best-effort scraper convention).
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "div", attrs={"class": "hover-group.+?"})

        title = cleantitle.movie(title)
        # Accept year, year+1, year-1 to tolerate release-date ambiguity.
        years = ['>%s<' % str(year), '>%s<' % str(int(year) + 1), '>%s<' % str(int(year) - 1)]

        # (movie id, heading text, paragraph text containing the year)
        result = [(client.parseDOM(i, "a", ret="data-movieid")[0], client.parseDOM(i, "h5")[-1], client.parseDOM(i, "p")[-1]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]

        # Strip scheme+host if present, keeping only the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search the site for *title*, excluding season entries and marked items.

    Filters results to those with a year badge, no "Season N" suffix and no
    "mark-8" flag, then matches cleaned title and year +/- 1.  Returns a
    UTF-8 path string, or None on failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "div", attrs={"id": "post-.+?"})

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

        # (href, title, year-badge divs, mark-8 divs)
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0], client.parseDOM(i, "div", attrs={"class": "status status-year"}), client.parseDOM(i, "div", attrs={"class": "mark-8"})) for i in result]
        # Keep only entries that actually carry a year badge.
        result = [(i[0], i[1], i[2][0], i[3]) for i in result if len(i[2]) > 0]
        # Drop TV entries titled "... Season N".
        result = [(i[0], i[1], i[2], i[3], re.compile('Season (\d*)$').findall(i[1])) for i in result]
        result = [(i[0], i[1], i[2], i[3]) for i in result if len(i[4]) == 0]
        # Drop entries flagged with the "mark-8" class.
        result = [(i[0], i[1], i[2]) for i in result if len(i[3]) == 0]

        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search a listing-videos index for *title* and return its page path.

    Matches cleaned title exactly and requires "(YYYY)" (year +/- 1) in the
    anchor title.  Returns a UTF-8 path string, or None on failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "ul", attrs={"class": "listing-videos.+?"})[0]
        result = client.parseDOM(result, "li", attrs={"class": ".+?"})

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Query a Blogger-style JSON feed for *title*, skipping TS/CAM rips.

    Parses the `showResult(...)` JSONP payload, keeps entries in the
    "movies" category, matches cleaned title and year +/- 1.  Returns a
    UTF-8 path string, or None on failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = sucuri.source(query)
        # The endpoint returns JSONP: showResult({...}).
        result = re.compile('showResult\((.*)\)').findall(result)[0]
        result = json.loads(result)
        result = result['feed']['entry']

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

        result = [i for i in result if 'movies' in [x['term'].lower() for x in i['category']]]
        # Pick the canonical HTML link for each entry.
        result = [[x for x in i['link'] if x['rel'] == 'alternate' and x['type'] == 'text/html'][0] for i in result]
        result = [(i['href'], i['title']) for i in result]

        # Split "Title YYYY quality..." and drop cam/telesync releases.
        result = [(i[0], re.compile('(.+?) (\d{4})(.+)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1], i[1][0][2]) for i in result if len(i[1]) > 0]
        result = [(i[0], i[1], i[2]) for i in result if not 'TS' in i[3] and not 'CAM' in i[3]]

        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search (via Cloudflare bypass) for *title* and return its page path.

    Collects result rows from several container markups, strips inner tags
    from the anchor text, then matches cleaned title and "(YYYY)" +/- 1.
    Returns a UTF-8 path string, or None on failure.
    """
    try:
        query = urlparse.urljoin(self.base_link, self.moviesearch_link + urllib.quote_plus(title))

        result = cloudflare.source(query)

        # The site uses several result-row markups; gather them all.
        r = client.parseDOM(result, "li", attrs={"class": "first element.+?"})
        r += client.parseDOM(result, "li", attrs={"class": "element.+?"})
        r += client.parseDOM(result, "header", attrs={"class": "entry-header"})

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in r]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        # Strip any nested markup from the anchor text before matching.
        result = [(i[0], re.sub('<.+?>', '', i[1])) for i in result]

        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search home_post_cont tiles for *title* and return its page path.

    The tile's title lives inside an HTML-escaped `img title` attribute
    which itself contains an anchor; it is unescaped and re-parsed before
    matching cleaned title and "(YYYY)" +/- 1.  Returns a UTF-8 path, or
    None on failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "div", attrs={"class": "home_post_cont.+?"})

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "img", ret="title")[0]) for i in result]
        # The img title attribute is escaped HTML containing an <a> tag.
        result = [(i[0], client.replaceHTMLCodes(i[1])) for i in result]
        result = [(i[0], client.parseDOM(i[1], "a")) for i in result]
        result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]

        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search a movie_table listing (via Cloudflare bypass) for *title*.

    Re-encodes the page from Latin-1, matches cleaned title and "(YYYY)"
    +/- 1, unwraps a `?u=` redirect if present and returns the URL path as
    UTF-8, or None on failure.
    """
    try:
        query = self.search_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)

        result = cloudflare.source(query)
        # Site serves Latin-1; normalize to UTF-8 before parsing.
        result = result.decode("iso-8859-1").encode("utf-8")
        result = client.parseDOM(result, "div", attrs={"class": "movie_table"})

        title = cleantitle.movie(title)
        years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]

        # Second anchor title holds the display title.
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[1]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = client.replaceHTMLCodes(result)
        # Unwrap "?u=<target>" redirect links when present.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)["u"][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode("utf-8")
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Query the site's AJAX search for *title* and verify by IMDB id.

    Posts to the ajaxy_sf endpoint, matches on cleaned title, then fetches
    the candidate page and requires the "tt<imdb>" id to appear in it.
    Returns a UTF-8 path string, or None on failure.
    """
    try:
        query = urlparse.urljoin(self.base_link, self.search_link)

        post = urllib.urlencode({'action': 'ajaxy_sf', 'sf_value': title})

        result = client.source(query, post=post)
        # Normalize typographic dash/apostrophe so titles compare cleanly.
        result = result.replace('&#8211;', '-').replace('&#8217;', '\'')
        result = json.loads(result)
        result = result['post']['all']

        title = cleantitle.movie(title)
        result = [i['post_link'] for i in result if title == cleantitle.movie(i['post_title'])][0]

        # Confirm the match by looking for the IMDB id on the page itself.
        check = client.source(result)
        if not str('tt' + imdb) in check: raise Exception()

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Find an Indian-language movie page for *title*, keyed by language.

    Looks up country/language on OMDb first and bails out unless the movie
    is Indian and in a supported language; searches the site per-language,
    prefers year-matching results, then matches on cleaned title.  Returns
    a UTF-8 path string, or None on failure / unsupported movie.
    """
    try:
        # Use OMDb metadata to gate on country and pick the search language.
        search = 'http://www.omdbapi.com/?i=tt%s' % imdb
        search = client.source(search)
        search = json.loads(search)

        country = [i.strip() for i in search['Country'].split(',')]
        if not 'India' in country: return

        languages = ['hindi', 'tamil', 'telugu', 'malayalam']
        language = [i.strip().lower() for i in search['Language'].split(',')]
        language = [i for i in language if any(x == i for x in languages)][0]

        query = self.search_link % (urllib.quote_plus(title), language)
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "div", attrs={"class": "search-category"})
        result = [i for i in result if 'Movies' in client.parseDOM(i, "p")[0]][0]
        result = client.parseDOM(result, "li")

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]

        # Prefer year-matching results, but fall back to all if none match.
        r = [i for i in result if any(x in i[1] for x in years)]
        if not len(r) == 0: result = r

        result = [i[0] for i in result if title == cleantitle.movie(i[1])][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = url.replace('../', '/')
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search a JSON film API for *title* and return the matching film id.

    Matches cleaned title, then fetches detail pages for up to two
    candidates and checks "(YYYY)" +/- 1 against the detail 'state' field.
    Returns the film id as a UTF-8 string, or None on failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = json.loads(result)
        result = result['data']['films']

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        result = [(i['id'], i['title'].encode('utf-8')) for i in result]
        # Cap at two candidates to limit the per-candidate detail requests.
        result = [i for i in result if title == cleantitle.movie(i[1])][:2]
        result = [(i[0], self.base_link + self.detail_link % i[0]) for i in result]
        result = [(i[0], client.source(i[1])) for i in result]
        # The detail 'state' field carries the "(YYYY)" release year.
        result = [(i[0], json.loads(i[1])['data']['state']) for i in result]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search a listing-videos index for *title*, parsing year from the text.

    Strips punctuation from the query, splits "Title (YYYY) ..." anchor
    titles into name and year, then matches cleaned title and year +/- 1.
    Returns a UTF-8 path string, or None on failure.
    """
    try:
        # Collapse punctuation/underscores to spaces for the site search.
        query = self.search_link % (urllib.quote_plus(re.sub(r'[\W_]+', ' ', title)))
        query = urlparse.urljoin(self.base_link, query)

        result = cloudflare.source(query)
        # Normalize typographic dash/apostrophe so titles compare cleanly.
        result = result.replace('&#8211;', '-').replace('&#8217;', '\'')
        result = client.parseDOM(result, "ul", attrs={"class": "listing-videos.+?"})[0]
        result = client.parseDOM(result, "li", attrs={"class": ".+?"})

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0]) for i in result]
        # Split "Title (YYYY) quality..." into (href, name, [year]).
        result = [(i[0], re.sub('\s(\(|)(\d{4})(.+)', '', i[1]), re.compile('(\d{4})').findall(i[1])) for i in result]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0]

        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search an encrypted catalog API for *title* and return its catalog id.

    Appends extra query parameters, decrypts the 'data' payload with the
    instance key, filters to films (type_film == '0'), then matches cleaned
    title and "(YYYY)" +/- 1.  Returns the id as a UTF-8 string, or None.
    """
    try:
        query = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(title)))
        query += self.__extra()

        result = client.source(query)
        result = json.loads(result)
        # The payload is encrypted; decrypt with the instance data key.
        result = self.__decrypt(self.data_key, result['data'])
        result = json.loads(result)
        result = result['categories']

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        result = [(i['catalog_id'], i['catalog_name'].encode('utf-8'), str(i['type_film'])) for i in result]
        # type_film '0' marks movies (as opposed to series).
        result = [i for i in result if i[2] == '0']

        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search for *title* with a proxy-agent fallback and regex row parsing.

    Falls back to an agent proxy when the direct fetch fails, flattens the
    page, extracts (id, heading, release year) per result div, then matches
    cleaned title and year +/- 1.  Returns a UTF-8 string, or None.
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        # Fall back to the agent proxy when the direct request fails.
        if result == None: result = client.source(self.agent_link + urllib.quote_plus(query))

        # Flatten the page so the per-row regex can match across lines.
        result = result.replace('\r','').replace('\n','').replace('\t','')
        result = re.compile('(<div id="*\d*.+?</div>)').findall(result)

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

        # (numeric id, <h4> heading, release year) per result row.
        result = [(re.compile('id="*(\d*)"*').findall(i), re.compile('<h4>(.+?)</h4>').findall(i), re.compile('Releasedatum *: *(\d{4})').findall(i)) for i in result]
        result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        # The heading nests markup; keep only the innermost text.
        result = [(i[0], i[1].rsplit('</span>')[0].split('>')[-1].strip(), i[2]) for i in result]

        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """POST-search mirrored hosts for *title* and return the result path.

    Tries each mirror until one returns a search-page widget, matches
    cleaned title and "(YYYY)" +/- 1, unwraps a `?u=` redirect if present
    and returns the URL path as UTF-8, or None on failure.
    """
    try:
        query = self.search_link
        post = urllib.urlencode({'searchquery': title, 'searchin': '1'})

        result = ''
        # Try each mirror until one serves the search results widget.
        links = [self.link_1, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, query), post=post, headers=self.headers)
            if 'widget search-page' in str(result): break

        result = client.parseDOM(result, "div", attrs={"class": "widget search-page"})[0]
        result = client.parseDOM(result, "td")

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, "a", ret="href")[-1], client.parseDOM(i, "a")[-1]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = client.replaceHTMLCodes(result)
        # Unwrap "?u=<target>" redirect links when present.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search (Cloudflare, with agent fallback) for *title*; return its path.

    Normalizes tag-adjacent whitespace, pulls "Title (YYYY)" rows from the
    element lists, matches cleaned title and year +/- 1, unwraps `?u=`
    redirects and returns the URL path as UTF-8, or None on failure.
    """
    try:
        query = urlparse.urljoin(self.base_link, self.moviesearch_link + urllib.quote_plus(title))

        result = cloudflare.source(query)
        # Fall back to the agent proxy when the direct request fails.
        if result == None: result = client.source(self.agent_link + urllib.quote_plus(query))

        # Tighten whitespace around tags so the title regex can match.
        result = result.replace('> ', '>').replace(' <', '<')

        r = client.parseDOM(result, "li", attrs={"class": "first element.+?"})
        r += client.parseDOM(result, "li", attrs={"class": "element.+?"})

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, "a", ret="href"), re.compile('>(.+?\(\d{4}\))<').findall(i)) for i in r]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], i[1].split('>')[-1]) for i in result]

        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = client.replaceHTMLCodes(result)
        # Unwrap "?u=<target>" redirect links when present.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search a movie_table listing (via Cloudflare bypass) for *title*.

    Re-encodes the page from Latin-1, matches cleaned title and "(YYYY)"
    +/- 1, unwraps a `?u=` redirect if present and returns the URL path as
    UTF-8, or None on failure.
    """
    try:
        query = self.search_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)

        result = cloudflare.source(query)
        # Site serves Latin-1; normalize to UTF-8 before parsing.
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs={"class": "movie_table"})

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        # Second anchor title holds the display title.
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[1]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = client.replaceHTMLCodes(result)
        # Unwrap "?u=<target>" redirect links when present.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search mirrored hosts for *title*; verify ambiguous hits via IMDB id.

    Tries mirrors until a movie_table page is served, filters by "(YYYY)"
    +/- 1, prefers exact cleaned-title matches, otherwise fetches up to 10
    candidate pages and accepts the first containing "tt<imdb>".  Returns a
    UTF-8 path, or None on failure.
    """
    try:
        query = self.search_link % urllib.quote_plus(title)

        result = ''
        # Try each mirror until one serves the results table.
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
            if 'movie_table' in str(result): break

        # Site serves Latin-1; normalize to UTF-8 before parsing.
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs={"class": "movie_table"})

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[1]) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]

        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        # Unwrap "?u=<target>" redirect links when present.
        try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
        except: pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

        # match: exact title hits; match2: all hits, de-duplicated in order.
        match = [i[0] for i in result if title == cleantitle.movie(i[1])]
        match2 = [i[0] for i in result]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        for i in match2[:10]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                # No exact title match: confirm candidates by IMDB id.
                result = client.source(base_link + i, headers=self.headers)
                if str('tt' + imdb) in str(result):
                    url = i
                    break
            except:
                pass

        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search via an external engine, then validate the landing page itself.

    Picks a candidate whose link or text matches title and year, rewrites
    /tag/ links, fetches the page, re-validates title and year from the
    page <title>, and returns the canonical link's path as UTF-8, or None.
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))

        result = client.source(query)

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

        result = client.parseDOM(result, "h3", attrs={"class": ".+?"})
        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
        result = [(i[0][0], i[1][-1]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        # Year and title may appear in the URL or the link text.
        result = [i for i in result if any(x in i[0] for x in years) or any(x in i[1] for x in years)]
        result = [i[0] for i in result if title in cleantitle.movie(i[0]) or title in cleantitle.movie(i[1])][0]

        # Tag archives redirect; rewrite to the direct path.
        result = result.replace('/tag/', '/')

        result = cloudflare.source(result)

        # Re-validate title and year from the landing page's <title>.
        r = client.parseDOM(result, "title")[0]

        t = re.sub('(\.|\_|\(|\[|\s)(\d{4}|3D)(\.|\_|\)|\]|\s)(.+)', '', r)
        if not title == cleantitle.movie(t): raise Exception()

        y = re.compile('[\.|\_|\(|\[|\s](\d{4})[\.|\_|\)|\]|\s]').findall(r)[0]
        if not any(x == y for x in years): raise Exception()

        result = client.parseDOM(result, "link", ret="href", attrs={"rel": "canonical"})[0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search post-panel results for *title*, with a single-result fallback.

    When the search page shows result panels, matches cleaned title and
    "(YYYY)" +/- 1 among them; when it instead lands directly on a movie
    page (no panels), validates og:title and uses the canonical link.
    Returns a UTF-8 path string, or None on failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = cloudflare.source(query)

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        match = client.parseDOM(result, "div", attrs={"class": "post-panel"})
        match = client.parseDOM(match, "h2")

        if len(match) == 0:
            # Search redirected straight to a movie page: validate og:title.
            t = client.parseDOM(result, "meta", ret="content", attrs={"property": "og:title"})[0]
            t = re.compile(': (.+?\(\d{4}\))').findall(t)[0]
            if not title == cleantitle.movie(t): return
            if not any(x in t for x in years): return
            result = client.parseDOM(result, "link", ret="href", attrs={"rel": "canonical"})[0]
        else:
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in match]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        # Reject off-site results.
        if not self.base_link in result: return

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search mirrored hosts for *title*; verify ambiguous hits via IMDB id.

    Tries mirrors until a movie_table page is served, filters by "(YYYY)"
    +/- 1, prefers exact cleaned-title matches, otherwise fetches up to 10
    candidate pages and accepts the first containing "tt<imdb>".  Returns a
    UTF-8 path, or None on failure.
    """
    try:
        query = self.search_link % urllib.quote_plus(title)

        result = ""
        # Try each mirror until one serves the results table.
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
            if "movie_table" in str(result): break

        # Site serves Latin-1; normalize to UTF-8 before parsing.
        result = result.decode("iso-8859-1").encode("utf-8")
        result = client.parseDOM(result, "div", attrs={"class": "movie_table"})

        title = cleantitle.movie(title)
        years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]

        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[1]) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]

        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        # Unwrap "?u=<target>" redirect links when present.
        try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)["u"][0], i[1]) for i in result]
        except: pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

        # match: exact title hits; match2: all hits, de-duplicated in order.
        match = [i[0] for i in result if title == cleantitle.movie(i[1])]
        match2 = [i[0] for i in result]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        for i in match2[:10]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                # No exact title match: confirm candidates by IMDB id.
                result = client.source(base_link + i, headers=self.headers)
                if str("tt" + imdb) in str(result):
                    url = i
                    break
            except:
                pass

        url = url.encode("utf-8")
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search mirrored hosts with a per-session key; verify via IMDB id.

    Fetches the search form to obtain the hidden "key" token, searches with
    it, matches "watch<title>" (the site prefixes results with "Watch"),
    filters by "(YYYY)" +/- 1, and falls back to checking up to 5 candidate
    pages for "tt<imdb>".  Returns a UTF-8 path, or None on failure.
    """
    try:
        result = ''
        # Try each mirror until one serves the search form.
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, self.key_link), headers=self.headers)
            if 'searchform' in str(result): break

        # The search endpoint requires a hidden per-session key token.
        key = client.parseDOM(result, "input", ret="value", attrs={"name": "key"})[0]

        query = self.moviesearch_link % (urllib.quote_plus(re.sub('\'', '', title)), key)
        result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)

        # Site serves Latin-1; normalize to UTF-8 before parsing.
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs={"class": "index_item.+?"})

        # Result titles are prefixed with "Watch ...".
        title = 'watch' + cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0]) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]

        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        # Unwrap "?u=<target>" redirect links when present.
        try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
        except: pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

        # match: exact title hits; match2: all hits, de-duplicated in order.
        match = [i[0] for i in result if title == cleantitle.movie(i[1])]
        match2 = [i[0] for i in result]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                # No exact title match: confirm candidates by IMDB id.
                result = client.source(base_link + i, headers=self.headers)
                if str('tt' + imdb) in str(result):
                    url = i
                    break
            except:
                pass

        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Query a Blogger-style JSON feed for *title*, skipping TS/CAM rips.

    Parses the `showResult(...)` JSONP payload, keeps entries in the
    "movies" category, matches cleaned title and year +/- 1.  Returns a
    UTF-8 path string, or None on failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = sucuri.source(query)
        # The endpoint returns JSONP: showResult({...}).
        result = re.compile('showResult\((.*)\)').findall(result)[0]
        result = json.loads(result)
        result = result['feed']['entry']

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

        result = [i for i in result if 'movies' in [x['term'].lower() for x in i['category']]]
        # Pick the canonical HTML link for each entry.
        result = [[x for x in i['link'] if x['rel'] == 'alternate' and x['type'] == 'text/html'][0] for i in result]
        result = [(i['href'], i['title']) for i in result]

        # Split "Title YYYY quality..." and drop cam/telesync releases.
        result = [(i[0], re.compile('(.+?) (\d{4})(.+)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1], i[1][0][2]) for i in result if len(i[1]) > 0]
        result = [(i[0], i[1], i[2]) for i in result if not 'TS' in i[3] and not 'CAM' in i[3]]

        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search a JSON catalog API for *title* and return its catalog id.

    Matches cleaned title and "(YYYY)" +/- 1 against the catalog names.
    Returns the id as a UTF-8 string, or None on failure.
    """
    try:
        query = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(title)))

        result = client.source(query)
        result = json.loads(result)
        result = result['categories']

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(i['catalog_id'], i['catalog_name'].encode('utf-8')) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search (Cloudflare, with agent fallback) for *title*; return its path.

    Normalizes tag-adjacent whitespace, pulls "Title (YYYY)" rows from the
    element lists, matches cleaned title and year +/- 1, unwraps `?u=`
    redirects and returns the URL path as UTF-8, or None on failure.
    """
    try:
        query = urlparse.urljoin(self.base_link, self.moviesearch_link + urllib.quote_plus(title))

        result = cloudflare.source(query)
        # Fall back to the agent proxy when the direct request fails.
        if result == None: result = client.source(self.agent_link + urllib.quote_plus(query))

        # Tighten whitespace around tags so the title regex can match.
        result = result.replace('> ', '>').replace(' <', '<')

        r = client.parseDOM(result, "li", attrs={"class": "first element.+?"})
        r += client.parseDOM(result, "li", attrs={"class": "element.+?"})

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        result = [(client.parseDOM(i, "a", ret="href"), re.compile('>(.+?\(\d{4}\))<').findall(i)) for i in r]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], i[1].split('>')[-1]) for i in result]

        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = client.replaceHTMLCodes(result)
        # Unwrap "?u=<target>" redirect links when present.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search the site for *title* and return the matching movie page path.

    Matches on a cleaned title and accepts the given year +/- 1 (the year
    is matched as a `>YYYY<` text node).  Returns a UTF-8 path string, or
    None on any failure (best-effort scraper convention).
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "div", attrs={"class": "hover-group.+?"})

        title = cleantitle.movie(title)
        # Accept year, year+1, year-1 to tolerate release-date ambiguity.
        years = ['>%s<' % str(year), '>%s<' % str(int(year)+1), '>%s<' % str(int(year)-1)]

        # (movie id, heading text, paragraph text containing the year)
        result = [(client.parseDOM(i, "a", ret="data-movieid")[0], client.parseDOM(i, "h5")[-1], client.parseDOM(i, "p")[-1]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search searchResult rows for *title* using a copyrightYear span.

    Requires an itemprop="copyrightYear" span, matches cleaned title and
    year +/- 1 against it.  Returns a UTF-8 path string, or None.
    """
    try:
        query = self.moviesearch_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "div", attrs={"class": "searchResult"})

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

        # (href, h2 title, copyrightYear spans)
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "h2", ret="title")[0], client.parseDOM(i, "span", attrs={"itemprop": "copyrightYear"})) for i in result]
        # Keep only entries that expose a copyright year.
        result = [i for i in result if len(i[2]) > 0]

        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2][0] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search h3 result headings for *title* and return the first match.

    Accepts a hit when the year (or title) appears in either the link URL
    or the link text — external search engines vary in where they put it.
    Returns a UTF-8 path string, or None on failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(title))

        result = client.source(query)

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

        result = client.parseDOM(result, "h3", attrs={"class": ".+?"})
        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
        result = [(i[0][0], i[1][-1]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]

        # Year and title may appear in the URL or the link text.
        result = [i for i in result if any(x in i[0] for x in years) or any(x in i[1] for x in years)]
        result = [i[0] for i in result if title in cleantitle.movie(i[0]) or title in cleantitle.movie(i[1])][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """POST-search mirrored hosts for *title* and return the result path.

    Tries each mirror until one returns a search-page widget, matches
    cleaned title and "(YYYY)" +/- 1, unwraps a `?u=` redirect if present
    and returns the URL path as UTF-8, or None on failure.
    """
    try:
        query = self.search_link
        post = urllib.urlencode({'searchquery': title, 'searchin': '1'})

        result = ''
        # Try each mirror until one serves the search results widget.
        links = [self.link_1, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, query), post=post, headers=self.headers)
            if 'widget search-page' in str(result): break

        result = client.parseDOM(result, "div", attrs={"class": "widget search-page"})[0]
        result = client.parseDOM(result, "td")

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        result = [(client.parseDOM(i, "a", ret="href")[-1], client.parseDOM(i, "a")[-1]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = client.replaceHTMLCodes(result)
        # Unwrap "?u=<target>" redirect links when present.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search the mobile site for *title* and return its page path.

    Uses a hyphenated query with any subtitle (after ':') dropped, matches
    cleaned title and "(YYYY)" +/- 1 against h3 headings.  Returns a UTF-8
    path string, or None on failure.
    """
    try:
        # Drop any subtitle after ':' and hyphenate for the site's URLs.
        query = urllib.quote_plus(title.replace(' ', '-').rsplit(':', 1)[0])
        query = urlparse.urljoin(self.base_link, self.search_link % query)

        result = client.source(query, mobile=True)
        result = client.parseDOM(result, "ul", attrs={"class": "movies.+?"})
        result = client.parseDOM(result, "li")

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "h3")[0]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """POST a keyword search to the site's index API and return the video id.

    Sends a JSON-in-form-field query, parses table rows, matches cleaned
    title and "(YYYY)" +/- 1, and returns the `v=` id component of the
    matched link as UTF-8, or None on failure.
    """
    try:
        query = self.base_link + self.index_link
        # The API expects a JSON document inside the 'p' form field.
        post = urllib.urlencode({'a': 'retrieve', 'c': 'result', 'p': '{"KeyWord":"%s","Page":"1","NextToken":""}' % title})

        result = client.source(query, post=post)
        # Site serves Latin-1; normalize to UTF-8 before parsing.
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "tr")

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [client.parseDOM(i, "h1")[0] for i in result]
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]

        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        # Keep only the video id after "v=".
        url = result.split('v=', 1)[-1]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Find an Indian-language movie page for *title*, keyed by language.

    Looks up country/language on OMDb first and bails out unless the movie
    is Indian and in a supported language; searches the site per-language,
    prefers year-matching results, then matches on cleaned title.  Returns
    a UTF-8 path string, or None on failure / unsupported movie.
    """
    try:
        # Use OMDb metadata to gate on country and pick the search language.
        search = "http://www.omdbapi.com/?i=tt%s" % imdb
        search = client.source(search)
        search = json.loads(search)

        country = [i.strip() for i in search["Country"].split(",")]
        if not "India" in country: return

        languages = ["hindi", "tamil", "telugu", "malayalam"]
        language = [i.strip().lower() for i in search["Language"].split(",")]
        language = [i for i in language if any(x == i for x in languages)][0]

        query = self.search_link % (urllib.quote_plus(title), language)
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "div", attrs={"class": "search-category"})
        result = [i for i in result if "Movies" in client.parseDOM(i, "p")[0]][0]
        result = client.parseDOM(result, "li")

        title = cleantitle.movie(title)
        years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]

        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]

        # Prefer year-matching results, but fall back to all if none match.
        r = [i for i in result if any(x in i[1] for x in years)]
        if not len(r) == 0: result = r

        result = [i[0] for i in result if title == cleantitle.movie(i[1])][0]

        try: url = re.compile("//.+?(/.+)").findall(result)[0]
        except: url = result
        url = url.replace("../", "/")
        url = client.replaceHTMLCodes(url)
        url = url.encode("utf-8")
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Locate a movie link by matching 'Name (YYYY)' captions in search cells."""
    try:
        query = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(title)))
        html = client.source(query)
        cells = client.parseDOM(html, "div", attrs={"class": "cell_container"})

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

        cells = [(client.parseDOM(c, "a", ret="href")[0], client.parseDOM(c, "a", ret="title")[0]) for c in cells]
        # Split "Name (2015)"-style captions into (link, name, year).
        cells = [(c[0], re.compile('(.+?) [(](\d{4})[)]').findall(c[1])) for c in cells]
        cells = [(c[0], c[1][0][0], c[1][0][1]) for c in cells if len(c[1]) > 0]
        cells = [c for c in cells if title == cleantitle.movie(c[1])]
        result = [c[0] for c in cells if any(y in c[2] for y in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search the cloudflare-protected listing and match title plus year."""
    try:
        query = self.search_link % (urllib.quote_plus(re.sub(r'[\W_]+', ' ', title)))
        query = urlparse.urljoin(self.base_link, query)
        html = cloudflare.source(query)
        # Normalise mis-encoded dash/apostrophe byte sequences before parsing.
        html = html.replace('–', '-').replace('’', '\'')

        listing = client.parseDOM(html, "ul", attrs={"class": "listing-videos.+?"})[0]
        items = client.parseDOM(listing, "li", attrs={"class": ".+?"})

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

        items = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0]) for i in items]
        # Strip the "(year) ..." tail from each caption and capture the year.
        items = [(i[0], re.sub('\s(\(|)(\d{4})(.+)', '', i[1]), re.compile('(\d{4})').findall(i[1])) for i in items]
        items = [(i[0], i[1], i[2][0]) for i in items if len(i[2]) > 0]
        items = [i for i in items if title == cleantitle.movie(i[1])]
        result = [i[0] for i in items if any(y in i[2] for y in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Find a movie link from <h3> search headings; year may be in link or text."""
    try:
        query = self.search_link % (urllib.quote_plus(title))
        html = client.source(query)

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

        headings = client.parseDOM(html, "h3", attrs={"class": ".+?"})
        pairs = [(client.parseDOM(h, "a", ret="href"), client.parseDOM(h, "a")) for h in headings]
        pairs = [(p[0][0], p[1][-1]) for p in pairs if len(p[0]) > 0 and len(p[1]) > 0]
        # The year can appear in either the href or the anchor text.
        pairs = [p for p in pairs if any(y in p[0] for y in years) or any(y in p[1] for y in years)]
        # NOTE: substring match ('in') rather than strict equality — this
        # matches the original behaviour for this provider.
        result = [p[0] for p in pairs if title in cleantitle.movie(p[0]) or title in cleantitle.movie(p[1])][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search post panels; fall back to og:title when search redirects directly."""
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        html = cloudflare.source(query)

        title = cleantitle.movie(title)
        years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]

        panels = client.parseDOM(html, "div", attrs={"class": "post-panel"})
        panels = client.parseDOM(panels, "h2")

        if len(panels) == 0:
            # No result list: the search apparently landed on a movie page,
            # so validate it via its og:title metadata instead.
            t = client.parseDOM(html, "meta", ret="content", attrs={"property": "og:title"})[0]
            t = re.compile(": (.+?\(\d{4}\))").findall(t)[0]
            if not title == cleantitle.movie(t): return
            if not any(y in t for y in years): return
            result = client.parseDOM(html, "link", ret="href", attrs={"rel": "canonical"})[0]
        else:
            entries = [(client.parseDOM(p, "a", ret="href")[0], client.parseDOM(p, "a")[0]) for p in panels]
            entries = [e for e in entries if title == cleantitle.movie(e[1])]
            result = [e[0] for e in entries if any(y in e[1] for y in years)][0]

        # Reject off-site links.
        if not self.base_link in result: return

        try: url = re.compile("//.+?(/.+)").findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode("utf-8")
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Query the ajaxy_sf JSON search and verify the hit via its IMDb id."""
    try:
        query = urlparse.urljoin(self.base_link, self.search_link)
        payload = urllib.urlencode({'action': 'ajaxy_sf', 'sf_value': title})

        data = client.source(query, post=payload)
        # Normalise mis-encoded dash/apostrophe byte sequences.
        data = data.replace('–', '-').replace('’', '\'')
        posts = json.loads(data)['post']['all']

        title = cleantitle.movie(title)
        result = [p['post_link'] for p in posts if title == cleantitle.movie(p['post_title'])][0]

        # Confirm the candidate page actually references this IMDb id.
        page = client.source(result)
        if not str('tt' + imdb) in page: raise Exception()

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search the provider's encrypted catalog API for a movie id."""
    try:
        query = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(title)))
        query += self.__extra()

        response = json.loads(client.source(query))
        # The payload is encrypted; decode it with the provider's data key.
        decrypted = self.__decrypt(self.data_key, response["data"])
        catalogs = json.loads(decrypted)["categories"]

        title = cleantitle.movie(title)
        years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]

        entries = [(c["catalog_id"], c["catalog_name"].encode("utf-8"), str(c["type_film"])) for c in catalogs]
        # type_film "0" appears to mark movies (vs. series) — only those kept.
        entries = [e for e in entries if e[2] == "0"]
        entries = [e for e in entries if title == cleantitle.movie(e[1])]
        result = [e[0] for e in entries if any(y in e[1] for y in years)][0]

        url = str(result)
        url = url.encode("utf-8")
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Find a film id by title, confirming the year via per-film detail calls."""
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        films = json.loads(client.source(query))['data']['films']

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        candidates = [(f['id'], f['title'].encode('utf-8')) for f in films]
        # Cap at two title matches to bound the per-candidate detail requests.
        candidates = [c for c in candidates if title == cleantitle.movie(c[1])][:2]
        candidates = [(c[0], self.base_link + self.detail_link % c[0]) for c in candidates]
        candidates = [(c[0], client.source(c[1])) for c in candidates]
        candidates = [(c[0], json.loads(c[1])['data']['state']) for c in candidates]
        result = [c[0] for c in candidates if any(y in c[1] for y in years)][0]

        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Search entry headers, unwrapping redirector links to a site path."""
    try:
        query = urlparse.urljoin(self.base_link, self.moviesearch_link + urllib.quote_plus(title))
        html = cloudflare.source(query)
        headers = client.parseDOM(html, "header", attrs={"class": "entry-header"})

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        entries = [(client.parseDOM(h, "a", ret="href"), client.parseDOM(h, "a")) for h in headers]
        entries = [(e[0][0], e[1][0]) for e in entries if len(e[0]) > 0 and len(e[1]) > 0]
        entries = [e for e in entries if title == cleantitle.movie(e[1])]
        result = [e[0] for e in entries if any(y in e[1] for y in years)][0]

        url = client.replaceHTMLCodes(result)
        # Some links are redirectors carrying the target in a 'u' parameter.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Match search results on title plus the itemprop copyrightYear span."""
    try:
        query = self.moviesearch_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        html = client.source(query)
        hits = client.parseDOM(html, "div", attrs={"class": "searchResult"})

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

        hits = [(client.parseDOM(h, "a", ret="href")[0], client.parseDOM(h, "h2", ret="title")[0], client.parseDOM(h, "span", attrs={"itemprop": "copyrightYear"})) for h in hits]
        # Discard entries that carry no copyrightYear span at all.
        hits = [h for h in hits if len(h[2]) > 0]
        hits = [h for h in hits if title == cleantitle.movie(h[1])]
        result = [h[0] for h in hits if any(y in h[2][0] for y in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Find the watch-page id for a movie via the site's search API."""
    try:
        endpoint = self.base_link + self.index_link
        form = {'a': 'retrieve', 'c': 'result', 'p': '{"KeyWord":"%s","Page":"1","NextToken":""}' % title}
        page = client.source(endpoint, post=urllib.urlencode(form))
        page = page.decode('iso-8859-1').encode('utf-8')

        cleaned = cleantitle.movie(title)
        # Year window of +/- one year absorbs metadata discrepancies.
        year_tags = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        anchors = [client.parseDOM(row, "h1")[0] for row in client.parseDOM(page, "tr")]
        anchors = [(client.parseDOM(a, "a", ret="href")[0], client.parseDOM(a, "a")[0]) for a in anchors]
        anchors = [a for a in anchors if cleaned == cleantitle.movie(a[1])]
        link = [a[0] for a in anchors if any(t in a[1] for t in year_tags)][0]

        # The id we want follows the 'v=' query marker.
        link = link.split('v=', 1)[-1]
        link = client.replaceHTMLCodes(link)
        return link.encode('utf-8')
    except:
        return
def get_movie(self, imdb, title, year):
    """Pick a movie link out of the site's listing-videos results."""
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        html = client.source(query)

        listing = client.parseDOM(html, "ul", attrs={"class": "listing-videos.+?"})[0]
        videos = client.parseDOM(listing, "li", attrs={"class": ".+?"})

        title = cleantitle.movie(title)
        years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]

        videos = [(client.parseDOM(v, "a", ret="href")[0], client.parseDOM(v, "a", ret="title")[0]) for v in videos]
        videos = [v for v in videos if title == cleantitle.movie(v[1])]
        result = [v[0] for v in videos if any(y in v[1] for y in years)][0]

        # Reduce an absolute link to its site-relative path when possible.
        try: url = re.compile("//.+?(/.+)").findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode("utf-8")
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    """Resolve a movie path, unwrapping redirect links when present."""
    try:
        search_url = urlparse.urljoin(self.base_link, self.moviesearch_link + urllib.quote_plus(title))
        page = cloudflare.source(search_url)
        posts = client.parseDOM(page, "header", attrs={"class": "entry-header"})

        wanted = cleantitle.movie(title)
        tags = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        links = [(client.parseDOM(p, "a", ret="href"), client.parseDOM(p, "a")) for p in posts]
        links = [(l[0][0], l[1][0]) for l in links if len(l[0]) > 0 and len(l[1]) > 0]
        links = [l for l in links if wanted == cleantitle.movie(l[1])]
        hit = [l[0] for l in links if any(t in l[1] for t in tags)][0]

        url = client.replaceHTMLCodes(hit)
        # Redirector links carry the real target in the 'u' query parameter.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        return url.encode('utf-8')
    except:
        return
def get_movie(self, imdb, title, year):
    """Find a movie by its '-movie-online-' listing link."""
    try:
        query = self.moviesearch_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)
        html = client.source(query)
        html = html.decode('iso-8859-1').encode('utf-8')
        items = client.parseDOM(html, "div", attrs={"class": "item"})

        # Listings seemingly prefix titles with "Watch", so compare against
        # 'watch' + cleaned title.
        title = 'watch' + cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        items = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0]) for i in items]
        items = [i for i in items if '-movie-online-' in i[0]]
        items = [i for i in items if title == cleantitle.movie(i[1])]
        result = [i[0] for i in items if any(y in i[1] for y in years)][0]
        # Keep only the base path before the '-movie-online-' marker.
        result = result.split('-movie-online-', 1)[0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect host links from the Alluc download and stream APIs."""
    try:
        sources = []
        if url == None: return sources

        headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20110101 Firefox/34.0"}
        params = (base64.urlsafe_b64decode(self.key_link), urllib.quote_plus(url))

        # Gather candidates from both the download and the stream endpoints;
        # either endpoint may fail independently.
        links = []
        for link_fmt in (self.download_link, self.stream_link):
            q = urlparse.urljoin(self.base_link, link_fmt % params)
            try: links += json.loads(client.source(q, headers=headers))["result"]
            except: pass

        # The query string ends in either a year (movie) or SxxExx (episode).
        title, hdlr = re.compile("(.+?) (\d{4}|S\d*E\d*)$").findall(url)[0]
        if hdlr.isdigit():
            type = "movie"
            title = cleantitle.movie(title)
            hdlr = [str(hdlr), str(int(hdlr) + 1), str(int(hdlr) - 1)]
        else:
            type = "episode"
            title = cleantitle.tv(title)
            hdlr = [hdlr]

        for i in links:
            try:
                if len(i["hosterurls"]) > 1: raise Exception()
                if not i["extension"] in ["mkv", "mp4"]: raise Exception()

                host = i["hostername"].rsplit(".", 1)[0].strip().lower()
                if not (host in hosthdDict or host in hostDict): raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode("utf-8")

                T = client.replaceHTMLCodes(i["title"])
                N = client.replaceHTMLCodes(i["sourcetitle"])

                # Cut everything from the year/episode/3D tag onward, then
                # normalise for comparison against the requested title.
                t = re.sub("(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\_|\)|\]|\s)(.+)", "", T)
                t = cleantitle.movie(t) if type == "movie" else cleantitle.tv(t)
                n = re.sub("(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\_|\)|\]|\s)(.+)", "", N)
                n = cleantitle.movie(n) if type == "movie" else cleantitle.tv(n)
                if not (t == title or n == title): raise Exception()

                # Verify the year / episode tag found in either name.
                y = re.compile("[\.|\_|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\_|\)|\]|\s]").findall(T)
                y += re.compile("[\.|\_|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\_|\)|\]|\s]").findall(N)
                y = y[0]
                if not any(x == y for x in hdlr): raise Exception()

                # Everything after the tag is release metadata (quality, rip).
                fmt = re.sub("(.+)(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*)(\.|\_|\)|\]|\s)", "", T)
                fmt += " " + re.sub("(.+)(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*)(\.|\_|\)|\]|\s)", "", N)
                fmt = [x.lower() for x in re.split("\.|\_|\(|\)|\[|\]|\s|\-", fmt)]

                if "1080p" in fmt: quality = "1080p"
                elif "720p" in fmt: quality = "HD"
                else: quality = "SD"

                if any(x in ["dvdscr", "r5", "r6", "camrip", "tsrip", "hdcam", "hdts", "dvdcam", "dvdts", "cam", "ts"] for x in fmt): raise Exception()
                if quality in ["1080p", "HD"] and not host in hosthdDict: raise Exception()
                if quality == "SD" and not host in hostDict: raise Exception()

                url = client.replaceHTMLCodes(i["hosterurls"][0]["url"])
                url = url.encode("utf-8")

                info = []
                size = i["sizeinternal"]
                # Reject implausibly small movie files (under ~100 MB).
                if type == "movie" and 1 < size < 100000000: raise Exception()
                size = float(size) / 1073741824
                if not size == 0: info.append("%.2f GB" % size)
                if "3d" in fmt: info.append("3D")
                info = " | ".join(info)

                sources.append({"source": host, "quality": quality, "provider": "Alluc", "url": url, "info": info})
            except:
                pass

        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Build the Alluc source list by querying its download/stream APIs."""
    try:
        sources = []
        if url == None: return sources

        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20110101 Firefox/34.0'}
        params = (base64.urlsafe_b64decode(self.key_link), urllib.quote_plus(url))

        links = []
        # Download endpoint — best-effort.
        try:
            dl = urlparse.urljoin(self.base_link, self.download_link % params)
            links += json.loads(client.source(dl, headers=headers))['result']
        except:
            pass
        # Streaming endpoint — best-effort.
        try:
            st = urlparse.urljoin(self.base_link, self.stream_link % params)
            links += json.loads(client.source(st, headers=headers))['result']
        except:
            pass

        # Trailing token is a year for movies, SxxExx for episodes.
        title, hdlr = re.compile('(.+?) (\d{4}|S\d*E\d*)$').findall(url)[0]
        if hdlr.isdigit():
            type = 'movie'
            title = cleantitle.movie(title)
            hdlr = [str(hdlr), str(int(hdlr) + 1), str(int(hdlr) - 1)]
        else:
            type = 'episode'
            title = cleantitle.tv(title)
            hdlr = [hdlr]

        for entry in links:
            try:
                if len(entry['hosterurls']) > 1: raise Exception()
                if not entry['extension'] in ['mkv', 'mp4']: raise Exception()

                host = entry['hostername']
                host = host.rsplit('.', 1)[0]
                host = host.strip().lower()
                if not (host in hosthdDict or host in hostDict): raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                T = client.replaceHTMLCodes(entry['title'])
                N = client.replaceHTMLCodes(entry['sourcetitle'])

                # Compare the name up to the year/episode/3D tag with the
                # requested title.
                t = re.sub('(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\_|\)|\]|\s)(.+)', '', T)
                if type == 'movie': t = cleantitle.movie(t)
                else: t = cleantitle.tv(t)
                n = re.sub('(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\_|\)|\]|\s)(.+)', '', N)
                if type == 'movie': n = cleantitle.movie(n)
                else: n = cleantitle.tv(n)
                if not (t == title or n == title): raise Exception()

                y = re.compile('[\.|\_|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\_|\)|\]|\s]').findall(T)
                y += re.compile('[\.|\_|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\_|\)|\]|\s]').findall(N)
                y = y[0]
                if not any(x == y for x in hdlr): raise Exception()

                # Tokenise the release metadata that follows the tag.
                fmt = re.sub('(.+)(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*)(\.|\_|\)|\]|\s)', '', T)
                fmt += ' ' + re.sub('(.+)(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*)(\.|\_|\)|\]|\s)', '', N)
                fmt = re.split('\.|\_|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [x.lower() for x in fmt]

                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                else: quality = 'SD'

                if any(x in ['dvdscr', 'r5', 'r6', 'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts'] for x in fmt): raise Exception()
                if quality in ['1080p', 'HD'] and not host in hosthdDict: raise Exception()
                if quality == 'SD' and not host in hostDict: raise Exception()

                url = entry['hosterurls'][0]['url']
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                info = []
                size = entry['sizeinternal']
                # Skip movies below ~100 MB (almost certainly not a full film).
                if type == 'movie' and 1 < size < 100000000: raise Exception()
                size = float(size) / 1073741824
                if not size == 0: info.append('%.2f GB' % size)
                if '3d' in fmt: info.append('3D')
                info = ' | '.join(info)

                sources.append({'source': host, 'quality': quality, 'provider': 'Alluc', 'url': url, 'info': info})
            except:
                pass

        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Search Furk (authenticated) and build a filtered source list."""
    try:
        sources = []
        if url == None: return sources
        # Furk requires account credentials.
        if (self.user == '' or self.password == ''): raise Exception()

        login = urlparse.urljoin(self.base_link, self.login_link)
        post = urllib.urlencode({'login': self.user, 'pwd': self.password})
        cookie = client.source(login, post=post, output='cookie')

        search = urlparse.urljoin(self.base_link, self.search_link)
        post = urllib.urlencode({'sort': 'relevance', 'filter': 'all', 'moderated': 'yes', 'offset': '0', 'limit': '100', 'match': 'all', 'q': url})
        links = json.loads(client.source(search, post=post, cookie=cookie))['files']

        # Trailing token is a year for movies, SxxExx for episodes.
        title, hdlr = re.compile('(.+?) (\d{4}|S\d*E\d*)$').findall(url)[0]
        if hdlr.isdigit():
            type = 'movie'
            title = cleantitle.movie(title)
            hdlr = [str(hdlr), str(int(hdlr) + 1), str(int(hdlr) - 1)]
        else:
            type = 'episode'
            title = cleantitle.tv(title)
            hdlr = [hdlr]

        for i in links:
            try:
                name = client.replaceHTMLCodes(i['name'])
                info = i['video_info']
                # Movies must carry an English audio track in stream #0:1.
                if type == 'movie' and not '#0:1(eng): Audio:' in info: raise Exception()

                # Compare the name up to the year/episode/3D tag.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\)|\]|\s)(.+)', '', name)
                t = cleantitle.movie(t) if type == 'movie' else cleantitle.tv(t)
                if not t == title: raise Exception()

                y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]').findall(name)[-1]
                if not any(x == y for x in hdlr): raise Exception()

                # Tokenise release metadata after the tag; skip subbed/dubbed
                # releases and extras.
                fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*)(\.|\)|\]|\s)', '', name)
                fmt = [x.lower() for x in re.split('\.|\(|\)|\[|\]|\s|\-', fmt)]
                if any(x.endswith(('subs', 'sub', 'dubbed', 'dub')) for x in fmt): raise Exception()
                if any(x in ['extras'] for x in fmt): raise Exception()

                # Derive quality from the horizontal resolution in video_info.
                res = i['video_info'].replace('\n', '')
                res = int(re.compile(', (\d*)x\d*').findall(res)[0])
                if 1900 <= res <= 1920: quality = '1080p'
                elif 1200 <= res <= 1280: quality = 'HD'
                else: quality = 'SD'
                if any(x in ['dvdscr', 'r5', 'r6'] for x in fmt): quality = 'SCR'
                elif any(x in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts'] for x in fmt): quality = 'CAM'

                size = float(i['size']) / 1073741824
                # Sub-HD files over 2 GB, or anything over 5 GB, are rejected.
                if int(size) > 2 and not quality in ['1080p', 'HD']: raise Exception()
                if int(size) > 5: raise Exception()

                # Build a human-readable info string: size, 3D, codecs.
                info = i['video_info'].replace('\n', '')
                v = re.compile('Video: (.+?),').findall(info)[0]
                a = re.compile('Audio: (.+?), .+?, (.+?),').findall(info)[0]
                q = ' | 3D' if '3d' in fmt else ''
                info = '%.2f GB%s | %s | %s | %s' % (size, q, v, a[0], a[1])
                info = re.sub('\(.+?\)', '', info)
                info = info.replace('stereo', '2.0')
                info = ' '.join(info.split())

                url = client.replaceHTMLCodes(i['url_pls'])
                url = url.encode('utf-8')

                sources.append({'source': 'Furk', 'quality': quality, 'provider': 'Furk', 'url': url, 'info': info})
            except:
                pass

        # Keep CAM/SCR entries only when nothing better was found.
        if not all(i['quality'] in ['CAM', 'SCR'] for i in sources):
            sources = [i for i in sources if not i['quality'] in ['CAM', 'SCR']]

        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict): try: sources = [] if url == None: return sources if (self.user == '' or self.password == ''): raise Exception() query = urlparse.urljoin(self.base_link, self.login_link) post = urllib.urlencode({'login': self.user, 'pwd': self.password}) cookie = client.source(query, post=post, output='cookie') query = urlparse.urljoin(self.base_link, self.search_link) post = urllib.urlencode({ 'sort': 'relevance', 'filter': 'all', 'moderated': 'yes', 'offset': '0', 'limit': '100', 'match': 'all', 'q': url }) result = client.source(query, post=post, cookie=cookie) result = json.loads(result) links = result['files'] title, hdlr = re.compile('(.+?) (\d{4}|S\d*E\d*)$').findall(url)[0] if hdlr.isdigit(): type = 'movie' title = cleantitle.movie(title) hdlr = [str(hdlr), str(int(hdlr) + 1), str(int(hdlr) - 1)] else: type = 'episode' title = cleantitle.tv(title) hdlr = [hdlr] for i in links: try: name = i['name'] name = client.replaceHTMLCodes(name) info = i['video_info'] if type == 'movie' and not '#0:1(eng): Audio:' in info: raise Exception() t = re.sub( '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\)|\]|\s)(.+)', '', name) if type == 'movie': t = cleantitle.movie(t) else: t = cleantitle.tv(t) if not t == title: raise Exception() y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]' ).findall(name)[-1] if not any(x == y for x in hdlr): raise Exception() fmt = re.sub( '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*)(\.|\)|\]|\s)', '', name) fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt) fmt = [x.lower() for x in fmt] if any( x.endswith(('subs', 'sub', 'dubbed', 'dub')) for x in fmt): raise Exception() if any(x in ['extras'] for x in fmt): raise Exception() res = i['video_info'].replace('\n', '') res = re.compile(', (\d*)x\d*').findall(res)[0] res = int(res) if 1900 <= res <= 1920: quality = '1080p' elif 1200 <= res <= 1280: quality = 'HD' else: quality = 'SD' if any(x in ['dvdscr', 'r5', 'r6'] for x in fmt): quality = 'SCR' elif any(x in [ 
'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts' ] for x in fmt): quality = 'CAM' size = i['size'] size = float(size) / 1073741824 if int(size) > 2 and not quality in ['1080p', 'HD']: raise Exception() if int(size) > 5: raise Exception() info = i['video_info'].replace('\n', '') v = re.compile('Video: (.+?),').findall(info)[0] a = re.compile('Audio: (.+?), .+?, (.+?),').findall( info)[0] if '3d' in fmt: q = ' | 3D' else: q = '' info = '%.2f GB%s | %s | %s | %s' % (size, q, v, a[0], a[1]) info = re.sub('\(.+?\)', '', info) info = info.replace('stereo', '2.0') info = ' '.join(info.split()) url = i['url_pls'] url = client.replaceHTMLCodes(url) url = url.encode('utf-8') sources.append({ 'source': 'Furk', 'quality': quality, 'provider': 'Furk', 'url': url, 'info': info }) except: pass if not all(i['quality'] in ['CAM', 'SCR'] for i in sources): sources = [ i for i in sources if not i['quality'] in ['CAM', 'SCR'] ] return sources except: return sources