def movie(self, imdb, title, year):
    """Resolve the movie page URL from the site's RSS search feed.

    Picks one of two mirror base links at random, searches for
    "<title> <year>" and returns the <link> of the first <item> whose
    cleaned title matches exactly.  Returns None on failure / no match.
    """
    try:
        # Rotate between mirrors so a single dead domain does not kill the scraper.
        self.base_link = random.choice(
            [self.base_link_1, self.base_link_2])

        query = '%s %s' % (title, year)
        query = self.search_link % (urllib.quote_plus(query))
        query = urlparse.urljoin(self.base_link, query)

        result = client.request(query)
        # Site serves latin-1; normalise to utf-8 before DOM parsing.
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "item")

        title = cleantitle.movie(title)

        # BUGFIX: url was previously unbound when no item matched, which
        # raised a NameError that the bare except silently swallowed.
        url = None
        for item in result:
            searchTitle = client.parseDOM(item, "title")[0]
            searchTitle = cleantitle.movie(searchTitle)
            if title == searchTitle:
                url = client.parseDOM(item, "link")[0]
                break

        if url == None or url == '':
            raise Exception()
        return url
    except:
        return
def movie(self, imdb, title, year):
    """Resolve the movie info URL via the site's JSON search endpoint.

    Logs in first, searches for "<title> <year>", then matches the cleaned
    title against each row and returns ``self.info_link % asset_id``.
    Returns None on failure / no match.
    """
    try:
        self.base_link = random.choice(
            [self.base_link_1, self.base_link_2])
        # Session cookie is required before the search endpoint responds.
        self.login()

        query = '%s %s' % (title, year)
        query = self.search_link % (urllib.quote_plus(query))
        query = urlparse.urljoin(self.base_link, query)

        result = client.request(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = json.loads(result)
        result = result['rows']

        title = cleantitle.movie(title)

        # BUGFIX: url was previously unbound when no row matched, which
        # raised a NameError that the bare except silently swallowed.
        url = None
        for item in result:
            searchTitle = cleantitle.movie(item['title'])
            if title == searchTitle:
                url = self.info_link % item['asset_id']
                break

        if url == None or url == '':
            raise Exception()
        return url
    except:
        return
def movie(self, imdb, title, year):
    """Find the movie page URL by scraping the site's search results tab.

    Searches by bare title; each result anchor's ``title`` attribute ends
    with " (YYYY)", which is stripped before comparing cleaned titles.
    Returns None on failure / no match.
    """
    try:
        # NOTE: removed the no-op ``self.base_link = self.base_link``.
        query = '%s' % (title)
        query = self.search_link % (urllib.quote_plus(query))
        query = urlparse.urljoin(self.base_link, query)

        result = client.request(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs={"class": "tab-content mt20"})[0]
        result = client.parseDOM(result, "div", attrs={"id": "movies_tab"})[0]
        result = client.parseDOM(result, "div", attrs={"class": "media-left"})

        title = cleantitle.movie(title)

        # BUGFIX: url was previously unbound when no result matched, which
        # raised a NameError that the bare except silently swallowed.
        url = None
        for item in result:
            searchTitle = client.parseDOM(item, "a", ret="title")[0]
            # Drop the trailing " (YYYY)" year suffix.
            searchTitle = re.compile('(.+?) [(]\d{4}[)]$').findall(searchTitle)[0]
            searchTitle = cleantitle.movie(searchTitle)
            if title == searchTitle:
                url = client.parseDOM(item, "a", ret="href")[0]
                break

        if url == None or url == '':
            raise Exception()
        return url
    except:
        return
def movie(self, imdb, title, year):
    """Locate the movie page through the site's RSS search feed.

    Searches by bare title and accepts the first feed entry whose cleaned
    title contains the cleaned wanted title (feed titles carry extra
    release tags).  Returns None on failure / no match.
    """
    try:
        url = None
        # Alternate between the two mirror domains.
        self.base_link = random.choice(
            [self.base_link_1, self.base_link_2])

        search_url = self.search_link % (urllib.quote_plus(title))
        search_url = urlparse.urljoin(self.base_link, search_url)
        feed = client.request(search_url, error=True)

        wanted = cleantitle.movie(title)
        for entry in client.parseDOM(feed, "item"):
            found = cleantitle.movie(client.parseDOM(entry, "title")[0])
            if wanted in found:
                url = client.parseDOM(entry, "a", attrs={"rel": "nofollow"}, ret="href")[0]
                break

        if url == None or url == '':
            raise Exception()
        return url
    except:
        import traceback
        traceback.print_exc()
        return
def movie(self, imdb, title, year):
    """Find the movie URL via a base64-encoded external search API.

    The API returns JSON ``results`` with ``url`` / ``titleNoFormatting``
    pairs; entries are filtered by a year-pattern regex, de-duplicated in
    order, and matched on cleaned title.  Returns the first match as a
    utf-8 byte string, or None.
    """
    try:
        wanted = cleantitle.movie(title)
        try:
            q = base64.b64decode(self.search_link) % urllib.quote_plus(
                '%s %s' % (title, year))
            response = json.loads(client.request(q))['results']

            pairs = [(entry['url'], entry['titleNoFormatting'])
                     for entry in response]
            year_re = re.compile('(.+?) [\d{4}|(\d{4})]')
            pairs = [(u, year_re.findall(n)) for u, n in pairs]
            pairs = [(u, m[0]) for u, m in pairs if len(m) > 0]

            # De-duplicate while preserving order.
            unique = []
            for p in pairs:
                if p not in unique:
                    unique.append(p)

            matches = [u for u, n in unique if wanted == cleantitle.movie(n)]
            u = matches[0]
        except:
            return

        url = client.replaceHTMLCodes(u)
        return url.encode('utf-8')
    except:
        return
def movie(self, imdb, title, year):
    """Scrape the site's search grid and return the movie's URL slug.

    Searches by bare title; each result's "tt" span may carry a trailing
    year which is stripped before a substring title match.  The returned
    value is the last path segment of the matched href.
    Returns None on failure / no match (errors are logged).
    """
    try:
        # NOTE: removed the no-op ``self.base_link = self.base_link`` and the
        # dead ``query = '%s %s' % (title, year)`` that was immediately
        # overwritten by the bare-title query below.
        query = '%s' % (title)
        query = self.search_link % (urllib.quote_plus(query))
        query = urlparse.urljoin(self.base_link, query)

        result = client.request(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs={"class": "item"})

        title = cleantitle.movie(title)

        # BUGFIX: url was previously unbound when no item matched (NameError).
        url = None
        for item in result:
            searchTitle = client.parseDOM(item, "span", attrs={"class": "tt"})[0]
            try:
                # Strip a trailing year if present; some labels have none.
                searchTitle = re.compile('(.+?) \d{4} ').findall(
                    searchTitle)[0]
            except:
                pass
            searchTitle = cleantitle.movie(searchTitle)
            if title in searchTitle:
                url = client.parseDOM(item, "a", ret="href")[0]
                # Keep only the final path segment (the movie slug).
                url = re.compile(".+/(.+?)/").findall(url)[0]
                break

        if url == None or url == '':
            raise Exception()
        return url
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
        return
def movie(self, imdb, title, year):
    """Search the site and verify the release year on the candidate pages.

    Collects up to four title-matching candidates, fetches each page and
    accepts the first whose stripped HTML contains "Release:<year>".
    Returns the path portion of that URL (utf-8), or None.
    """
    try:
        t = cleantitle.movie(title)

        q = urlparse.urljoin(self.base_link, self.search_link)
        q = q % urllib.quote_plus(title)

        r = client.request(q, error=True)
        r = client.parseDOM(r, 'div', attrs={'class': 'item'})
        r = [(client.parseDOM(i, 'a', ret='href'),
              client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [i[0] for i in r if t == cleantitle.movie(i[1])]

        # BUGFIX: u was previously unbound when no candidate passed the
        # year check, raising a NameError that the bare except swallowed.
        u = None
        for i in r[:4]:
            try:
                m = client.request(urlparse.urljoin(self.base_link, i))
                # Collapse whitespace and tags so "Release:<year>" is contiguous.
                m = re.sub('\s|<.+?>|</.+?>', '', m)
                m = re.findall('Release:(%s)' % year, m)[0]
                u = i
                break
            except:
                pass
        if u == None:
            raise Exception()

        # Keep only the path part of the URL.
        url = re.findall('(?://.+?|)(/.+)', u)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, year):
    """Find the movie page URL from the site's search-result listing.

    Searches by bare title and matches the cleaned anchor text inside
    each result's "details" block exactly.  Returns None on failure.
    """
    try:
        url = None
        self.base_link = random.choice(
            [self.base_link_1, self.base_link_2])

        # NOTE: removed the dead ``query = '%s %s' % (title, year)`` that was
        # immediately overwritten by the bare-title query.
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.request(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs={"class": "result clearfix"})

        title = cleantitle.movie(title)
        for item in result:
            item = client.parseDOM(item, "div", attrs={"class": "details"})[0]
            searchTitle = client.parseDOM(item, "a")[0]
            searchTitle = cleantitle.movie(searchTitle)
            if title == searchTitle:
                url = client.parseDOM(item, "a", ret="href")[0]
                break

        if url == None or url == '':
            raise Exception()
        return url
    except:
        return
def movie(self, imdb, title, year):
    """Query the site's JSON search API and map the match to a CDN link.

    ``self.base_link`` is a format string (filled with 'search' here);
    the API response's docs are matched on cleaned ``contentTitle`` and
    the winner's ``contentId`` is substituted into ``self.cdn_link``.
    Returns None on failure / no match (errors are logged).
    """
    try:
        url = None
        self.base_link = random.choice(
            [self.base_link_1, self.base_link_2])

        term = urllib.quote_plus('%s %s' % (title, year))
        search_url = urlparse.urljoin(self.base_link % 'search',
                                      self.search_link % (term))

        raw = client.request(search_url, headers=self.headers)
        raw = raw.decode('iso-8859-1').encode('utf-8')
        docs = json.loads(raw)['resultObj']['response']['docs']

        wanted = cleantitle.movie(title)
        for doc in docs:
            if wanted == cleantitle.movie(doc['contentTitle']):
                url = self.cdn_link % doc['contentId']
                break

        if url == None or url == '':
            raise Exception()
        return url
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
        return
def movie(self, imdb, title, year):
    """Look the movie up in the cached DesiRulez forum listing.

    The listing is refreshed at most every 168 hours via ``cache.get``.
    Returns the matching entry's URL, or None when nothing matches.
    """
    try:
        wanted = cleantitle.movie(title)
        for entry in cache.get(self.desiRulezCache, 168):
            if cleantitle.movie(entry['title']) == wanted:
                return entry['url']
    except:
        pass
def sources(self, url):
    """Scrape hoster links from a DesiHDMovies movie page.

    Pairs each player-nav tab label with the corresponding player block;
    only tabs whose cleaned label contains "full" (and not "option") are
    taken.  Each embedded player URL is reported as a non-direct source
    with the page's "calidad2" quality badge.
    """
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        srcs = []
        if url == None:
            return srcs

        url = urlparse.urljoin(self.base_link, url)
        try:
            result = client.request(url, referer=self.base_link)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')

        try:
            quality = client.parseDOM(result, "span", attrs={"class": "calidad2"})[0]
        except:
            quality = ""

        tabs = client.parseDOM(result, "div", attrs={"class": "player_nav"})[0]
        tabs = client.parseDOM(tabs, "a")
        players = client.parseDOM(result, "div", attrs={"id": "player2"})[0]
        players = client.parseDOM(players, "div", attrs={"class": "movieplay"})

        embed_re = re.compile('(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]')
        for label, player in zip(tabs, players):
            try:
                label = cleantitle.movie(label)
                # Skip anything that is not the "full" movie tab.
                if "full" not in label or "option" in label:
                    continue
                link = embed_re.findall(player)[0][1]
                srcs.append({
                    'source': client.host(link),
                    'parts': '1',
                    'quality': quality,
                    'provider': 'DesiHDMovies',
                    'url': link,
                    'direct': False
                })
            except:
                pass

        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
        return srcs
def desiRulezCache(self):
    """Fetch and parse the DesiRulez forum movie listing.

    Scrapes the "hot" thread entries and returns a list of
    ``{'url', 'title'}`` dicts; titles are normalised via cleantitle and
    the forum's "watchonline/download" suffix is stripped.  Returns None
    on any failure (the caller caches the result).
    """
    try:
        base_link = 'http://www.desirulez.me/forums/20-Latest-Exclusive-Movie-HQ'
        html = client.request(base_link)
        html = html.decode('iso-8859-1').encode('utf-8')
        threads = client.parseDOM(html, "li", attrs={"class": "threadbit hot"})

        movies = []
        for thread in threads:
            heading = client.parseDOM(thread, "h3", attrs={"class": "threadtitle"})[0]
            thread_url = client.parseDOM(heading, "a", ret="href")[0]
            thread_title = client.parseDOM(heading, "a")[0]
            thread_title = cleantitle.movie(thread_title).replace(
                'watchonline/download', '')
            movies.append({'url': thread_url, 'title': thread_title})
        return movies
    except:
        pass
def sources(self, url):
    """Collect downloadable hoster links for a movie from the world4u feed.

    ``url`` is a querystring carrying imdb/title/year.  The RSS feed is
    searched for "<title> <year>", posts are filtered by cleaned title,
    exact year, and release tags (subs/dubs/extras rejected), quality and
    size info are derived from the release name, and every external link
    on the matched post's page is reported as a non-direct source.

    Changes: removed commented-out dead code (including a comment that
    had been broken across lines) and hoisted the duplicated size-regex
    compile; behavior is otherwise unchanged.
    """
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        srcs = []
        if url == None:
            return srcs

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        imdb, title, year = data.get('imdb'), data.get('title'), data.get(
            'year')

        self.base_link = random.choice(
            [self.base_link_1, self.base_link_2])

        query = '%s %s' % (title, year)
        query = self.search_link % (urllib.quote_plus(query))
        query = urlparse.urljoin(self.base_link, query)

        result = client.request(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        posts = client.parseDOM(result, "item")

        # Hoisted: the same size pattern is needed twice below.
        size_re = re.compile(
            '((?:\d+\.\d+|\d+\,\d+|\d+)(?:GB|GiB|MB|MiB|mb|gb))')

        # First pass: collect candidate posts (skip trailers).
        items = []
        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]
                if 'trailer' in cleantitle.movie(t):
                    raise Exception()
                try:
                    s = size_re.findall(t)[0]
                except:
                    s = '0'
                i = client.parseDOM(post, 'link')[0]
                items += [{'name': t, 'url': i, 'size': s}]
            except:
                pass

        title = cleantitle.movie(title)

        for item in items:
            try:
                name = item.get('name')
                # Strip year / SxxExx / 3D tags and everything after them.
                t = re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                    '', name)
                if cleantitle.movie(title) == cleantitle.movie(t):
                    # Last bracketed year-ish token must equal the wanted year.
                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()
                    if not y == year:
                        raise Exception()

                    # Tokenise everything after the year for tag inspection.
                    fmt = re.sub(
                        '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                        '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    # Reject subbed/dubbed releases and extras discs.
                    if any(
                            i.endswith(('subs', 'sub', 'dubbed', 'dub'))
                            for i in fmt):
                        raise Exception()
                    if any(i in ['extras'] for i in fmt):
                        raise Exception()

                    if '1080p' in fmt:
                        quality = '1080p'
                    elif '720p' in fmt:
                        quality = 'HD'
                    else:
                        quality = 'SD'
                    # Screener / cam tags override the resolution label.
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
                        quality = 'SCR'
                    elif any(i in [
                            'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam',
                            'dvdts', 'cam', 'telesync', 'ts'
                    ] for i in fmt):
                        quality = 'CAM'

                    info = []
                    if '3d' in fmt:
                        info.append('3D')
                    try:
                        # Normalise the advertised size to GB.
                        size = size_re.findall(item.get('size'))[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass
                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt):
                        info.append('HEVC')
                    info = ' | '.join(info)

                    # Fetch the post page and harvest all external links.
                    movieurl = item.get('url')
                    result = client.request(movieurl)
                    result = result.decode('iso-8859-1').encode('utf-8')
                    result = result.replace('\n', '').replace('\t', '')
                    result = client.parseDOM(result, 'div',
                                             attrs={'class': 'entry'})[0]
                    links = client.parseDOM(result, 'a',
                                            attrs={'target': '_blank'},
                                            ret='href')
                    for link in links:
                        if 'http' in link:
                            host = client.host(link)
                            srcs.append({
                                'source': host,
                                'parts': '1',
                                'quality': quality,
                                'provider': 'world4u',
                                'url': link,
                                'direct': False,
                                'info': info
                            })
            except:
                pass

        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        import traceback
        traceback.print_exc()
        return srcs