def movie(self, imdb, title, year):
    """Locate the site path for a movie matching *title*/*year*.

    Returns a utf-8 encoded relative path, or None when debrid is
    disabled, the provider is unreachable, or no exact match exists.
    """
    try:
        # This provider only serves debrid-hosted links; skip otherwise.
        if debrid.status() == False:
            raise Exception()

        wanted = cleantitle.get(title)

        search_url = urlparse.urljoin(
            self.base_link, self.search_link + urllib.quote_plus(title))

        response = client.request(
            search_url, headers={'X-Requested-With': 'XMLHttpRequest'})
        entries = json.loads(response)

        candidates = []
        for entry in entries:
            # Only movie-category results that carry both url and label.
            if 'category' not in entry or 'movie' not in entry['category'].lower():
                continue
            if 'label' not in entry or 'url' not in entry:
                continue
            # Labels look like "Title (YYYY...".
            parsed = re.findall('(.+?) \((\d{4})', entry['label'])
            if not parsed:
                continue
            candidates.append((entry['url'], parsed[0][0], parsed[0][1]))

        # First candidate whose normalized title and year both match;
        # IndexError (no match) falls through to the bare except below.
        match = [u for (u, name, yr) in candidates
                 if wanted == cleantitle.get(name) and year == yr][0]

        # Keep only the path component (drop scheme/host when present).
        url = re.findall('(?://.+?|)(/.+)', match)[0]
        url = client.replaceHTMLCodes(url)
        return url.encode('utf-8')
    except:
        # Best-effort scraper: any failure simply yields no result.
        return
def movie(self, imdb, title, year):
    # Find a movie path via the site's AJAX search endpoint, verifying the
    # release year through the cached onemovies_info helper.
    # Returns a site-relative path, or None on any failure.
    try:
        t = cleantitle.get(title)

        headers = {'X-Requested-With': 'XMLHttpRequest'}

        query = urllib.urlencode({'keyword': title})

        url = urlparse.urljoin(self.base_link, self.search_link)

        r = client.request(url, post=query, headers=headers)

        # The AJAX response carries an HTML fragment under 'content'.
        r = json.loads(r)['content']
        r = zip(client.parseDOM(r, 'a', ret='href', attrs={'class': 'ss-title'}),
                client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))

        # NOTE(review): t is already cleaned, so cleantitle.get(t) is a
        # double-clean -- presumably idempotent; confirm against cleantitle.
        # Keep at most the first two title matches.
        r = [i[0] for i in r if cleantitle.get(t) == cleantitle.get(i[1])][:2]
        # Pair each candidate href with its trailing numeric id.
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]

        for i in r:
            try:
                # Cached (year, quality) lookup keyed by the numeric id.
                y, q = cache.get(self.onemovies_info, 9000, i[1])
                if not y == year: raise Exception()
                return urlparse.urlparse(i[0]).path
            except:
                pass
    except:
        return
def movie(self, imdb, title, year):
    # Primary strategy: a Google-CSE-style search (endpoint stored
    # base64-encoded in self.search_link).
    try:
        t = cleantitle.get(title)

        q = '%s %s' % (title, year)
        q = self.search_link.decode('base64') % urllib.quote_plus(q)

        r = client.request(q)
        r = json.loads(r)['results']

        r = [(i['url'], i['titleNoFormatting']) for i in r]
        # Result titles look like "Watch Title (YYYY"; the url is cut at
        # the first '%' to strip tracking/escaped suffixes.
        r = [(i[0].split('%')[0], re.findall('(?:^Watch |)(.+?)(?:\(|)(\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        # Movie pages only; season (TV) pages are excluded.
        r = [i for i in r if '/watch/' in i[0] and not '-season-' in i[0]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        r = r[0][0]

        # Reduce to the site-relative path.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass

    # Fallback: build the slug directly from the title and verify the
    # year appears in the final (post-redirect) URL.
    try:
        url = re.sub('[^A-Za-z0-9]', '-', title).lower()
        url = self.moviesearch_link % (url, year)

        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r, output='geturl')
        if not year in r: raise Exception()

        return url
    except:
        return
def movie(self, imdb, title, year):
    """Resolve a movie path using the Arabic-localized IMDb title.

    Fetches the IMDb page with an Arabic Accept-Language header, extracts
    the localized title, searches the provider with it, and returns the
    matching result's path (utf-8 encoded) or None.
    """
    try:
        imdb_page = client.request(
            'http://www.imdb.com/title/%s' % imdb,
            headers={'Accept-Language': 'ar-AR'})
        localized = client.parseDOM(imdb_page, 'title')[0]
        # Strip the trailing "(YYYY) ..." portion of the page title.
        localized = re.sub('(?:\(|\s)\d{4}.+', '', localized).strip()

        search_url = urlparse.urljoin(
            self.base_link, self.search_link % urllib.quote_plus(localized))

        html = client.request(search_url)
        result_items = client.parseDOM(html, 'div', attrs={'class': 'item'})

        found = []
        for item in result_items:
            hrefs = client.parseDOM(item, 'a', ret='href')
            names = client.parseDOM(item, 'span', attrs={'class': 'tt'})
            years = client.parseDOM(item, 'span', attrs={'class': 'year'})
            # Require all three fields before considering the result.
            if len(hrefs) > 0 and len(names) > 0 and len(years) > 0:
                found.append((hrefs[0], names[0], years[0]))

        # First result matching both normalized title and year;
        # IndexError (no match) is swallowed by the bare except.
        href = [h for (h, name, yr) in found
                if cleantitle.get(localized) == cleantitle.get(name)
                and year == yr][0]

        # Keep only the path component of the link.
        url = re.findall('(?://.+?|)(/.+)', href)[0]
        url = client.replaceHTMLCodes(url)
        return url.encode('utf-8')
    except:
        pass
def movie(self, imdb, title, year):
    # Search through a proxied form that requires a per-session 'key'
    # token, match title/year (accepting +/- 1 year), and verify ambiguous
    # candidates by probing their pages for the imdb id.
    # Returns a site-relative path, or None.
    try:
        # Fetch the search form to harvest the hidden 'key' input value.
        key = urlparse.urljoin(self.base_link, self.key_link)
        key = proxy.request(key, 'searchform')
        key = client.parseDOM(key, 'input', ret='value', attrs = {'name': 'key'})[0]

        query = self.moviesearch_link % (urllib.quote_plus(cleantitle.query(title)), key)
        query = urlparse.urljoin(self.base_link, query)

        result = str(proxy.request(query, 'index_item'))
        # Pull in page 2 when pagination is present (plain or url-encoded).
        if 'page=2' in result or 'page%3D2' in result:
            result += str(proxy.request(query + '&page=2', 'index_item'))

        result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})

        # Site link titles are prefixed with "watch"; years within +/- 1
        # of the requested year are accepted for the coarse filter.
        title = 'watch' + cleantitle.get(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]

        r = []
        for i in result:
            u = i[0]
            # Unwrap proxy indirection: the real target may be carried in
            # the 'u' or 'q' query parameter of the proxied link.
            try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
            except: pass
            try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['q'][0]
            except: pass
            r += [(u, i[1])]

        # Exact title+year matches ...
        match = [i[0] for i in r if title == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

        # ... plus a de-duplicated (order-preserving) list of all urls.
        match2 = [i[0] for i in r]
        match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        # Prefer an exact match; otherwise probe up to five candidates for
        # a page whose tab list contains the imdb id.
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'choose_tabs')
                if imdb in str(r):
                    url = i
                    break
            except:
                pass

        # Reduce to the path component; NameError here (no candidate ever
        # assigned url) is caught below.
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception as e:
        # e.message is Python-2-only.
        logger.error(e.message)
        return
def movie(self, imdb, title, year):
    # Indian-language provider: determine the film's primary language from
    # IMDb, then search the matching language section of the site.
    # Returns the numeric id of the match as a string, or None.
    try:
        # ISO 639-1 code -> section name used by the site.
        langMap = {'hi':'hindi', 'ta':'tamil', 'te':'telugu', 'ml':'malayalam', 'kn':'kannada', 'bn':'bengali', 'mr':'marathi', 'pa':'punjabi'}

        lang = 'http://www.imdb.com/title/%s/' % imdb
        lang = client.request(lang)
        # Scrape the primary_language query parameter out of IMDb links;
        # unknown languages raise KeyError on langMap and fall through.
        lang = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', lang)
        lang = [i for i in lang if 'primary_language' in i]
        lang = [urlparse.parse_qs(urlparse.urlparse(i).query) for i in lang]
        lang = [i['primary_language'] for i in lang if 'primary_language' in i]
        lang = langMap[lang[0][0]]

        q = self.search_link % (lang, urllib.quote_plus(title))
        q = urlparse.urljoin(self.base_link, q)

        t = cleantitle.get(title)

        r = self.request(q)
        r = client.parseDOM(r, 'li')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h3'), client.parseDOM(i, 'div', attrs={'class': 'info'})) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
        # href -> first numeric id, info block -> 4-digit year.
        r = [(re.findall('(\d+)', i[0]), i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0][0], i[1], i[2][0]) for i in r if i[0] and i[2]]
        # First result matching normalized title and exact year.
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

        url = str(r)
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    # Resolve a "<show> - Season N" page via the AJAX search endpoint and
    # return its path suffixed with '?episode=NN', or None on failure.
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        t = cleantitle.get(data['tvshowtitle'])

        # Accept the premiere year +/- 1 to tolerate metadata drift.
        year = re.findall('(\d{4})', premiered)[0]
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)

        headers = {'X-Requested-With': 'XMLHttpRequest'}

        query = urllib.urlencode({'keyword': '%s - Season %s' % (data['tvshowtitle'], season)})

        url = urlparse.urljoin(self.base_link, self.search_link)

        r = client.request(url, post=query, headers=headers)

        # The AJAX response carries an HTML fragment under 'content'.
        r = json.loads(r)['content']
        r = zip(client.parseDOM(r, 'a', ret='href', attrs={'class': 'ss-title'}),
                client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))

        # Parse "<title> - season <n>" labels, then match show and season;
        # keep at most the first two candidates.
        r = [(i[0], re.findall('(.+?) - season (\d+)$', i[1].lower())) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i for i in r if t == cleantitle.get(i[1])]
        r = [i[0] for i in r if season == '%01d' % int(i[2])][:2]
        # Pair each candidate href with its trailing numeric id.
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]

        for i in r:
            try:
                # Cached (year, quality) info keyed by the numeric page id.
                y, q = cache.get(self.onemovies_info, 9000, i[1])
                if not y in years: raise Exception()
                return urlparse.urlparse(i[0]).path + '?episode=%01d' % int(episode)
            except:
                pass
    except:
        return
def movie(self, imdb, title, year):
    # Build self.super_url: a list of [playlist_url, cookies, referer]
    # triples for the matching movie, driving the site's token-protected
    # playlist endpoint. Returns that list, or None on error.
    self.super_url = []
    try:
        self.super_url = []
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)  # NOTE(review): computed but never used here
        query = "/search/%s.html" % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        link = client.request(query)
        r = client.parseDOM(link, 'div', attrs={'class': 'ml-item'})
        for links in r:
            url = client.parseDOM(links, 'a', ret='data-url')[0]
            title = client.parseDOM(links, 'a', ret='title')[0]
            url = urlparse.urljoin(self.info_link, url)
            infolink = client.request(url)
            # The year shown in the info tooltip must match the request.
            match_year = re.search('class="jt-info">(\d{4})<', infolink)
            match_year = match_year.group(1)
            if year in match_year:
                result = client.parseDOM(infolink, 'div', attrs={'class': 'jtip-bottom'})
                for items in result:
                    playurl = client.parseDOM(items, 'a', ret='href')[0]
                    playurl = playurl.encode('utf-8')
                    referer = "%s" % playurl
                    mylink = client.request(referer)
                    # Player parameters embedded in the page's JS config.
                    # NOTE(review): i_d and server are scraped but unused
                    # in this movie variant (the episode variant uses them).
                    i_d = re.findall(r'id: "(.*?)"', mylink, re.I | re.DOTALL)[0]
                    server = re.findall(r'server: "(.*?)"', mylink, re.I | re.DOTALL)[0]
                    # NOTE: 'type' shadows the builtin.
                    type = re.findall(r'type: "(.*?)"', mylink, re.I | re.DOTALL)[0]
                    episode_id = re.findall(r'episode_id: "(.*?)"', mylink, re.I | re.DOTALL)[0]
                    # Anti-scraping scheme (see __get_token/__uncensored):
                    # cookie name is key1 + episode_id + key2, and the
                    # playlist URL carries a hash of episode_id + key.
                    token = self.__get_token()
                    cookies = '%s%s%s=%s' % (self.key1, episode_id, self.key2, token)
                    url_hash = urllib.quote(self.__uncensored(episode_id + self.key, token))
                    url = urlparse.urljoin(self.base_link, self.playlist % (episode_id, url_hash))
                    request_url = url
                    self.super_url.append([request_url, cookies, referer])
        # NOTE(review): indentation inferred from collapsed source --
        # presumably the return sits after the search loop; confirm.
        return self.super_url
    except:
        return
def movie(self, imdb, title, year):
    """Resolve a movie25 id for *imdb*.

    Strategy 1 consults a local SQLite database (refreshed from a remote
    zip when the local copy is older than 7 days). Strategy 2 falls back
    to a Google-style search, filters candidates by title/year (+/- 1),
    and verifies ambiguous candidates by probing pages for the imdb id.
    Returns the numeric id as a string, or None.
    """
    # --- strategy 1: bundled imdb -> id database -------------------------
    try:
        download = True

        data = os.path.join(control.dataPath, 'provider.movie25.db')
        data_link = 'http://offshoregit.com/extest/provider.movie25.zip'

        # Re-download only when the local copy is older than 7 days
        # (a missing file raises and leaves download == True).
        try:
            download = abs(
                datetime.datetime.fromtimestamp(os.path.getmtime(data)) -
                (datetime.datetime.now())) > datetime.timedelta(days=7)
        except:
            pass

        if download:
            r = client.request(data_link)
            # Renamed from 'zip' to avoid shadowing the builtin; close the
            # archive even if extraction fails.
            zip_file = zipfile.ZipFile(StringIO.StringIO(r))
            try:
                zip_file.extractall(control.dataPath)
            finally:
                zip_file.close()

        dbcon = database.connect(data)
        try:
            dbcur = dbcon.cursor()
            dbcur.execute("SELECT * FROM movies WHERE imdb = '%s'" % imdb)
            url = dbcur.fetchone()[0]
        finally:
            dbcon.close()
        return url
    except:
        pass

    # --- strategy 2: web search fallback ---------------------------------
    try:
        q = self.search_link_2.decode('base64') % urllib.quote_plus(title)

        # The search endpoint is flaky; retry up to three extra times
        # (replaces the original copy-pasted 'if r == None' chain).
        r = client.request(q)
        for _ in range(3):
            if r != None: break
            r = client.request(q)

        r = json.loads(r)['results']
        r = [(i['url'], i['titleNoFormatting']) for i in r]
        # Result titles look like "Watch Title (YYYY)".
        r = [(i[0], re.findall('(?:^Watch |)(.+? \(\d{4}\))', i[1])) for i in r]
        r = [(urlparse.urljoin(self.base_link, i[0]), i[1][0]) for i in r if i[1]]

        t = cleantitle.get(title)
        # Years within +/- 1 are accepted for the coarse filter.
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        r = [i for i in r if any(x in i[1] for x in years)]

        # Exact title+year matches, plus a de-duplicated candidate list.
        match = [i[0] for i in r if t == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]
        match2 = [i[0] for i in r]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        # Prefer an exact match; otherwise probe up to five candidate
        # pages for the imdb id.
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'ovie')
                r = re.findall('(tt\d+)', r)
                if imdb in r:
                    url = i
                    break
            except:
                pass

        # The movie id is the last run of digits in the chosen url.
        url = re.findall('(\d+)', url)[-1]
        return url
    except:
        pass
def sources(self, url):
    # Scrape debrid-only hoster links for a movie/episode from the
    # provider's RSS-style search feed. Returns a list of source dicts.
    try:
        srcs = []

        if url == None: return srcs

        # Debrid-only provider.
        if debrid.status() == False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

        # hdlr is the token that must appear in the release name:
        # SxxExx for episodes, the year for movies.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        # Strip characters the search endpoint can't handle.
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)

        posts = client.parseDOM(r, 'item')

        items = []

        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]

                post = post.replace('\n', '').replace('\t', '')
                # Hoster links live between the red "Single Link" marker
                # and the next red span.
                post = re.compile('<span style="color: #ff0000">Single Link</b></span><br />(.+?)<span style="color: #ff0000">').findall(post)[0]

                u = re.findall('<a href="(http(?:s|)://.+?)">', post)

                items += [(t, i) for i in u]
            except:
                pass

        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)

                # Title portion before the year/SxxExx/3D marker must
                # match the requested title.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                # The last year/SxxExx token must equal hdlr.
                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr: raise Exception()

                # Everything after the year/episode token = format tags.
                fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [i.lower() for i in fmt]

                # Reject dub/sub releases and 'extras' collections.
                if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                if any(i in ['extras'] for i in fmt): raise Exception()

                # Map format tags to a quality label; screener/cam tags
                # override the resolution-based label.
                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                else: quality = 'SD'
                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                info = []

                if '3d' in fmt: info.append('3D')

                try:
                    # Normalize the advertised release size to GB.
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', name)[-1]
                    div = 1 if size.endswith(' GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass

                if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                info = ' | '.join(info)

                url = item[1]
                # Skip archives and disc images.
                if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()

                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                # Hostname reduced to its last two labels (e.g. host.com).
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                srcs.append({'source': host, 'quality': quality, 'provider': 'Bmoviez', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            except:
                pass

        # Drop CAM sources when anything better exists.
        check = [i for i in srcs if not i['quality'] == 'CAM']
        if check: srcs = check

        return srcs
    except:
        return srcs
def sources(self, url):
    # hevcfilm: collect debrid-only hoster links by searching the RSS
    # feed, then opening each matching post and harvesting its external
    # links. Returns a list of source dicts.
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        sources = []

        if url == None: return sources

        # Debrid-only provider.
        if debrid.status() == False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

        # SxxExx for episodes, the year for movies: must appear in the
        # release name.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        # Strip characters the search endpoint can't handle.
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)

        posts = client.parseDOM(r, 'item')

        items = []

        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]

                # Size may be embedded in the feed title; default '0'.
                try: s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)(?:GB|GiB|MB|MiB|mb|gb))', t)[0]
                except: s = '0'

                i = client.parseDOM(post, 'link')[0]

                items += [{'name': t, 'url': i, 'size': s}]
            except:
                pass

        for item in items:
            try:
                name = item.get('name')
                name = client.replaceHTMLCodes(name)

                # Title portion before the year/SxxExx/3D marker must
                # match the requested title.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                # The last year/SxxExx token must equal hdlr.
                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr: raise Exception()

                # Everything after the year/episode token = format tags.
                fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [i.lower() for i in fmt]

                # Reject dub/sub releases and 'extras' collections.
                if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                if any(i in ['extras'] for i in fmt): raise Exception()

                # Map format tags to a quality label; screener/cam tags
                # override the resolution-based label.
                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                else: quality = 'SD'
                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                info = []

                if '3d' in fmt: info.append('3D')

                try:
                    # Normalize the advertised release size to GB.
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)(?:GB|GiB|MB|MiB|mb))', item.get('size'))[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass

                if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                info = ' | '.join(info)

                movieurl = item.get('url')

                # Open the post page and pull external hoster links from
                # its 'entry' block.
                result = client.request(movieurl)
                result = result.decode('iso-8859-1').encode('utf-8')
                result = result.replace('\n', '').replace('\t', '')
                result = client.parseDOM(result, 'div', attrs={'class': 'entry'})[0]
                links = client.parseDOM(result, 'a', attrs={'target': '_blank'}, ret='href')

                for link in links:
                    try:
                        # Skip internal links and executables.
                        if link.startswith(self.base_link) or link.endswith('exe'): raise Exception()
                        if 'http' in link:
                            host = client.host(link)
                            sources.append({'provider': 'hevcfilm', 'source': host, 'quality': quality, 'language': 'en', 'url': link, 'info': info, 'direct': False, 'debridonly': True})
                    except:
                        pass
            except:
                pass

        # Drop CAM sources when anything better exists.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check: sources = check

        logger.debug('SOURCES URL %s' % url, __name__)
        return sources
    except:
        return sources
def sources(self, url):
    # DesiTashan: multi-part streaming links grouped per host paragraph.
    # Tries mirror domains in order; returns a list of source dicts where
    # multi-part urls are joined with '##'.
    try:
        logger.debug('SOURCES URL %s' % url, __name__)
        quality = ''
        srcs = []
        result = ''
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        # Search term is "<imdb id> <title>".
        url = '%s %s' % (data['imdb'], data['title'])
        url = self.search_link % (urllib.quote_plus(url))
        # Try each mirror until one responds with an RSS feed ('item').
        links = [self.base_link_1, self.base_link_2, self.base_link_3]
        for base_link in links:
            try: result = client.request(base_link + '/' + url)
            except: result = ''
            if 'item' in result: break

        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        items = client.parseDOM(result, 'item')

        for item in items:
            title = client.parseDOM(item, 'title')[0]
            title = title.replace('Video Watch Online', '')
            title = cleantitle.get(title)
            ctitle = cleantitle.get('%s %s' % (data['imdb'], data['title']))
            if title == ctitle:
                links = client.parseDOM(item, 'p', attrs={'style': 'text-align: center;'})
                for link in links:
                    # A paragraph containing a <span> announces the
                    # quality of the link paragraphs that follow it.
                    if 'span' in link:
                        if 'HD' in link: quality = 'HD'
                        else: quality = 'SD'
                        continue
                    urls = client.parseDOM(link, 'a', ret='href')
                    if len(urls) > 0:
                        # Multi-part video: rewrite each part url and join
                        # with '##'; part count is reported separately.
                        for i in range(0, len(urls)):
                            urls[i] = client.urlRewrite(urls[i])
                        host = client.host(urls[0])
                        url = "##".join(urls)
                        srcs.append({'source': host, 'parts': str(len(urls)), 'quality': quality, 'provider': 'DesiTashan', 'url': url, 'direct': False})
                        urls = []
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    # Build self.super_url ([playlist_url, cookies, referer] triples) for
    # one episode of a show, driving the site's token-protected playlist
    # endpoint. Returns the list, or None on failure.
    self.super_url = []
    try:
        headers = {}
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        year = data['year']  # NOTE(review): assigned but never used here
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        data['season'], data['episode'] = season, episode
        # Matching key: normalized title + "seasonN".
        seasoncheck = "season%s" % season
        checktitle = cleanmovie + seasoncheck
        self.super_url = []
        showlist = []
        query = "/search/%s.html" % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        link = client.request(query)
        r = client.parseDOM(link, 'div', attrs={'class': 'ml-item'})
        for links in r:
            season_url = client.parseDOM(links, 'a', ret='href')[0]
            title = client.parseDOM(links, 'a', ret='title')[0]
            title = title.encode('utf-8')
            season_url = season_url.encode('utf-8')
            title = cleantitle.get(title)
            if checktitle in title:
                showlist.append(season_url)
        for seasonlist in showlist:
            mylink = client.request(seasonlist)
            # The "Watch movie" button links to the player page.
            referer = re.findall(r'<a class="mod-btn mod-btn-watch" href="(.*?)" title="Watch movie">', mylink, re.I | re.DOTALL)[0]
            epurl = client.request(referer)
            # Player parameters embedded in the page's JS config.
            i_d = re.findall(r'id: "(.*?)"', epurl, re.I | re.DOTALL)[0]
            server = re.findall(r'server: "(.*?)"', epurl, re.I | re.DOTALL)[0]
            # NOTE: 'type' shadows the builtin.
            type = re.findall(r'type: "(.*?)"', epurl, re.I | re.DOTALL)[0]
            episode_id = re.findall(r'episode_id: "(.*?)"', epurl, re.I | re.DOTALL)[0]
            request_url = self.base_link + '/ajax/v3_movie_get_episodes/' + i_d + '/' + server + '/' + episode_id + '/' + type + '.html'
            headers = {'Referer': referer, 'User-Agent': cache.get(client.randomagent, 1), 'X-Requested-With': 'XMLHttpRequest'}
            episodelink = client.request(request_url, headers=headers)
            # Episode list block for the active server.
            pattern = 'episodes-server-%s"(.+?)/ul>' % server
            match = re.findall(pattern, episodelink, re.DOTALL)[0]
            blocks = re.compile('<li(.+?)/li>', re.DOTALL).findall(match)
            for fragment in blocks:
                epnumber = re.findall('title="Episode\s+(\d+):', fragment)[0]
                # Zero-pad both sides before comparing.
                episode = "%02d" % (int(episode))
                epnumber = "%02d" % (int(epnumber))
                if epnumber == episode:
                    epid = re.findall('id="episode-(\d+)"', fragment)[0]
                    episode_id = epid
                    # Anti-scraping scheme (see __get_token/__uncensored):
                    # cookie name is key1 + episode_id + key2, and the
                    # playlist URL carries a hash of episode_id + key.
                    token = self.__get_token()
                    cookies = '%s%s%s=%s' % (self.key1, episode_id, self.key2, token)
                    url_hash = urllib.quote(self.__uncensored(episode_id + self.key, token))
                    url = urlparse.urljoin(self.base_link, self.playlist % (episode_id, url_hash))
                    request_url = url
                    self.super_url.append([request_url, cookies, referer])
        # NOTE(review): indentation inferred from collapsed source --
        # presumably the return sits after both loops; confirm.
        return self.super_url
    except:
        return
def sources(self, url):
    # Debrid-only scraper: open the FIRST search-feed result and harvest
    # external hoster links from its 'content' block.
    # Returns a list of source dicts.
    try:
        srcs = []

        if url == None: return srcs

        # Debrid-only provider.
        if debrid.status() == False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

        # SxxExx for episodes, the year for movies: must appear in the
        # release name.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        # Strip characters the search endpoint can't handle.
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)

        posts = client.parseDOM(r, 'item')
        # Only the first feed item is inspected.
        link = client.parseDOM(posts[0], 'link')[0]

        items = []

        result = client.request(link)
        posts = client.parseDOM(result, 'div', attrs={'id': 'content'})

        for post in posts:
            try:
                # Collect (link text, href) pairs for all external links.
                items += zip(client.parseDOM(post, 'a', attrs={'target': '_blank'}),
                             client.parseDOM(post, 'a', ret='href', attrs={'target': '_blank'}))
            except:
                pass

        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)

                # Title portion before the year/SxxExx/3D marker must
                # match the requested title.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                # The last year/SxxExx token must equal hdlr.
                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr: raise Exception()

                # Everything after the year/episode token = format tags.
                fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [i.lower() for i in fmt]

                # Reject dub/sub releases and 'extras' collections.
                if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                if any(i in ['extras'] for i in fmt): raise Exception()

                # Map format tags to a quality label; screener/cam tags
                # override the resolution-based label.
                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                else: quality = 'SD'
                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                info = []

                if '3d' in fmt: info.append('3D')

                try:
                    # Normalize the advertised release size to GB.
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', name)[-1]
                    div = 1 if size.endswith(' GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass

                if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                info = ' | '.join(info)

                url = item[1]
                # Skip archives and disc images.
                if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()

                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                # Hostname reduced to its last two labels (e.g. host.com).
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                srcs.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            except:
                pass

        return srcs
    except:
        return srcs