def sources(self, url):
    """Scrape watch links for a movie page and return a list of source dicts.

    Each entry carries the source host, part count, quality label,
    provider name, the link URL (multi-part links joined with '##') and
    a direct-playback flag. Returns an empty list on any failure.
    """
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        srcs = []
        if url == None:
            return srcs
        url = urlparse.urljoin(self.base_link, url)
        try:
            result = client.request(url)
        except:
            result = ''
        # Site serves latin-1; normalise to utf-8 and strip whitespace noise.
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '').replace('\r', '')
        result = client.parseDOM(
            result, "div",
            attrs={"class": "td-post-content td-pb-padding-side"})[0]
        result = client.parseDOM(result, "p",
                                 attrs={"style": "text-align: center;"})
        for item in result:
            try:
                urls = client.parseDOM(item, "a", ret="href")
                # Quality is read from the <b> label; anything not
                # explicitly 720p is treated as SD.
                quality = client.parseDOM(item, "b")
                quality = " ".join(quality)
                quality = quality.lower()
                if "720p" in quality:
                    quality = "HD"
                else:
                    quality = "SD"
                for i in range(0, len(urls)):
                    urls[i] = client.urlRewrite(urls[i])
                host = client.host(urls[0])
                # Multi-part videos are encoded as one '##'-joined URL.
                if len(urls) > 1:
                    url = "##".join(urls)
                else:
                    url = urls[0]
                srcs.append({
                    'source': host,
                    'parts': str(len(urls)),
                    'quality': quality,
                    'provider': 'tDesiHit',
                    'url': url,
                    'direct': False
                })
            except:
                pass
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def movie(self, imdb, title, year):
    """Resolve an IMDB id/title/year to this site's movie page path.

    Returns the path component (utf-8 bytes) or None on failure.
    """
    try:
        # Fetch the IMDB page to obtain the canonical title to search
        # with. NOTE(review): the 'ar-AR' Accept-Language header looks
        # intended to coax IMDB into a specific title form — confirm.
        t = 'http://www.imdb.com/title/%s' % imdb
        t = client.request(t, headers={'Accept-Language': 'ar-AR'})
        t = client.parseDOM(t, 'title')[0]
        # Drop the trailing "(YYYY) ..." portion of the <title>.
        t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
        query = self.search_link % urllib.quote_plus(t)
        query = urlparse.urljoin(self.base_link, query)
        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'item'})
        r = [(client.parseDOM(i, 'a', ret='href'),
              client.parseDOM(i, 'span', attrs={'class': 'tt'}),
              client.parseDOM(i, 'span', attrs={'class': 'year'}))
             for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r
             if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        # First result whose cleaned title and year both match.
        r = [
            i[0] for i in r
            if cleantitle.get(t) == cleantitle.get(i[1]) and year == i[2]
        ][0]
        # Reduce an absolute URL to its path component.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def movie(self, imdb, title, year):
    """Search the site for a movie by title and return its page slug.

    Returns the slug between the final two slashes of the matching
    result's href, or None (with an error log) when nothing matches.
    """
    try:
        # FIX: url must be pre-initialized — previously a search with no
        # matching item raised NameError at the check below, masked as a
        # generic logged exception. Also removed the no-op
        # `self.base_link = self.base_link` and a dead `query` assignment
        # that was immediately overwritten.
        url = None
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs={"class": "item"})
        title = cleantitle.movie(title)
        for item in result:
            searchTitle = client.parseDOM(item, "span",
                                          attrs={"class": "tt"})[0]
            try:
                # Strip a trailing "YYYY " marker from the listing title.
                searchTitle = re.compile('(.+?) \d{4} ').findall(
                    searchTitle)[0]
            except:
                pass
            searchTitle = cleantitle.movie(searchTitle)
            if title in searchTitle:
                # Keep the slug between the final two slashes.
                url = client.parseDOM(item, "a", ret="href")[0]
                url = re.compile(".+/(.+?)/").findall(url)[0]
                break
        if url == None or url == '':
            raise Exception()
        return url
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
        return
def movie(self, imdb, title, year):
    """Search the movies tab for an exact cleaned-title match.

    Returns the matching item's href, or None when no result matches.
    """
    try:
        # FIX: url must be pre-initialized — previously a search with no
        # exact match raised NameError at the check below (swallowed by
        # the bare except). Also dropped the no-op
        # `self.base_link = self.base_link`.
        url = None
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div",
                                 attrs={"class": "tab-content mt20"})[0]
        result = client.parseDOM(result, "div",
                                 attrs={"id": "movies_tab"})[0]
        result = client.parseDOM(result, "div",
                                 attrs={"class": "media-left"})
        title = cleantitle.movie(title)
        for item in result:
            searchTitle = client.parseDOM(item, "a", ret="title")[0]
            # Listing titles look like "Name (YYYY)"; keep only the name.
            searchTitle = re.compile('(.+?) [(]\d{4}[)]$').findall(
                searchTitle)[0]
            searchTitle = cleantitle.movie(searchTitle)
            if title == searchTitle:
                url = client.parseDOM(item, "a", ret="href")[0]
                break
        if url == None or url == '':
            raise Exception()
        return url
    except:
        return
def source(self, item):
    """Resolve one result row into a playable source appended to self.srcs.

    Each href in the row is fetched and replaced by the embedded player
    URL found in its videoplayer div. Failures are silently skipped.
    """
    quality = ''
    try:
        #urls = client.parseDOM(item, "td")
        urls = client.parseDOM(item, "a", ret="href")
        for i in range(0, len(urls)):
            uResult = client.request(urls[i], mobile=False)
            uResult = uResult.replace('\n', '').replace('\t', '')
            # Some mirrors only render correctly for mobile user agents.
            if 'Could not connect to mysql! Please check your database' in uResult:
                uResult = client.request(urls[i], mobile=True)
            item = client.parseDOM(uResult, "div",
                                   attrs={"class": "videoplayer"})[0]
            # Pull the player URL out of src/data-config attributes.
            item = re.compile('(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]'
                              ).findall(item)[0][1]
            urls[i] = item
        host = client.host(urls[0])
        # Multi-part videos are joined into one '##'-separated URL.
        if len(urls) > 1:
            url = "##".join(urls)
        else:
            url = urls[0]
        self.srcs.append({
            'source': host,
            'parts': str(len(urls)),
            'quality': quality,
            'provider': 'ApnaView',
            'url': url,
            'direct': False
        })
    except:
        pass
def movie(self, imdb, title, year):
    """Find the site URL path for a movie by title, verified by year."""
    try:
        t = cleantitle.movie(title)
        q = urlparse.urljoin(self.base_link, self.search_link)
        q = q % urllib.quote_plus(title)
        r = client.request(q, error=True)
        r = client.parseDOM(r, 'div', attrs={'class': 'item'})
        r = [(client.parseDOM(i, 'a', ret='href'),
              client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][-1]) for i in r
             if len(i[0]) > 0 and len(i[1]) > 0]
        r = [i[0] for i in r if t == cleantitle.movie(i[1])]
        # Probe at most the first four candidates for the release year.
        for i in r[:4]:
            try:
                m = client.request(urlparse.urljoin(self.base_link, i))
                m = re.sub('\s|<.+?>|</.+?>', '', m)
                m = re.findall('Release:(%s)' % year, m)[0]
                u = i
                break
            except:
                pass
        # NOTE(review): if no candidate matched, `u` is unbound here and
        # the resulting NameError is swallowed by the outer except.
        url = re.findall('(?://.+?|)(/.+)', u)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def episode(self, url, ep_url, imdb, tvdb, title, date, season, episode):
    """Search mirror sites for an episode post and return its first link.

    Tries each configured base link until one returns a page containing
    'item', then extracts the first rel="nofollow" anchor from the feed
    body. Returns None when nothing could be resolved.
    """
    # FIX: the body is now wrapped in try/except like every sibling
    # method — previously a failed mirror left result == '' and
    # parseDOM('')[0] raised an uncaught IndexError to the caller.
    try:
        query = '%s %s' % (imdb, title)
        query = self.search_link % (urllib.quote_plus(query))
        result = ''
        links = [self.base_link_1, self.base_link_2, self.base_link_3]
        for base_link in links:
            try:
                result = client.request(base_link + query)
            except:
                result = ''
            if 'item' in result:
                break
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        result = client.parseDOM(result, 'content:encoded')[0]
        ep_url = client.parseDOM(result, "a",
                                 attrs={"rel": "nofollow"},
                                 ret="href")[0]
        if ep_url:
            return ep_url
    except:
        return
def movie(self, imdb, title, year):
    """Search a random mirror for an exact cleaned-title match.

    Returns the matching result's href, or None when nothing matches.
    """
    try:
        url = None
        # Spread load / dodge blocks by picking a mirror at random.
        self.base_link = random.choice(
            [self.base_link_1, self.base_link_2])
        # FIX: removed dead `query = '%s %s' % (title, year)` that was
        # immediately overwritten — the search is title-only.
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div",
                                 attrs={"class": "result clearfix"})
        title = cleantitle.movie(title)
        for item in result:
            item = client.parseDOM(item, "div",
                                   attrs={"class": "details"})[0]
            searchTitle = client.parseDOM(item, "a")[0]
            searchTitle = cleantitle.movie(searchTitle)
            if title == searchTitle:
                url = client.parseDOM(item, "a", ret="href")[0]
                break
        if url == None or url == '':
            raise Exception()
        return url
    except:
        return
def movie(self, imdb, title, year):
    """Look up a movie in a mirror's feed and return its first match URL."""
    try:
        url = None
        # Rotate between the two mirrors on every lookup.
        self.base_link = random.choice(
            [self.base_link_1, self.base_link_2])
        #query = '%s %s' % (title, year)
        query = urlparse.urljoin(
            self.base_link, self.search_link % urllib.quote_plus(title))
        feed = client.request(query, error=True)
        wanted = cleantitle.movie(title)
        for entry in client.parseDOM(feed, "item"):
            candidate = cleantitle.movie(
                client.parseDOM(entry, "title")[0])
            if wanted in candidate:
                url = client.parseDOM(entry, "a",
                                      attrs={"rel": "nofollow"},
                                      ret="href")[0]
                break
        if url in (None, ''):
            raise Exception()
        return url
    except:
        import traceback
        traceback.print_exc()
        return
def movie(self, imdb, title, year):
    """Search a random mirror's feed for an exact title match.

    Returns the matching item's <link> value, or None when no match.
    """
    try:
        # FIX: url must be pre-initialized — previously a feed with no
        # exact match raised NameError at the check below (swallowed by
        # the bare except). Siblings of this method already do this.
        url = None
        self.base_link = random.choice(
            [self.base_link_1, self.base_link_2])
        query = '%s %s' % (title, year)
        query = self.search_link % (urllib.quote_plus(query))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "item")
        title = cleantitle.movie(title)
        for item in result:
            searchTitle = client.parseDOM(item, "title")[0]
            searchTitle = cleantitle.movie(searchTitle)
            if title == searchTitle:
                url = client.parseDOM(item, "link")[0]
                break
        if url == None or url == '':
            raise Exception()
        return url
    except:
        return
def movie(self, imdb, title, year):
    """Search via the site's AJAX endpoint and return the matching path."""
    try:
        t = cleantitle.get(title)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        query = urllib.urlencode({'keyword': title})
        url = urlparse.urljoin(self.base_link, self.search_link)
        r = client.request(url, post=query, headers=headers)
        r = json.loads(r)['content']
        r = zip(
            client.parseDOM(r, 'a', ret='href',
                            attrs={'class': 'ss-title'}),
            client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
        # Keep at most two candidates whose cleaned titles match.
        r = [i[0] for i in r
             if cleantitle.get(t) == cleantitle.get(i[1])][:2]
        # Pair each candidate with its trailing numeric id.
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        for i in r:
            try:
                # Cached info lookup returns (year, quality) for the id.
                y, q = cache.get(self.onemovies_info, 9000, i[1])
                if not y == year:
                    raise Exception()
                return urlparse.urlparse(i[0]).path
            except:
                pass
    except:
        return
def sources(self, url):
    """Collect player links from a movie page and return source dicts."""
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        srcs = []
        if url == None:
            return srcs
        url = urlparse.urljoin(self.base_link, url)
        try:
            result = client.request(url, referer=self.base_link)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        try:
            quality = client.parseDOM(result, "span",
                                      attrs={"class": "calidad2"})[0]
        except:
            quality = ""
        # Tab labels and player bodies are parallel lists.
        parts = client.parseDOM(result, "div",
                                attrs={"class": "player_nav"})[0]
        parts = client.parseDOM(parts, "a")
        items = client.parseDOM(result, "div", attrs={"id": "player2"})[0]
        items = client.parseDOM(items, "div",
                                attrs={"class": "movieplay"})
        for i in range(0, len(items)):
            try:
                part = parts[i]
                part = cleantitle.movie(part)
                # Skip tabs that are not a "full" movie player.
                # NOTE(review): precedence is (not "full" in part) or
                # ("option" in part) — confirm "option" tabs really
                # should be skipped rather than kept.
                if not "full" in part or "option" in part:
                    continue
                url = re.compile(
                    '(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]').findall(
                        items[i])[0][1]
                host = client.host(url)
                srcs.append({
                    'source': host,
                    'parts': '1',
                    'quality': quality,
                    'provider': 'DesiHDMovies',
                    'url': url,
                    'direct': False
                })
            except:
                pass
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
        return srcs
def sources(self, url):
    """Scrape rip links grouped by quality label from a movie page."""
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        srcs = []
        if url == None:
            return srcs
        url = self.movie_link % url
        url = urlparse.urljoin(self.base_link, url)
        rUrl = url  # page is used as its own referer
        try:
            result = client.request(url, referer=rUrl)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        result = client.parseDOM(result, "div",
                                 attrs={"class": "movierip"})
        for item in result:
            try:
                urls = client.parseDOM(item, "a", ret="href")
                # Infer quality from the rip's anchor text.
                quality = client.parseDOM(item, "a")[0]
                quality = quality.lower()
                if "scr rip" in quality:
                    quality = "SCR"
                elif "dvd" in quality:
                    quality = "HD"
                else:
                    quality = "CAM"
                for i in range(0, len(urls)):
                    urls[i] = client.urlRewrite(urls[i])
                host = client.host(urls[0])
                # Multi-part links are joined with '##'.
                if len(urls) > 1:
                    url = "##".join(urls)
                else:
                    url = urls[0]
                srcs.append({
                    'source': host,
                    'parts': str(len(urls)),
                    'quality': quality,
                    'provider': 'mDesiHit',
                    'url': url,
                    'direct': False
                })
            except:
                pass
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def sources(self, url):
    """Collect debrid-only download links with quality and size info.

    Returns a list of source dicts; empty when debrid is disabled, the
    url is None, or scraping fails.
    """
    try:
        srcs = []
        if url == None:
            return srcs
        # These hosters are only usable through a debrid service.
        if debrid.status() == False:
            raise Exception()
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        links = client.parseDOM(r, 'p')
        for link in links:
            try:
                host = re.findall('Downloads-Server(.+?)(?:\'|\")\)',
                                  link)[0]
                host = host.strip().lower().split()[-1]
                if host == 'fichier':
                    host = '1fichier'
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                url = client.parseDOM(link, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                r = client.parseDOM(link, 'a')[0]
                fmt = r.strip().lower().split()
                # FIX: quality was unbound for links that were neither
                # 1080p nor 720p, so the NameError silently dropped
                # every SD source. Default to 'SD'.
                if '1080p' in fmt:
                    quality = '1080p'
                elif '720p' in fmt:
                    quality = 'HD'
                else:
                    quality = 'SD'
                try:
                    # File size like "1.4 GB" / "700 MB" → "x.xx GB".
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', r)[-1]
                    div = 1 if size.endswith(' GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    info = '%.2f GB' % size
                except:
                    info = ''
                srcs.append({
                    'source': host,
                    'quality': quality,
                    'provider': 'DLTube',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
            except:
                pass
        return srcs
    except:
        return srcs
def sources(self, url):
    """Scrape player tabs from a movie page; quality from category tags."""
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        srcs = []
        if url == None:
            return srcs
        try:
            result = client.request(self.movie_link %
                                    (self.base_link_1, url))
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '')
        # Category tags in the "extras" block carry the rip quality;
        # quality stays '' when neither keyword appears.
        categories = client.parseDOM(result, "div",
                                     attrs={"id": "extras"})
        categories = client.parseDOM(categories, "a",
                                     attrs={"rel": "category tag"})
        for category in categories:
            category = category.lower()
            if "scr" in category:
                quality = "SCR"
                break
            elif "bluray" in category:
                quality = "HD"
                break
        # The currently-active tab plus the remaining tabs hold players.
        links = client.parseDOM(
            result, "div",
            attrs={"class": "GTTabs_divs GTTabs_curr_div"})
        links += client.parseDOM(result, "div",
                                 attrs={"class": "GTTabs_divs"})
        for link in links:
            try:
                url = re.compile(
                    '(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]').findall(
                        link)[0][1]
                host = client.host(url)
                srcs.append({
                    'source': host,
                    'parts': '1',
                    'quality': quality,
                    'provider': 'HDBuffer',
                    'url': url,
                    'direct': False
                })
            except:
                pass
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def movie(self, imdb, title, year):
    """Search YMovies and build [request_url, cookies, referer] entries.

    For every search result whose info page shows the wanted year, the
    player parameters (episode id, key, token) are harvested and turned
    into a signed playlist request. Returns self.super_url (possibly
    empty) or None on failure.
    """
    self.super_url = []
    try:
        self.super_url = []
        title = cleantitle.getsearch(title)
        cleanmovie = cleantitle.get(title)
        query = "/search/%s.html" % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        link = client.request(query)
        r = client.parseDOM(link, 'div', attrs={'class': 'ml-item'})
        for links in r:
            # print ("YMOVIES REQUEST", links)
            url = client.parseDOM(links, 'a', ret='data-url')[0]
            title = client.parseDOM(links, 'a', ret='title')[0]
            url = urlparse.urljoin(self.info_link, url)
            infolink = client.request(url)
            # Release year is shown in the tooltip info markup.
            match_year = re.search('class="jt-info">(\d{4})<', infolink)
            match_year = match_year.group(1)
            if year in match_year:
                result = client.parseDOM(infolink, 'div',
                                         attrs={'class': 'jtip-bottom'})
                for items in result:
                    playurl = client.parseDOM(items, 'a', ret='href')[0]
                    playurl = playurl.encode('utf-8')
                    referer = "%s" % playurl
                    mylink = client.request(referer)
                    # Player parameters are embedded in the page script.
                    i_d = re.findall(r'id: "(.*?)"', mylink,
                                     re.I | re.DOTALL)[0]
                    server = re.findall(r'server: "(.*?)"', mylink,
                                        re.I | re.DOTALL)[0]
                    type = re.findall(r'type: "(.*?)"', mylink,
                                      re.I | re.DOTALL)[0]
                    episode_id = re.findall(r'episode_id: "(.*?)"',
                                            mylink,
                                            re.I | re.DOTALL)[0]
                    # print ("YMOVIES REQUEST", episode_id)
                    token = self.__get_token()
                    # print ("YMOVIES TOKEN", token)
                    # Cookie ties the episode id to the session token.
                    cookies = '%s%s%s=%s' % (self.key1, episode_id,
                                             self.key2, token)
                    # print ("YMOVIES cookies", cookies)
                    # Hash signs the playlist request.
                    url_hash = urllib.quote(
                        self.__uncensored(episode_id + self.key, token))
                    # print ("YMOVIES hash", url_hash)
                    url = urlparse.urljoin(
                        self.base_link,
                        self.playlist % (episode_id, url_hash))
                    request_url = url
                    # print ("YMOVIES REQUEST", request_url)
                    self.super_url.append(
                        [request_url, cookies, referer])
        return self.super_url
    except:
        return
def movie(self, imdb, title, year):
    """Search (via proxy) for a movie page, verifying title/year/imdb.

    Exact title+year matches win; otherwise up to five de-duplicated
    candidates are probed for the imdb id. Returns the page path
    (utf-8 bytes) or None.
    """
    try:
        # The site's search form requires a per-session key.
        key = urlparse.urljoin(self.base_link, self.key_link)
        key = proxy.request(key, 'searchform')
        key = client.parseDOM(key, 'input', ret='value',
                              attrs = {'name': 'key'})[0]
        query = self.moviesearch_link % (
            urllib.quote_plus(cleantitle.query(title)), key)
        query = urlparse.urljoin(self.base_link, query)
        result = str(proxy.request(query, 'index_item'))
        # Pull in page two of the results when it exists.
        if 'page=2' in result or 'page%3D2' in result:
            result += str(proxy.request(query + '&page=2', 'index_item'))
        result = client.parseDOM(result, 'div',
                                 attrs = {'class': 'index_item.+?'})
        title = 'watch' + cleantitle.get(title)
        # Accept the stated year plus/minus one.
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1),
                 '(%s)' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href'),
                   client.parseDOM(i, 'a', ret='title'))
                  for i in result]
        result = [(i[0][0], i[1][0]) for i in result
                  if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]
        r = []
        for i in result:
            u = i[0]
            # Unwrap proxy redirect query parameters when present.
            try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
            except: pass
            try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['q'][0]
            except: pass
            r += [(u, i[1])]
        match = [i[0] for i in r if title == cleantitle.get(i[1])
                 and '(%s)' % str(year) in i[1]]
        match2 = [i[0] for i in r]
        # De-duplicate while preserving order.
        match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0] ; break
                # Fall back to probing each page for the imdb id.
                r = proxy.request(urlparse.urljoin(self.base_link, i),
                                  'choose_tabs')
                if imdb in str(r):
                    url = i ; break
            except:
                pass
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception as e:
        logger.error(e.message)
        return
def sources(self, url):
    """Gather sources: a featured <center> player plus table rows.

    Table rows are resolved by self.source(), run on worker threads when
    the 'hypermode' addon setting is enabled.
    """
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        self.srcs = []
        if url == None:
            return self.srcs
        try:
            result = client.request(url)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div",
                                 attrs={"class": "col-md-12 mt20"})[0]
        try :
            # Featured player embedded directly in the page body.
            item = client.parseDOM(result, "center")[0]
            url = re.compile('(SRC|src|data-config)=\"(.+?)\"').findall(item)[0][1]
            host = client.host(url)
            self.srcs.append({'source': host, 'parts' : '1',
                              'quality': quality,
                              'provider': 'iBollyTV',
                              'url': url, 'direct':False})
        except:
            pass
        hypermode = False if control.setting('hypermode') == 'false' else True
        threads = []
        try :
            result = client.parseDOM(result, "div",
                                     attrs={"class": "table-responsive"})[0]
            result = client.parseDOM(result, "tbody")[0]
            result = client.parseDOM(result, "tr")
            for item in result:
                if hypermode :
                    threads.append(workers.Thread(self.source, item))
                else :
                    self.source(item)
            if hypermode:
                [i.start() for i in threads]
                # Busy-wait until every worker thread has finished.
                stillWorking = True
                while stillWorking:
                    stillWorking = False
                    stillWorking = [True for x in threads
                                    if x.is_alive() == True]
        except:
            pass
        logger.debug('SOURCES [%s]' % self.srcs, __name__)
        return self.srcs
    except:
        return self.srcs
def movie(self, imdb, title, year):
    """Resolve a movie id, routing search by the film's primary language."""
    try:
        # Map IMDB primary_language codes to the site's section names.
        langMap = {
            'hi': 'hindi',
            'ta': 'tamil',
            'te': 'telugu',
            'ml': 'malayalam',
            'kn': 'kannada',
            'bn': 'bengali',
            'mr': 'marathi',
            'pa': 'punjabi'
        }
        # Pull the primary_language query parameter from IMDB links.
        lang = 'http://www.imdb.com/title/%s/' % imdb
        lang = client.request(lang)
        lang = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', lang)
        lang = [i for i in lang if 'primary_language' in i]
        lang = [
            urlparse.parse_qs(urlparse.urlparse(i).query) for i in lang
        ]
        lang = [
            i['primary_language'] for i in lang
            if 'primary_language' in i
        ]
        lang = langMap[lang[0][0]]
        q = self.search_link % (lang, urllib.quote_plus(title))
        q = urlparse.urljoin(self.base_link, q)
        t = cleantitle.get(title)
        r = self.request(q)
        r = client.parseDOM(r, 'li')
        r = [(client.parseDOM(i, 'a', ret='href'),
              client.parseDOM(i, 'h3'),
              client.parseDOM(i, 'div', attrs={'class': 'info'}))
             for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r
             if i[0] and i[1] and i[2]]
        r = [(re.findall('(\d+)', i[0]), i[1],
              re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0][0], i[1], i[2][0]) for i in r if i[0] and i[2]]
        # First numeric id whose title and year both match.
        r = [
            i[0] for i in r
            if t == cleantitle.get(i[1]) and year == i[2]
        ][0]
        url = str(r)
        return url
    except:
        return
def sources(self, url):
    """Resolve iframe players (incl. packed JS) into gvideo sources."""
    try:
        sources = []
        if url == None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs={'class': 'player_wraper'})
        r = client.parseDOM(r, 'iframe', ret='src')
        for u in r:
            try:
                # Pattern matching "url"/"src" JSON-ish values.
                m = '"(?:url|src)"\s*:\s*"(.+?)"'
                d = urlparse.urljoin(self.base_link, u)
                s = client.request(d, referer=url, timeout='10')
                # Unpack any p.a.c.k.e.d scripts so URLs become visible.
                j = re.compile('<script>(.+?)</script>',
                               re.DOTALL).findall(s)
                for i in j:
                    try:
                        s += jsunpack.unpack(i)
                    except:
                        pass
                u = re.findall(m, s)
                if not u:
                    # Follow a JS redirect, or guess the /player/ path
                    # from the iframe URL's last segment.
                    p = re.findall('location\.href\s*=\s*"(.+?)"', s)
                    if not p:
                        p = ['/player/%s' % d.strip('/').split('/')[-1]]
                    p = urlparse.urljoin(self.base_link, p[0])
                    s = client.request(p, referer=d, timeout='10')
                    u = re.findall(m, s)
                for i in u:
                    try:
                        sources.append({'provider':'movies14',
                                        'source': 'gvideo',
                                        'quality': directstream.googletag(i)[0]['quality'],
                                        'language': 'en',
                                        'url': i,
                                        'direct': True,
                                        'debridonly': False})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def onemovies_info(self, url):
    """Fetch the tooltip info page for an id and return (year, quality)."""
    try:
        info_url = urlparse.urljoin(self.base_link, self.info_link) % url
        page = client.request(info_url)
        quality = client.parseDOM(page, 'div',
                                  attrs={'class': 'jtip-quality'})[0]
        # The year is the lone 4-digit numeric entry among the info rows.
        year = next(
            i.strip()
            for i in client.parseDOM(page, 'div',
                                     attrs={'class': 'jt-info'})
            if i.strip().isdigit() and len(i.strip()) == 4)
        return (year, quality)
    except:
        return
def resolve(self, url, resolverList):
    """Resolve a player page to a direct stream URL with Referer attached.

    Tries the encrypted window.pl_data channel list first; falls back to
    a plain <source type="application/x-mpegurl"> tag. Returns the
    resolved URL string, or False on failure.
    NOTE(review): resolverList is unused here — confirm it is required
    by the caller's interface.
    """
    try:
        logger.debug('ORIGINAL URL [%s]' % url, __name__)
        result = client.request(url, headers=self.headers)
        playdata = 'window.pl_data = (\{.*?"key":.*?\}\})'
        result = re.findall(playdata, result)[0]
        try:
            result = json.loads(result)
            link = result['live']['channel_list'][0]['file']
            key = result['live']['key']
            # Both the stream URL and the key are base64 wrapped.
            link = link.decode('base64')
            key = key.decode('base64')
            # AES-CBC decrypt with a zero IV, then strip NULs/noise.
            de = pyaes.new(key, pyaes.MODE_CBC, IV='\0' * 16)
            link = de.decrypt(link).replace('\x00', '').split('\0')[0]
            link = re.sub('[^\s!-~]', '', link)
        except:
            link = client.parseDOM(result, "source",
                                   attrs={"type": "application/x-mpegurl"},
                                   ret="src")[0]
        logger.debug('URL : [%s]' % link, __name__)
        url = '%s|Referer=%s' % (link.strip(), url)
        result = client.validateUrl(url)
        logger.debug('RESOLVED URL [%s]' % url, __name__)
        return url
    except:
        return False
def resolve(url):
    """Resolve an embed page to its underlying media URL, or False.

    Decodes obfuscated pages via EnkDekoder when possible, then digs the
    media URL out of flashvars / file: / iframe patterns.
    """
    try:
        result = client.request(url)
        dek = EnkDekoder.dekode(result)
        if not dek == None:
            url = client.parseDOM(dek, "param",
                                  attrs={"name": "flashvars"},
                                  ret="value")[0]
        else:
            dek = result
            url = re.compile('file*:*"(http.+?)"').findall(dek)[0]
        if re.search(';video_url', url):
            url = re.findall(';video_url=(.+?)&', url)[0]
        elif re.search('iframe src=', url):
            url = re.findall('<iframe src="(.+?)"', url)[0]
        # Prefer the flv variant over the iPod preview rendition.
        url = url.replace('_ipod.mp4', '.flv')
        url = url.replace('preview', 'edit')
        logger.debug('URL [%s]' % url, __name__)
        return url
    except:
        return False
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Find a season page via AJAX search; return its path + ?episode=N."""
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '')
                     for i in data])
        t = cleantitle.get(data['tvshowtitle'])
        year = re.findall('(\d{4})', premiered)[0]
        # Accept the premiere year plus/minus one.
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        query = urllib.urlencode(
            {'keyword': '%s - Season %s' % (data['tvshowtitle'],
                                            season)})
        url = urlparse.urljoin(self.base_link, self.search_link)
        r = client.request(url, post=query, headers=headers)
        r = json.loads(r)['content']
        r = zip(
            client.parseDOM(r, 'a', ret='href',
                            attrs={'class': 'ss-title'}),
            client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
        # Results are "<title> - season <n>"; split and filter on both.
        r = [(i[0], re.findall('(.+?) - season (\d+)$', i[1].lower()))
             for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i for i in r if t == cleantitle.get(i[1])]
        r = [i[0] for i in r if season == '%01d' % int(i[2])][:2]
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        for i in r:
            try:
                # Cached (year, quality) lookup validates the candidate.
                y, q = cache.get(self.onemovies_info, 9000, i[1])
                if not y in years:
                    raise Exception()
                return urlparse.urlparse(
                    i[0]).path + '?episode=%01d' % int(episode)
            except:
                pass
    except:
        return
def sources(self, url):
    """Dispatch result-table rows to self.source(), optionally threaded.

    self.source() appends into self.srcs; when the 'hypermode' setting
    is enabled the rows are processed on worker threads and this method
    busy-waits for them to finish. Returns self.srcs.
    """
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        # FIX: reset the shared accumulator up front (matching the
        # sibling threaded provider) — previously self.srcs was returned
        # without ever being initialized in this method, so an early
        # return or failure could hit a missing or stale attribute.
        self.srcs = []
        if url == None:
            return self.srcs
        url = '%s%s' % (self.base_link, url)
        try:
            result = client.request(url)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        result = client.parseDOM(result, "table",
                                 attrs={"class": "table table-bordered"})[0]
        result = client.parseDOM(result, "tbody")[0]
        result = client.parseDOM(result, "tr")
        hypermode = False if control.setting(
            'hypermode') == 'false' else True
        threads = []
        for item in result:
            if hypermode:
                threads.append(workers.Thread(self.source, item))
            else:
                self.source(item)
        if hypermode:
            [i.start() for i in threads]
            # Busy-wait until every worker thread has finished.
            stillWorking = True
            while stillWorking:
                stillWorking = False
                stillWorking = [
                    True for x in threads if x.is_alive() == True
                ]
        logger.debug('SOURCES [%s]' % self.srcs, __name__)
        return self.srcs
    except:
        return self.srcs
def sources(self, url):
    """Decode Primewire external links and classify their quality."""
    try:
        srcs = []
        if url == None:
            return srcs
        url = urlparse.urljoin(self.base_link, url)
        result = proxy.request(url, 'choose_tabs')
        links = client.parseDOM(result, 'tbody')
        for i in links:
            try:
                url = client.parseDOM(i, 'a', ret='href')[0]
                # Unwrap proxy redirect parameters when present.
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except: pass
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
                except: pass
                # The real target is base64 encoded in the 'url' param.
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
                url = base64.b64decode(url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                if url.startswith("//"):
                    url = 'http:%s' % url
                # Last two domain labels, e.g. "example.com".
                host = re.findall('([\w]+[.][\w]+)$',
                                  urlparse.urlparse(url.strip().lower()).netloc)[0]
                #if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                # The span's CSS class encodes the rip quality; anything
                # else is rejected.
                quality = client.parseDOM(i, 'span', ret='class')[0]
                if quality == 'quality_cam' or quality == 'quality_ts':
                    quality = 'CAM'
                elif quality == 'quality_dvd':
                    quality = 'SD'
                else:
                    raise Exception()
                srcs.append({'source': host, 'parts' : '1',
                             'quality': quality,
                             'provider': 'Primewire', 'url': url,
                             'direct': False, 'debridonly': False})
            except:
                pass
        return srcs
    except:
        return srcs
def tvshows(self, name, url):
    """List TV shows for a channel page, trying each mirror base link.

    Returns a list of show dicts (title/url/artwork/provider), or None
    on failure (after logging the exception).
    """
    try:
        result = ''
        shows = []
        links = [self.base_link_1, self.base_link_2, self.base_link_3]
        # Try mirrors until one returns the expected tab markup.
        for base_link in links:
            try:
                result = client.request('%s/%s' % (base_link, url))
            except:
                result = ''
            if 'tab_container' in result:
                break
        rawResult = result.decode('iso-8859-1').encode('utf-8')
        rawResult = rawResult.replace('\n', '').replace('\t',
                                                        '').replace('\r', '')
        rawResult = client.parseDOM(rawResult, "div",
                                    attrs={"id": "tab-0-title-1"})[0]
        # Shows appear in two column-layout variants.
        result = client.parseDOM(rawResult, "div",
                                 attrs={"class": "one_fourth "})
        result += client.parseDOM(
            rawResult, "div", attrs={"class": "one_fourth column-last "})
        for item in result:
            title = ''
            url = ''
            title = client.parseDOM(item, "p",
                                    attrs={"class": "small-title"})[0]
            url = client.parseDOM(item, "a", ret="href")[0]
            title = client.parseDOM(title, "a")[0]
            title = client.replaceHTMLCodes(title)
            poster = client.parseDOM(item, "img", ret="src")[0]
            # Concert listings are not shows; skip them.
            if 'concert' in title.lower():
                continue
            shows.append({
                'name': title,
                'channel': name,
                'title': title,
                'url': url,
                'poster': poster,
                'banner': poster,
                'fanart': poster,
                'next': '0',
                'year': '0',
                'duration': '0',
                'provider': 'yodesi'
            })
        return shows
    except:
        client.printException('')
        return
def source(self, item):
    """Resolve one table row into a source entry appended to self.srcs.

    Each href in the row's col-md-7 cell is fetched and replaced by the
    player URL embedded in its responsive-embed div.
    """
    quality = ''
    try :
        urls = client.parseDOM(item, "td",
                               attrs={"class": "col-md-7"})[0]
        urls = client.parseDOM(urls, "a", ret="href")
        for i in range(0, len(urls)):
            item = client.request(urls[i], mobile=False)
            item = item.replace('\n','').replace('\t','')
            item = client.parseDOM(item, "div",
                                   attrs={"class": "embed-responsive embed-responsive-16by9"})[0]
            # Pull the player URL from src/data-config attributes.
            item = re.compile('(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]').findall(item)[0][1]
            urls[i] = item
        host = client.host(urls[0])
        # Multi-part videos are joined with '##'.
        if len(urls) > 1:
            url = "##".join(urls)
        else:
            url = urls[0]
        self.srcs.append({'source': host, 'parts' : str(len(urls)),
                          'quality': quality,
                          'provider': 'iBollyTV', 'url': url,
                          'direct':False})
    except:
        pass
def sources(self, url):
    """Extract embedded player URLs from an article page.

    Skips .png poster images; every remaining src/data-config value
    becomes a single-part source entry.
    """
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = 'HD'
        srcs = []
        result = ''
        try:
            result = client.request(url)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        result = client.parseDOM(result, "div",
                                 attrs={"class": "single-post-video"})[0]
        items = re.compile(
            '(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]').findall(result)
        for item in items:
            # Skip poster images embedded next to the players.
            if item[1].endswith('png'):
                continue
            host = client.host(item[1])
            url = item[1]
            parts = [url]
            srcs.append({
                'source': host,
                # FIX: report parts as a string — every other provider
                # in this file emits str(len(...)); this one leaked an
                # int into consumers.
                'parts': str(len(parts)),
                'quality': quality,
                'provider': 'BadtameezDil',
                'url': "##".join(parts),
                'direct': False
            })
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def desiRulezCache(self):
    """Scrape the 'Latest Exclusive Movie HQ' forum listing.

    Returns a list of {'url', 'title'} dicts, or None on failure.
    """
    try:
        base_link = 'http://www.desirulez.me/forums/20-Latest-Exclusive-Movie-HQ'
        page = client.request(base_link)
        page = page.decode('iso-8859-1').encode('utf-8')
        threads = client.parseDOM(page, "li",
                                  attrs={"class": "threadbit hot"})
        movies = []
        for thread in threads:
            heading = client.parseDOM(thread, "h3",
                                      attrs={"class": "threadtitle"})[0]
            entry_url = client.parseDOM(heading, "a", ret="href")[0]
            entry_title = cleantitle.movie(
                client.parseDOM(heading, "a")[0]).replace(
                    'watchonline/download', '')
            movies.append({'url': entry_url, 'title': entry_title})
        return movies
    except:
        pass