def source(self, item):
    # Worker: resolve every link in one result-table row into the embedded
    # player URL it hides, then append a single aggregated entry to
    # self.srcs. Quality is unknown for this provider, so it stays ''.
    quality = ''
    try:
        #urls = client.parseDOM(item, "td")
        urls = client.parseDOM(item, "a", ret="href")
        for i in range(0, len(urls)):
            uResult = client.request(urls[i], mobile=False)
            uResult = uResult.replace('\n', '').replace('\t', '')
            # Some mirrors only serve the player markup on their mobile site;
            # the desktop page shows a database error instead.
            if 'Could not connect to mysql! Please check your database' in uResult:
                uResult = client.request(urls[i], mobile=True)
            item = client.parseDOM(uResult, "div", attrs={"class": "videoplayer"})[0]
            item = re.compile('(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]').findall(item)[0][1]
            urls[i] = item
        host = client.host(urls[0])
        if len(urls) > 1:
            # Multi-part video: downstream code splits parts on '##'.
            url = "##".join(urls)
        else:
            url = urls[0]
        self.srcs.append({
            'source': host,
            'parts': str(len(urls)),
            'quality': quality,
            'provider': 'ApnaView',
            'url': url,
            'direct': False
        })
    except:
        pass
def sources(self, url):
    # Scrape tDesiHit post content: each centered <p> holds one quality
    # group (labelled in <b>) with its part links. Returns a list of
    # source dicts; empty list on any failure.
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        srcs = []
        if url == None:
            return srcs
        url = urlparse.urljoin(self.base_link, url)
        try:
            result = client.request(url)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '').replace('\r', '')
        result = client.parseDOM(result, "div", attrs={"class": "td-post-content td-pb-padding-side"})[0]
        result = client.parseDOM(result, "p", attrs={"style": "text-align: center;"})
        for item in result:
            try:
                urls = client.parseDOM(item, "a", ret="href")
                # The <b> labels inside the paragraph name the rip quality.
                quality = client.parseDOM(item, "b")
                quality = " ".join(quality)
                quality = quality.lower()
                if "720p" in quality:
                    quality = "HD"
                else:
                    quality = "SD"
                for i in range(0, len(urls)):
                    urls[i] = client.urlRewrite(urls[i])
                host = client.host(urls[0])
                if len(urls) > 1:
                    # Multi-part video: parts joined with '##'.
                    url = "##".join(urls)
                else:
                    url = urls[0]
                srcs.append({
                    'source': host,
                    'parts': str(len(urls)),
                    'quality': quality,
                    'provider': 'tDesiHit',
                    'url': url,
                    'direct': False
                })
            except:
                pass
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def sources(self, url):
    # Extract per-tab player URLs from a DesiHDMovies watch page. The tab
    # labels ("player_nav") and the player containers ("movieplay") are
    # parallel lists; only tabs labelled as the full movie are kept.
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        srcs = []
        if url == None:
            return srcs
        url = urlparse.urljoin(self.base_link, url)
        try:
            result = client.request(url, referer=self.base_link)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        try:
            # Site-provided quality badge, e.g. the "calidad2" span.
            quality = client.parseDOM(result, "span", attrs={"class": "calidad2"})[0]
        except:
            quality = ""
        parts = client.parseDOM(result, "div", attrs={"class": "player_nav"})[0]
        parts = client.parseDOM(parts, "a")
        items = client.parseDOM(result, "div", attrs={"id": "player2"})[0]
        items = client.parseDOM(items, "div", attrs={"class": "movieplay"})
        for i in range(0, len(items)):
            try:
                part = parts[i]
                part = cleantitle.movie(part)
                # Keep only "full movie" tabs; skip "option N" mirrors.
                if not "full" in part or "option" in part:
                    continue
                url = re.compile('(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]').findall(items[i])[0][1]
                host = client.host(url)
                srcs.append({
                    'source': host,
                    'parts': '1',
                    'quality': quality,
                    'provider': 'DesiHDMovies',
                    'url': url,
                    'direct': False
                })
            except:
                pass
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
        return srcs
def sources(self, url):
    # Collect download-host links per rip type ("movierip" blocks) from an
    # mDesiHit movie page; the first anchor's label decides the quality
    # bucket (SCR / HD / CAM).
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        srcs = []
        if url == None:
            return srcs
        url = self.movie_link % url
        url = urlparse.urljoin(self.base_link, url)
        # The page is used as its own referer.
        rUrl = url
        try:
            result = client.request(url, referer=rUrl)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        result = client.parseDOM(result, "div", attrs={"class": "movierip"})
        for item in result:
            try:
                urls = client.parseDOM(item, "a", ret="href")
                quality = client.parseDOM(item, "a")[0]
                quality = quality.lower()
                if "scr rip" in quality:
                    quality = "SCR"
                elif "dvd" in quality:
                    quality = "HD"
                else:
                    quality = "CAM"
                for i in range(0, len(urls)):
                    urls[i] = client.urlRewrite(urls[i])
                host = client.host(urls[0])
                if len(urls) > 1:
                    # Multi-part video: parts joined with '##'.
                    url = "##".join(urls)
                else:
                    url = urls[0]
                srcs.append({
                    'source': host,
                    'parts': str(len(urls)),
                    'quality': quality,
                    'provider': 'mDesiHit',
                    'url': url,
                    'direct': False
                })
            except:
                pass
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def sources(self, url):
    # Pull embedded player URLs from an HDBuffer movie page. Quality is
    # inferred from the post's category tags; players live in GTTabs divs
    # (the currently-active tab carries a different class).
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        srcs = []
        if url == None:
            return srcs
        try:
            result = client.request(self.movie_link % (self.base_link_1, url))
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '')
        categories = client.parseDOM(result, "div", attrs={"id": "extras"})
        categories = client.parseDOM(categories, "a", attrs={"rel": "category tag"})
        for category in categories:
            category = category.lower()
            if "scr" in category:
                quality = "SCR"
                break
            elif "bluray" in category:
                quality = "HD"
                break
        links = client.parseDOM(result, "div", attrs={"class": "GTTabs_divs GTTabs_curr_div"})
        links += client.parseDOM(result, "div", attrs={"class": "GTTabs_divs"})
        for link in links:
            try:
                url = re.compile('(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]').findall(link)[0][1]
                host = client.host(url)
                srcs.append({
                    'source': host,
                    'parts': '1',
                    'quality': quality,
                    'provider': 'HDBuffer',
                    'url': url,
                    'direct': False
                })
            except:
                pass
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def request(url, resolverList=None):
    """Resolve a hoster page URL into a directly playable media URL.

    Tries the add-on's own resolver classes first, then falls back to the
    urlresolver library.  Returns the resolved URL (possibly carrying a
    '|key=value' header suffix) or False when nothing could be resolved.
    Raises when the resolved URL turns out not to be playable, matching the
    original contract (callers catch this).

    resolverList is accepted for interface compatibility but unused here.
    """
    u = url
    url = False

    # 1) Custom resolvers bundled with the add-on: pick the first resolver
    #    whose host list matches this URL's host and import it by name.
    try:
        host = client.host(u)
        r = [i['class'] for i in info() if host in i['host']][0]
        r = __import__(r, globals(), locals(), [], -1)
        url = r.resolve(u)
        if url == False:
            raise Exception()
    except:
        pass

    # 2) URLResolvers 3.0.0 fallback — only runs when the custom pass failed.
    try:
        if not url == False:
            raise Exception()
        logger.debug('Trying URL Resolver for %s' % u, __name__)
        hmf = urlresolver.HostedMediaFile(url=u,
                                          include_disabled=True,
                                          include_universal=False)
        if hmf.valid_url() == True:
            url = hmf.resolve()
        else:
            url = False
    except:
        pass

    # FIX: if neither pass produced a URL, bail out here. The original code
    # fell through and crashed with AttributeError on False.startswith().
    if url == False or url == None:
        return False

    # An optional '|key=value&...' suffix carries HTTP headers for playback.
    try:
        headers = url.rsplit('|', 1)[1]
    except:
        headers = ''
    headers = urllib.quote_plus(headers).replace('%3D', '=').replace(
        '%26', '&') if ' ' in headers else headers
    headers = dict(urlparse.parse_qsl(headers))

    # Sanity-check that the resolved URL actually plays before returning it.
    if url.startswith('http') and '.m3u8' in url:
        result = client.request(url.split('|')[0],
                                headers=headers,
                                output='geturl',
                                timeout='20')
        if result == None:
            raise Exception()
    elif url.startswith('http'):
        result = client.request(url.split('|')[0],
                                headers=headers,
                                output='chunk',
                                timeout='20')
        if result == None:
            logger.debug('Resolved %s but unable to play' % url, __name__)
            raise Exception()

    return url
def sources(self, url):
    # Scrape player URLs embedded in a BadtameezDil post ("single-post-video"
    # container). PNG hits from the src-regex are poster images, not players.
    # Quality is always reported as HD by this provider.
    # FIX: 'srcs' is now initialised before the try, so the outer
    # 'except: return srcs' can never raise NameError.
    srcs = []
    try:
        logger.debug('SOURCES URL %s' % url, __name__)
        quality = 'HD'
        result = ''
        try:
            result = client.request(url)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        result = client.parseDOM(result, "div", attrs={"class": "single-post-video"})[0]
        items = re.compile('(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]').findall(result)
        for item in items:
            # Skip thumbnails/posters picked up by the src regex.
            if item[1].endswith('png'):
                continue
            host = client.host(item[1])
            url = item[1]
            parts = [url]
            srcs.append({
                'source': host,
                # FIX: every other provider reports 'parts' as a string;
                # this one emitted a raw int.
                'parts': str(len(parts)),
                'quality': quality,
                'provider': 'BadtameezDil',
                'url': "##".join(parts),
                'direct': False
            })
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def sources(self, url):
    # Collect iBollyTV sources: the primary embedded player (inside
    # <center>) plus mirror rows from the links table. When 'hypermode' is
    # enabled, each table row is resolved on a worker thread via
    # self.source(); otherwise rows are resolved inline.
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        self.srcs = []
        if url == None:
            return self.srcs
        try:
            result = client.request(url)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs={"class": "col-md-12 mt20"})[0]
        # Primary player embedded directly on the page (if present).
        try:
            item = client.parseDOM(result, "center")[0]
            url = re.compile('(SRC|src|data-config)=\"(.+?)\"').findall(item)[0][1]
            host = client.host(url)
            self.srcs.append({'source': host, 'parts': '1', 'quality': quality, 'provider': 'iBollyTV', 'url': url, 'direct': False})
        except:
            pass
        hypermode = False if control.setting('hypermode') == 'false' else True
        threads = []
        try:
            result = client.parseDOM(result, "div", attrs={"class": "table-responsive"})[0]
            result = client.parseDOM(result, "tbody")[0]
            result = client.parseDOM(result, "tr")
            for item in result:
                if hypermode:
                    threads.append(workers.Thread(self.source, item))
                else:
                    self.source(item)
            if hypermode:
                [i.start() for i in threads]
                # FIX: wait with join() instead of the original busy-wait
                # loop that polled is_alive() at 100% CPU until every
                # worker finished. Completion semantics are identical.
                [i.join() for i in threads]
        except:
            pass
        logger.debug('SOURCES [%s]' % self.srcs, __name__)
        return self.srcs
    except:
        return self.srcs
def source(self, item):
    # Worker: resolve every link in one mirror-table row (the "col-md-7"
    # cell) into its embedded player URL and append a single aggregated
    # entry to self.srcs. May run on a worker thread in 'hypermode'.
    quality = ''
    try:
        urls = client.parseDOM(item, "td", attrs={"class": "col-md-7"})[0]
        urls = client.parseDOM(urls, "a", ret="href")
        for i in range(0, len(urls)):
            item = client.request(urls[i], mobile=False)
            item = item.replace('\n', '').replace('\t', '')
            item = client.parseDOM(item, "div", attrs={"class": "embed-responsive embed-responsive-16by9"})[0]
            item = re.compile('(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]').findall(item)[0][1]
            urls[i] = item
        host = client.host(urls[0])
        if len(urls) > 1:
            # Multi-part video: parts joined with '##'.
            url = "##".join(urls)
        else:
            url = urls[0]
        self.srcs.append({'source': host, 'parts': str(len(urls)), 'quality': quality, 'provider': 'iBollyTV', 'url': url, 'direct': False})
    except:
        pass
def sources(self, url):
    # Gather iframe-embedded hoster links from a tamilyogi post page.
    # Quality is taken from the page URL itself ('hd' marker); every iframe
    # found inside the "entry" container becomes one single-part source.
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        srcs = []
        if url == None:
            return srcs
        quality = 'HD' if 'hd' in url.lower() else 'SD'
        page = client.request(url)
        strainer = SoupStrainer("div", {"class": "entry"})
        entry = BeautifulSoup(page, parseOnlyThese=strainer)
        try:
            for frame in entry.findAll('iframe'):
                src = frame.get('src')
                srcs.append({
                    'source': client.host(src),
                    'parts': '1',
                    'quality': quality,
                    'provider': 'tamilyogi',
                    'url': src,
                    'direct': False
                })
        except:
            pass
        return srcs
    except:
        return srcs
def sources(self, url):
    # Walk a DesiRulez forum post: <font> headings announce a quality/link
    # group, <a> children accumulate part links, and each new heading (plus
    # the end of the post) flushes the accumulated group into srcs.
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        srcs = []
        result = ''
        # Try each mirror until one actually serves the post content.
        links = [self.base_link_1, self.base_link_2, self.base_link_3]
        for base_link in links:
            try:
                result = client.request(base_link + '/' + url)
                if result == None:
                    raise Exception()
            except:
                result = ''
            if 'blockquote' in result:
                break
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '')
        ### DIRTY Implementation
        import BeautifulSoup
        soup = BeautifulSoup.BeautifulSoup(result).findAll('blockquote', {'class': re.compile(r'\bpostcontent\b')})[0]
        for e in soup.findAll('br'):
            e.extract()
        if soup.has_key('div'):
            soup = soup.findChild('div', recursive=False)
        urls = []
        quality = ''
        for child in soup.findChildren():
            # Skip empty nodes and the site's own branding links.
            if (child.getText() == '') or ((child.name == 'font' or child.name == 'a') and re.search('DesiRulez', str(child.getText()), re.IGNORECASE)):
                continue
            elif (child.name == 'font') and re.search('Links|Online|Link', str(child.getText()), re.IGNORECASE):
                # A new group heading: flush whatever was collected so far.
                if len(urls) > 0:
                    for i in range(0, len(urls)):
                        try:
                            # Each forum link hides the real player one request
                            # deeper; on failure fall back to rewriting the
                            # forum link itself.
                            result = client.request(urls[i])
                            item = client.parseDOM(result, name="div", attrs={"style": "float:right;margin-bottom:10px"})[0]
                            rUrl = re.compile('(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]').findall(item)[0][1]
                            rUrl = client.urlRewrite(rUrl)
                            urls[i] = rUrl
                        except:
                            urls[i] = client.urlRewrite(urls[i])
                            pass
                    host = client.host(urls[0])
                    url = "##".join(urls)
                    srcs.append({'source': host, 'parts': str(len(urls)), 'quality': quality, 'provider': 'DesiRulez', 'url': url, 'direct': False})
                    quality = ''
                    urls = []
                # Derive the new group's quality from the heading text.
                quality = child.getText()
                if '720p HD' in quality:
                    quality = 'HD'
                elif 'Scr' in quality:
                    quality = 'SCR'
                else:
                    quality = ''
            elif (child.name == 'a') and not child.getText() == 'registration':
                urls.append(str(child['href']))
                # No heading seen yet: infer quality from the link text.
                if quality == '':
                    quality = child.getText()
                    if '720p HD' in quality:
                        quality = 'HD'
                    elif 'Scr' in quality:
                        quality = 'SCR'
                    elif 'Dvd' in quality:
                        quality = 'SD'
                    else:
                        quality = ''
        # Flush the trailing group collected after the last heading.
        if len(urls) > 0:
            for i in range(0, len(urls)):
                try:
                    result = client.request(urls[i])
                    item = client.parseDOM(result, name="div", attrs={"style": "float:right;margin-bottom:10px"})[0]
                    rUrl = re.compile('(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]').findall(item)[0][1]
                    rUrl = client.urlRewrite(rUrl)
                    urls[i] = rUrl
                except:
                    urls[i] = client.urlRewrite(urls[i])
                    pass
            host = client.host(urls[0])
            url = "##".join(urls)
            srcs.append({'source': host, 'parts': str(len(urls)), 'quality': quality, 'provider': 'DesiRulez', 'url': url, 'direct': False})
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def sources(self, url):
    # Search world4u (random mirror) via its RSS feed, filter posts down to
    # the exact title/year, derive quality and size info from the release
    # name, then scrape outbound hoster links from the matching post page.
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        srcs = []
        if url == None:
            return srcs
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        imdb, title, year = data.get('imdb'), data.get('title'), data.get('year')
        self.base_link = random.choice([self.base_link_1, self.base_link_2])
        query = '%s %s' % (title, year)
        query = self.search_link % (urllib.quote_plus(query))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        posts = client.parseDOM(result, "item")
        items = []
        # First pass: collect candidate posts with name, link and raw size.
        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]
                if 'trailer' in cleantitle.movie(t):
                    raise Exception()
                try:
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)(?:GB|GiB|MB|MiB|mb|gb))', t)[0]
                except:
                    s = '0'
                i = client.parseDOM(post, 'link')[0]
                items += [{'name': t, 'url': i, 'size': s}]
            except:
                pass
        title = cleantitle.movie(title)
        # Second pass: keep exact title/year matches and build source dicts.
        for item in items:
            try:
                name = item.get('name')
                # Strip year/episode/3D markers and everything after them.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if cleantitle.movie(title) == cleantitle.movie(t):
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == year:
                        raise Exception()
                    # fmt: the release-tag tokens after the year marker.
                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]
                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt):
                        raise Exception()
                    if any(i in ['extras'] for i in fmt):
                        raise Exception()
                    if '1080p' in fmt:
                        quality = '1080p'
                    elif '720p' in fmt:
                        quality = 'HD'
                    else:
                        quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
                        quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt):
                        quality = 'CAM'
                    info = []
                    if '3d' in fmt:
                        info.append('3D')
                    try:
                        # Normalise the advertised size to GB for display.
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)(?:GB|GiB|MB|MiB|mb|gb))', item.get('size'))[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass
                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt):
                        info.append('HEVC')
                    info = ' | '.join(info)
                    movieurl = item.get('url')
                    result = client.request(movieurl)
                    result = result.decode('iso-8859-1').encode('utf-8')
                    result = result.replace('\n', '').replace('\t', '')
                    result = client.parseDOM(result, 'div', attrs={'class': 'entry'})[0]
                    links = client.parseDOM(result, 'a', attrs={'target': '_blank'}, ret='href')
                    for link in links:
                        if 'http' in link:
                            host = client.host(link)
                            srcs.append({
                                'source': host,
                                'parts': '1',
                                'quality': quality,
                                'provider': 'world4u',
                                'url': link,
                                'direct': False,
                                'info': info
                            })
            except:
                pass
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        import traceback
        traceback.print_exc()
        return srcs
def sources(self, url):
    # Parse filmywap's download table: the section between the "Download
    # Full Movie In:" heading and the "next-movie" block holds one anchor
    # per encoding; the anchor's inner "quality_1" span names the quality.
    # (A large commented-out base64 HTML fixture was removed from here.)
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        srcs = []
        if url == None:
            return srcs
        result = client.request(url)
        result = result.replace('\n', '').replace('\t', '').replace('\r', '')
        result = client.parseDOM(result, "div", attrs={"id": "main"})[0]
        result = re.compile('Download Full Movie In:</div>(.+?)<div class=\"next-movie\">').findall(result)[0]
        # Map each anchor's inner HTML (carrying the quality span) to its href.
        quality = client.parseDOM(result, "a")
        links = client.parseDOM(result, "a", ret="href")
        links = dict(zip(quality, links))
        for key in links:
            if not 'quality_1' in key:
                continue
            try:
                quality = client.parseDOM(key, 'span', attrs={'class': 'quality_1'})[0].lower()
            except:
                quality = 'hd'
            if quality == 'ts':
                quality = 'CAM'
            elif '360p' in quality:
                quality = 'SD'
            elif '720p' in quality:
                quality = 'HD'
            else:
                quality = 'SD'
            url = links[key]
            host = client.host(url)
            srcs.append({
                'source': host,
                'parts': '1',
                'quality': quality,
                'provider': 'filmywap',
                'url': url,
                'direct': False,
                'debridonly': False
            })
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def sources(self, url):
    # Search hevcfilm's RSS feed for the requested movie/episode, filter
    # release names down to exact title + year/episode matches, derive
    # quality and size info from the release tags, then scrape outbound
    # hoster links. This provider yields premium hosters only
    # (debridonly=True) and is skipped when no debrid service is active.
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        sources = []
        if url == None:
            return sources
        if debrid.status() == False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        # hdlr: 'SxxExx' for episodes, release year for movies.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        posts = client.parseDOM(r, 'item')
        items = []
        # First pass: candidate posts with name, link and raw size string.
        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]
                try:
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)(?:GB|GiB|MB|MiB|mb|gb))', t)[0]
                except:
                    s = '0'
                i = client.parseDOM(post, 'link')[0]
                items += [{'name': t, 'url': i, 'size': s}]
            except:
                pass
        # Second pass: exact match filtering and source construction.
        for item in items:
            try:
                name = item.get('name')
                name = client.replaceHTMLCodes(name)
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    raise Exception()
                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr:
                    raise Exception()
                # fmt: release-tag tokens after the year/episode marker.
                fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [i.lower() for i in fmt]
                if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt):
                    raise Exception()
                if any(i in ['extras'] for i in fmt):
                    raise Exception()
                if '1080p' in fmt:
                    quality = '1080p'
                elif '720p' in fmt:
                    quality = 'HD'
                else:
                    quality = 'SD'
                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
                    quality = 'SCR'
                elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt):
                    quality = 'CAM'
                info = []
                if '3d' in fmt:
                    info.append('3D')
                try:
                    # FIX: include lowercase 'gb' like the extraction regex
                    # above; previously sizes such as '1.4gb' passed the
                    # first pass but were silently dropped here.
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)(?:GB|GiB|MB|MiB|mb|gb))', item.get('size'))[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                if any(i in ['hevc', 'h265', 'x265'] for i in fmt):
                    info.append('HEVC')
                info = ' | '.join(info)
                movieurl = item.get('url')
                result = client.request(movieurl)
                result = result.decode('iso-8859-1').encode('utf-8')
                result = result.replace('\n', '').replace('\t', '')
                result = client.parseDOM(result, 'div', attrs={'class': 'entry'})[0]
                links = client.parseDOM(result, 'a', attrs={'target': '_blank'}, ret='href')
                for link in links:
                    try:
                        # Skip internal navigation links and executables.
                        if link.startswith(self.base_link) or link.endswith('exe'):
                            raise Exception()
                        if 'http' in link:
                            host = client.host(link)
                            sources.append({
                                'provider': 'hevcfilm',
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                    except:
                        pass
            except:
                pass
        # Prefer non-CAM sources whenever any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        logger.debug('SOURCES URL %s' % url, __name__)
        return sources
    except:
        return sources
def sources(self, url):
    # Search DesiTashan mirrors' feed for the movie and collect grouped part
    # links: inside a matching <item>, centered <p> elements alternate
    # between a <span> quality marker and the link group it applies to.
    try:
        logger.debug('SOURCES URL %s' % url, __name__)
        quality = ''
        srcs = []
        result = ''
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = '%s %s' % (data['imdb'], data['title'])
        url = self.search_link % (urllib.quote_plus(url))
        # Try each mirror until one returns a feed containing <item> entries.
        links = [self.base_link_1, self.base_link_2, self.base_link_3]
        for base_link in links:
            try:
                result = client.request(base_link + '/' + url)
            except:
                result = ''
            if 'item' in result:
                break
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        items = client.parseDOM(result, 'item')
        for item in items:
            title = client.parseDOM(item, 'title')[0]
            title = title.replace('Video Watch Online', '')
            title = cleantitle.get(title)
            ctitle = cleantitle.get('%s %s' % (data['imdb'], data['title']))
            if title == ctitle:
                links = client.parseDOM(item, 'p', attrs={'style': 'text-align: center;'})
                for link in links:
                    # A paragraph containing a <span> announces the quality
                    # of the following link groups.
                    if 'span' in link:
                        if 'HD' in link:
                            quality = 'HD'
                        else:
                            quality = 'SD'
                        continue
                    urls = client.parseDOM(link, 'a', ret='href')
                    if len(urls) > 0:
                        for i in range(0, len(urls)):
                            urls[i] = client.urlRewrite(urls[i])
                        host = client.host(urls[0])
                        # Multi-part video: parts joined with '##'.
                        url = "##".join(urls)
                        srcs.append({
                            'source': host,
                            'parts': str(len(urls)),
                            'quality': quality,
                            'provider': 'DesiTashan',
                            'url': url,
                            'direct': False
                        })
                        urls = []
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def sources(self, url):
    # Resolve YoDesi episode parts: each heading section lists part links
    # whose video id (via module-level getVideoID) is looked up through
    # info_link to obtain the real embedded player URL.
    try:
        logger.debug('SOURCES URL %s' % url, __name__)
        quality = ''
        srcs = []
        result = ''
        # Try each mirror until one returns a feed containing <item> entries.
        links = [self.base_link_1, self.base_link_2, self.base_link_3]
        for base_link in links:
            try:
                result = client.request(base_link + '/' + url)
            except:
                result = ''
            if 'item' in result:
                break
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '')
        items = client.parseDOM(result, 'content:encoded')[0]
        items = re.compile('class=\"single-heading\">(.+?)<span').findall(items)
        for i in range(0, len(items)):
            try:
                if '720p' in items[i]:
                    quality = 'HD'
                else:
                    quality = 'SD'
                urls = client.parseDOM(items[i], "a", ret="href")
                for j in range(0, len(urls)):
                    videoID = getVideoID(urls[j])
                    result = client.request(self.info_link % videoID)
                    result = result.decode('iso-8859-1').encode('utf-8')
                    item = client.parseDOM(result, name="div", attrs={"style": "float:none;height:700px;margin-left:200px"})[0]
                    rUrl = re.compile('(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]').findall(item)[0][1]
                    # Player URLs are often protocol-relative; force http.
                    if not rUrl.startswith('http:') and not rUrl.startswith('https:'):
                        rUrl = '%s%s' % ('http:', rUrl)
                    urls[j] = rUrl
                host = client.host(urls[0])
                # Multi-part video: parts joined with '##'.
                url = "##".join(urls)
                srcs.append({
                    'source': host,
                    'parts': str(len(urls)),
                    'quality': quality,
                    'provider': 'YoDesi',
                    'url': url,
                    'direct': False
                })
                urls = []
            except Exception as e:
                logger.error(e)
                pass
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def sources(self, url):
    # Collect tamilgun sources via four strategies: (1) packed JWPlayer
    # config with direct file URLs, (2) iframes in the #videoframe div,
    # (3) iframes in the entry excerpt, (4) a YouTube fallback from embedded
    # JSON metadata. Quality is taken from the page URL ('hd' marker).
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        srcs = []
        # FIX: self.srcs is appended to throughout but was never initialised
        # here; on a fresh instance every append raised AttributeError
        # (silently swallowed) and the final return raised it again.
        self.srcs = []
        if url == None:
            return srcs
        if 'hd' in url.lower():
            quality = 'HD'
        else:
            quality = 'SD'
        html = client.request(url)
        # 1) Packed JWPlayer script carrying direct 'file' sources.
        try:
            linkcode = jsunpack.unpack(html).replace('\\', '')
            srcs = json.loads(re.findall('sources:(.*?)\}\)', linkcode)[0])
            for source in srcs:
                url = source['file']
                host = client.host(url)
                self.srcs.append({'source': host, 'parts': '1', 'quality': quality, 'provider': 'tamilgun', 'url': url, 'direct': False})
        except:
            pass
        # 2) iframes inside the #videoframe container.
        mlink = SoupStrainer('div', {'id': 'videoframe'})
        videoclass = BeautifulSoup(html, parseOnlyThese=mlink)
        try:
            links = videoclass.findAll('iframe')
            for link in links:
                url = link.get('src')
                host = client.host(url)
                self.srcs.append({'source': host, 'parts': '1', 'quality': quality, 'provider': 'tamilgun', 'url': url, 'direct': False})
        except:
            pass
        # 3) iframes inside the entry excerpt.
        mlink = SoupStrainer('div', {'class': 'entry-excerpt'})
        videoclass = BeautifulSoup(html, parseOnlyThese=mlink)
        try:
            links = videoclass.findAll('iframe')
            for link in links:
                if 'http' in str(link):
                    url = link.get('src')
                    host = client.host(url)
                    self.srcs.append({'source': host, 'parts': '1', 'quality': quality, 'provider': 'tamilgun', 'url': url, 'direct': False})
        except:
            pass
        # 4) YouTube fallback from the page's embedded JSON metadata.
        try:
            sources = json.loads(re.findall('vdf-data-json">(.*?)<', html)[0])
            url = 'https://www.youtube.com/watch?v=%s' % sources['videos'][0]['youtubeID']
            host = client.host(url)
            self.srcs.append({'source': host, 'parts': '1', 'quality': quality, 'provider': 'tamilgun', 'url': url, 'direct': False})
        except:
            pass
        return self.srcs
    except:
        return self.srcs
def sources(self, url):
    # HindiLinks4U posts alternate <p> elements: a server name, then the
    # paragraph with that server's part links. Each part link's mobile page
    # embeds the real player inside a table cell.
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        srcs = []
        if url == None:
            return srcs
        try:
            result = client.request(url)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '')
        quality = ''
        result = client.parseDOM(result, name="div", attrs={"class": "entry-content rich-content"})[0]
        result = client.parseDOM(result, name="p")
        try:
            host = ''
            urls = []
            # Drop the intro paragraph, then split into (server, links) pairs.
            result = result[1::]
            serversList = result[::2]
            linksList = result[1::2]
            for i in range(0, len(serversList)):
                try:
                    links = linksList[i]
                    urls = client.parseDOM(links, name="a", ret="href")
                    for j in range(0, len(urls)):
                        try:
                            # The mobile page exposes the player src in a <td>;
                            # keep the original link if extraction fails.
                            item = client.request(urls[j], mobile=True)
                            item = client.parseDOM(item, "td")[0]
                            item = re.compile('(SRC|src|data-config)=\"(.+?)\"').findall(item)[0][1]
                            urls[j] = item
                        except:
                            pass
                    if len(urls) > 1:
                        # Multi-part video: parts joined with '##'.
                        url = "##".join(urls)
                    else:
                        url = urls[0]
                    host = client.host(urls[0])
                    srcs.append({
                        'source': host,
                        'parts': str(len(urls)),
                        'quality': quality,
                        'provider': 'HindiLinks4U',
                        'url': url,
                        'direct': False
                    })
                except:
                    pass
        except:
            pass
        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
def sources(self, url):
    # rajtamil embeds videos several different ways; try each in turn inside
    # the 'entry-content' container: iframes, <h3> anchors, <embed> tags and
    # button onclick handlers. Quality comes from the page URL ('hd' marker).
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        srcs = []
        if url == None:
            return srcs
        if 'hd' in url.lower():
            quality = 'HD'
        else:
            quality = 'SD'
        html = client.request(url)
        mlink = SoupStrainer('div', {'class': 'entry-content'})
        videoclass = BeautifulSoup(html, parseOnlyThese=mlink)
        try:
            links = videoclass.findAll('iframe')
            for link in links:
                url = link.get('src')
                host = client.host(url)
                srcs.append({'source': host, 'parts': '1', 'quality': quality, 'provider': 'rajtamil', 'url': url, 'direct': False})
        except:
            pass
        try:
            links = videoclass.findAll('h3')
            for link in links:
                url = link.find('a')['href']
                host = client.host(url)
                srcs.append({'source': host, 'parts': '1', 'quality': quality, 'provider': 'rajtamil', 'url': url, 'direct': False})
        except:
            pass
        try:
            links = videoclass.findAll('embed')
            for link in links:
                url = link.get('src')
                host = client.host(url)
                srcs.append({'source': host, 'parts': '1', 'quality': quality, 'provider': 'rajtamil', 'url': url, 'direct': False})
        except:
            pass
        try:
            links = videoclass.findAll('a', {'type': 'button'})
            for link in links:
                # Button URLs are wrapped in an onclick handler.
                url = re.findall("(http.*?)'", link.get('onclick'))[0]
                if 'tv?vq=medium#/' in url:
                    url = url.replace('tv?vq=medium#/', '')
                host = client.host(url)
                srcs.append({'source': host, 'parts': '1', 'quality': quality, 'provider': 'rajtamil', 'url': url, 'direct': False})
        except:
            pass
        return srcs
    except:
        return srcs