def resolve(url):
    """Resolve a direct link by replaying the page's hidden 'F1' form and
    then picking the uptobox.com href out of the upload-button block.
    Retries the POST up to 3 times with a 1s pause; None on failure."""
    try:
        page = client.request(url)

        form = client.parseDOM(page, 'form', attrs={'name': 'F1'})[0]
        names = client.parseDOM(form, 'input', ret='name', attrs={'type': 'hidden'})
        fields = dict((n, client.parseDOM(form, 'input', ret='value', attrs={'name': n})[0]) for n in names)
        payload = urllib.urlencode(fields)

        for attempt in range(3):
            try:
                page = client.request(url, post=payload)
                blocks = client.parseDOM(page, 'div', attrs={'align': '.+?'})
                block = [b for b in blocks if 'button_upload' in b][0]
                href = client.parseDOM(block, 'a', ret='href')[0]
                return ['http' + part for part in href.split('http') if 'uptobox.com' in part][0]
            except:
                time.sleep(1)
    except:
        return
def getTVrageId(imdb, tvdb, show, year):
    # Map a show to its TVRage id.  First choice: ask trakt via the IMDB id.
    try:
        from modules.indexers import trakt
        if not imdb.startswith('tt'): imdb = 'tt' + imdb
        result = trakt.getTVShowSummary(imdb)
        result = json.loads(result)
        tvrage = result['ids']['tvrage']
        if tvrage == None: raise Exception()
        return str(tvrage)
    except:
        pass

    # Fallback: search TVRage's own feed by title, matching on the cleaned
    # title plus a +/- 1 year window around the supplied year.
    try:
        query = urllib.quote_plus(show)
        query = 'http://services.tvrage.com/feeds/search.php?show=%s' % query
        result = client.request(query, timeout='5')
        result = client.parseDOM(result, "show")
        show = cleantitle.tv(show)
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
        result = [
            i for i in result
            if show == cleantitle.tv(client.replaceHTMLCodes(client.parseDOM(i, "name")[0]))
            and any(x in client.parseDOM(i, "started")[0] for x in years)
        ][0]
        tvrage = client.parseDOM(result, "showid")[0]
        return str(tvrage)
    except:
        pass
def boobntit(self, url):
    """Pull the iframe src out of the page's #player div and resolve it
    generically; on any failure fall back to resolving the page URL."""
    try:
        page = client.request(url)
        player = client.parseDOM(page, 'div', attrs={'id': 'player'})
        src = client.parseDOM(player, 'iframe', ret='src')[0]
        return self.generic(src)
    except:
        return self.generic(url)
def megasesso(self, url):
    """Extract the embedded player iframe from a megasesso page and hand
    the absolute URL to the generic resolver; returns None on failure."""
    try:
        page = client.request(url)
        frames = client.parseDOM(page, 'div', attrs={'class': 'player-iframe'})
        srcs = [client.parseDOM(f, 'iframe', ret='src') for f in frames]
        srcs = [client.replaceHTMLCodes(s[0]).encode('utf-8') for s in srcs]
        return self.generic('http://www.megasesso.com' + srcs[0])
    except:
        return
def resolve(url):
    # Resolve a vidzi-style host: optionally replay the page's POST form,
    # then unpack the packed JS player config and pull the file URL out of
    # its "sources" array.  Up to 10 attempts with a 1s backoff.
    try:
        result = client.request(url, mobile=True, close=False)

        # Collect the hidden-input fields of the first POST form, if any;
        # submit buttons are rewritten to hidden so their values are kept.
        try:
            post = {}
            f = client.parseDOM(result, 'Form', attrs={'method': 'POST'})[0]
            f = f.replace('"submit"', '"hidden"')
            k = client.parseDOM(f, 'input', ret='name', attrs={'type': 'hidden'})
            for i in k:
                post.update({i: client.parseDOM(f, 'input', ret='value', attrs={'name': i})[0]})
            post = urllib.urlencode(post)
        except:
            post = None

        for i in range(0, 10):
            try:
                result = client.request(url, post=post, mobile=True, close=False)
                result = result.replace('\n', '')
                # The last eval(...) blob on the page is the packed jwplayer setup.
                result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
                result = jsunpack.unpack(result)
                result = re.compile('sources *: *\[.+?\]').findall(result)[-1]
                result = re.compile('file *: *"(http.+?)"').findall(result)
                # Prefer a direct (non-HLS) file; fall back to the m3u8 stream.
                url = [i for i in result if not '.m3u8' in i]
                if len(url) > 0:
                    return '%s|Referer=%s' % (url[0], urllib.quote_plus('http://vidzi.tv/nplayer/jwplayer.flash.swf'))
                url = [i for i in result if '.m3u8' in i]
                if len(url) > 0: return url[0]
            except:
                time.sleep(1)
    except:
        return
def recaptcha(data):
    """Solve an old-style Google reCAPTCHA by hand: locate the challenge
    script, show the challenge image via keyboard(), and return every
    field-name variant callers may need to post back."""
    try:
        candidates = []
        if data.startswith('http://www.google.com'):
            candidates.append(data)
        candidates += client.parseDOM(data, 'script', ret='src', attrs={'type': 'text/javascript'})
        candidates = [c for c in candidates if 'http://www.google.com' in c]
        if not candidates:
            return

        page = client.request(candidates[0])
        challenge = re.compile("challenge\s+:\s+'(.+?)'").findall(page)[0]
        image = 'http://www.google.com/recaptcha/api/image?c=' + challenge
        answer = keyboard(image)

        return {
            'recaptcha_challenge_field': challenge,
            'recaptcha_challenge': challenge,
            'recaptcha_response_field': answer,
            'recaptcha_response': answer,
        }
    except:
        pass
def PLAYLINK(name, url, iconimage):
    # Play a videomega.tv embed: pull the iframe src from the page, rebuild
    # the cdn.php URL, unpack any packed player JS, extract the video
    # source, and start playback inside Kodi.
    # NOTE(review): 'iconimage' is unused; the ListItem below uses a global
    # 'icon' instead -- confirm that is intentional.
    link = open_url(url)
    try:
        url = re.compile('src="(.+?)" allowFullScreen></iframe>').findall(link)[0]
    except:
        url = re.compile("src='(.+?)' allowFullScreen></iframe>").findall(link)[0]
    # User-Agent suffix appended to the stream URL for Kodi's player.
    ua = '|User-Agent=Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36'
    #### THANKS TO LAMBDA ####
    import client
    import jsunpack
    url = urlparse.urlparse(url).query
    url = urlparse.parse_qsl(url)[0][1]
    url = 'http://videomega.tv/cdn.php?ref=%s' % url
    result = client.request(url)
    # Unpack each packed (p,a,c,k,e,d) line; pass plain lines through.
    unpacked = ''
    packed = result.split('\n')
    for i in packed:
        try:
            unpacked += jsunpack.unpack(i)
        except:
            unpacked += i
    result = unpacked
    result = re.sub('\s\s+', ' ', result)
    url = re.compile('"video".+?"src"\s*\,\s*"(.+?)"').findall(result)
    url += client.parseDOM(result, 'source', ret='src', attrs = {'type': 'video.+?'})
    url = url[0] + ua
    #### THANKS TO LAMBDA ####
    ok = True
    liz = xbmcgui.ListItem(name, iconImage=icon, thumbnailImage=icon)
    liz.setInfo(type="Video", infoLabels={"Title": name})
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=liz)
    xbmc.Player().play(url, liz, False)
def resolve(url):
    """Resolve a nosvideo link: derive the file id, fetch the player page
    with the proper referer, and return the last <source> src found."""
    try:
        url = url.replace('/embed-', '/')
        media_id = re.compile('//.+?/([\w]+)').findall(url)[0]

        player = 'http://nosvideo.com/vj/video.php?u=%s&w=&h=530' % media_id
        referer = 'http://nosvideo.com/%s' % media_id

        page = client.request(player, referer=referer)

        sources = client.parseDOM(page, 'source', ret='src', attrs={'type': 'video/.+?'})
        sources += client.parseDOM(page, 'source', ret='src', attrs={'type': 'video/mp4'})
        return sources[-1]
    except:
        return
def episodeAbsoluteNumber(self, thetvdb, season, episode):
    """Look up an episode's absolute number on TheTVDB; when anything
    goes wrong, fall back to the season-relative episode number."""
    try:
        api_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64')
        url = 'http://thetvdb.com/api/%s/series/%s/default/%01d/%01d' % (api_key, thetvdb, int(season), int(episode))
        xml = client.request(url)
        return int(client.parseDOM(xml, 'absolute_number')[0])
    except:
        pass
    return episode
def resolve(self, url):
    """Turn a YouTube watch URL into a plugin.video.youtube play URL,
    returning None when the video page reports it as unavailable."""
    try:
        video_id = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]
        page = client.request('http://www.youtube.com/watch?v=%s' % video_id)

        submessage = ''.join(client.parseDOM(page, "div", attrs={"id": "unavailable-submessage"}))
        notices = client.parseDOM(page, "div", attrs={"id": "watch7-notification-area"})

        if len(notices) > 0: raise Exception()
        if re.search('[a-zA-Z]', submessage): raise Exception()

        return 'plugin://plugin.video.youtube/play/?video_id=%s' % video_id
    except:
        return
def yuvutu(self, url):
    """Find the first 'embed' iframe on the page, absolutise it against
    the page URL, and resolve it generically; None on failure."""
    try:
        frames = client.parseDOM(client.request(url), 'iframe', ret='src')
        embed = [f for f in frames if 'embed' in f][0]
        return self.generic(urlparse.urljoin(url, embed))
    except:
        return
def __parse(sUnpacked):
    """Collect candidate media URLs from unpacked player JS (jwplayer
    'file' entries, playlist params, embed srcs), drop subtitle files,
    and return the last candidate normalised to an http:// URL."""
    candidates = re.compile("'file' *, *'(.+?)'").findall(sUnpacked)
    candidates += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(sUnpacked)
    candidates += re.compile("playlist=(.+?)&").findall(sUnpacked)
    candidates += client.parseDOM(sUnpacked, "embed", ret="src")
    candidates = [c for c in candidates if not c.endswith('.srt')]
    return 'http://' + candidates[-1].split('://', 1)[-1]
def resolve(url):
    """Fetch the mobile page and return the first <source> whose type is
    a video MIME type; None if the request or parse fails."""
    try:
        page = client.request(url, mobile=True)
        return client.parseDOM(page, 'source', ret='src', attrs={'type': 'video.+?'})[0]
    except:
        return
def check(url):
    """Return True when the hosted file still exists, False when the page
    is unreachable or its para_title span says 'File not found'."""
    try:
        page = client.request(url)
        if page == None:
            return False
        titles = client.parseDOM(page, 'span', attrs={'class': 'para_title'})
        if any('File not found' in t for t in titles):
            raise Exception()
        return True
    except:
        return False
def getTVShowTranslation(self, thetvdb, lang):
    """Fetch a show's localised title from TheTVDB for the given language
    code; returns a utf-8 byte string, or None on any failure."""
    try:
        api_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64')
        xml = client.request('http://thetvdb.com/api/%s/series/%s/%s.xml' % (api_key, thetvdb, lang))
        name = client.parseDOM(xml, 'SeriesName')[0]
        name = client.replaceHTMLCodes(name)
        return name.encode('utf-8')
    except:
        pass
def check(url):
    """True if the remote file is present; False on request failure or a
    'File not found' marker inside the page's para_title span."""
    try:
        html = client.request(url)
        if html == None:
            return False
        for span in client.parseDOM(html, 'span', attrs={'class': 'para_title'}):
            if 'File not found' in span:
                raise Exception()
        return True
    except:
        return False
def resolve(url):
    """Resolve a streamcloud.eu link: rebuild the canonical page URL,
    replay its hidden 'proform' form (forcing op=download2), and scrape
    the final file URL from the response.  None on failure."""
    try:
        media_id = re.compile('//.+?/([\w]+)').findall(url)[0]
        page_url = 'http://streamcloud.eu/%s' % media_id
        page = client.request(page_url)

        form = client.parseDOM(page, 'form', attrs={'class': 'proform'})[0]
        names = client.parseDOM(form, 'input', ret='name', attrs={'type': 'hidden'})
        fields = dict((n, client.parseDOM(form, 'input', ret='value', attrs={'name': n})[0]) for n in names)

        # The form defaults to the countdown step; jump straight to step 2.
        payload = urllib.urlencode(fields).replace('op=download1', 'op=download2')

        page = client.request(page_url, post=payload)
        return re.compile('file *: *"(http.+?)"').findall(page)[-1]
    except:
        return
def capimage(data):
    """Find the first captcha <img> in the markup, ask the user to type
    it via keyboard(), and return the answer as {'code': ...}."""
    try:
        images = client.parseDOM(data, 'img', ret='src')
        images = [i for i in images if 'captcha' in i]
        if not images:
            return
        answer = keyboard(images[0])
        return {'code': answer}
    except:
        pass
def capimage(data):
    """Prompt the user to solve an image captcha found in the markup and
    return {'code': answer}; None when no captcha image is present."""
    try:
        srcs = [s for s in client.parseDOM(data, "img", ret="src") if 'captcha' in s]
        if len(srcs) > 0:
            return {'code': keyboard(srcs[0])}
        return
    except:
        pass
def solvemedia(data):
    # Manually solve a SolveMedia captcha: show the puzzle image to the
    # user, submit their answer through the noscript verify form, and
    # return the challenge fields the caller must post back.
    try:
        url = client.parseDOM(data, 'iframe', ret='src')
        url = [i for i in url if 'api.solvemedia.com' in i]
        if not len(url) > 0: return

        result = client.request(url[0], referer='')

        # The media can be delivered as an iframe or a plain image.
        response = client.parseDOM(result, 'iframe', ret='src')
        response += client.parseDOM(result, 'img', ret='src')
        response = [i for i in response if '/papi/media' in i][0]
        response = 'http://api.solvemedia.com' + response
        response = keyboard(response)

        # Replay the hidden fields of the noscript verification form.
        post = {}
        f = client.parseDOM(result, 'form', attrs = {'action': 'verify.noscript'})[0]
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k:
            post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post.update({'adcopy_response': response})
        client.request('http://api.solvemedia.com/papi/verify.noscript', post=urllib.urlencode(post))

        return {'adcopy_challenge': post['adcopy_challenge'], 'adcopy_response': 'manual_challenge'}
    except:
        pass
def solvemedia(data):
    # Manual SolveMedia captcha solver (duplicate variant with
    # double-quoted parseDOM arguments): shows the puzzle image to the
    # user, posts the answer to the noscript verifier, and returns the
    # challenge fields for the host form.
    try:
        url = client.parseDOM(data, "iframe", ret="src")
        url = [i for i in url if 'api.solvemedia.com' in i]
        if not len(url) > 0: return

        result = client.request(url[0], referer='')

        # The media can arrive as an iframe or a plain image.
        response = client.parseDOM(result, "iframe", ret="src")
        response += client.parseDOM(result, "img", ret="src")
        response = [i for i in response if '/papi/media' in i][0]
        response = 'http://api.solvemedia.com' + response
        response = keyboard(response)

        # Replay the hidden fields of the noscript verification form.
        post = {}
        f = client.parseDOM(result, "form", attrs = { "action": "verify.noscript" })[0]
        k = client.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
        for i in k:
            post.update({i: client.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
        post.update({'adcopy_response': response})
        client.request('http://api.solvemedia.com/papi/verify.noscript', post=urllib.urlencode(post))

        return {'adcopy_challenge': post['adcopy_challenge'], 'adcopy_response': 'manual_challenge'}
    except:
        pass
def resolve(url):
    # Resolve a file-host page that guards its download behind the hidden
    # 'F1' form: replay the form, then pull the uptobox.com link out of
    # the upload-button block.  Up to 3 attempts with a 1s pause.
    try:
        result = client.request(url)

        post = {}
        f = client.parseDOM(result, 'form', attrs = {'name': 'F1'})[0]
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k:
            post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post = urllib.urlencode(post)

        for i in range(0, 3):
            try:
                result = client.request(url, post=post)
                url = client.parseDOM(result, 'div', attrs = {'align': '.+?'})
                url = [i for i in url if 'button_upload' in i][0]
                url = client.parseDOM(url, 'a', ret='href')[0]
                url = ['http' + i for i in url.split('http') if 'uptobox.com' in i][0]
                return url
            except:
                time.sleep(1)
    except:
        return
def testSiteReq(self):
    # Probe the base URL: capture the response cookie (falling back to
    # scraping Set-Cookie out of the raw headers), detect whether a
    # reCAPTCHA interstitial is active, and stash cookie/sitekey state on
    # the instance.  Returns the extended response tuple unchanged.
    self.headers['Referer'] = self.baseUrl
    html, r2, r3, r4 = client.request(self.baseUrl, headers=self.headers, output='extended')
    #print r2, r3, r4
    #print html[:200]
    try:
        cookie = r4
        if cookie == None or len(cookie) == 0:
            # Nothing in the structured cookie slot; scrape the raw header.
            try:
                cookie = re.findall(r'Set-Cookie:(.*?)\n', str(r3))[0].strip()
                cookie = self.formatCookie(cookie)
            except:
                pass
    except:
        pass
    e = "Retrieved cookie: %s" % cookie
    log(type='INFO', method='testSiteReq', err='%s' % e, dolog=True, logToControl=True, doPrint=True)
    self.captchaActive = False
    if 'Please complete the security check to continue!' in html:
        # Challenge page detected: remember the cookie and pull the
        # reCAPTCHA sitekey so the caller can solve it.
        self.cookie = cookie
        self.captchaActive = True
        try:
            self.sitekey = client.parseDOM(html, 'div', attrs={'class': 'g-recaptcha'}, ret='data-sitekey')[0]
        except:
            e = 'Could not find data-sitekey'
            log(type='ERROR', method='testSiteReq', err='%s' % e, dolog=True, logToControl=True, doPrint=True)
    else:
        # No challenge: adopt the fresh cookie only if none is stored yet.
        if self.cookie == '' and len(cookie) > 0:
            self.cookie = cookie
        self.captchaActive = False
    return html, r2, r3, r4
def resolve(url):
    """Resolve a filepup link: extract the media id, load the /play page,
    and return the first video-typed <source>; None on failure."""
    try:
        media_id = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]
        page = client.request('http://www.filepup.net/play/%s' % media_id)
        return client.parseDOM(page, 'source', ret='src', attrs={'type': 'video.+?'})[0]
    except:
        return
def getTVrageId(imdb, tvdb, show, year):
    """Resolve a TVRage id for a show, trying trakt (via IMDB id) first
    and falling back to TVRage's search feed matched on a cleaned title
    and a one-year window around `year`.  Returns None when both fail."""
    try:
        from modules.indexers import trakt
        imdb_id = imdb if imdb.startswith('tt') else 'tt' + imdb
        summary = json.loads(trakt.getTVShowSummary(imdb_id))
        tvrage = summary['ids']['tvrage']
        if tvrage == None: raise Exception()
        return str(tvrage)
    except:
        pass
    try:
        search = 'http://services.tvrage.com/feeds/search.php?show=%s' % urllib.quote_plus(show)
        entries = client.parseDOM(client.request(search, timeout='5'), "show")
        wanted = cleantitle.tv(show)
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
        matches = []
        for entry in entries:
            name = client.replaceHTMLCodes(client.parseDOM(entry, "name")[0])
            started = client.parseDOM(entry, "started")[0]
            if wanted == cleantitle.tv(name) and any(y in started for y in years):
                matches.append(entry)
        return str(client.parseDOM(matches[0], "showid")[0])
    except:
        pass
def recaptcha(data):
    # Solve a legacy Google reCAPTCHA by hand: find the challenge script
    # URL (the input may itself already be that URL), fetch the challenge
    # token, show the puzzle image via keyboard(), and return every field
    # name variant callers may post back.
    try:
        url = []
        if data.startswith('http://www.google.com'): url += [data]
        url += client.parseDOM(data, "script", ret="src", attrs = { "type": "text/javascript" })
        url = [i for i in url if 'http://www.google.com' in i]
        if not len(url) > 0: return

        result = client.request(url[0])
        challenge = re.compile("challenge\s+:\s+'(.+?)'").findall(result)[0]
        response = 'http://www.google.com/recaptcha/api/image?c=' + challenge
        response = keyboard(response)

        return {'recaptcha_challenge_field': challenge, 'recaptcha_challenge': challenge, 'recaptcha_response_field': response, 'recaptcha_response': response}
    except:
        pass
def resolve(url):
    """Resolve a vidto.me link to a direct media URL.

    Rebuilds the embed page URL from the media id, unpacks the packed
    jwplayer JavaScript, and returns the first non-subtitle source,
    normalised to an http:// scheme.  Returns None on any failure.

    Fix: removed leftover 'print' debug statements that polluted stdout.
    """
    try:
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://vidto.me/embed-%s.html' % url

        result = client.request(url)
        # The last eval(...) blob is the packed player config; re-insert a
        # statement separator so jsunpack can digest it.
        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = re.sub(r'(\',\d*,\d*,)', r';\1', result)
        result = jsunpack.unpack(result)

        url = client.parseDOM(result, 'embed', ret='src')
        url += re.compile("file *: *[\'|\"](http.+?)[\'|\"]").findall(result)
        url = [i for i in url if not i.endswith('.srt')]  # drop subtitle tracks
        url = 'http://' + url[0].split('://', 1)[-1]
        return url
    except:
        return
def solvemedia(data):
    """Interactively solve a SolveMedia captcha embedded in `data` and
    return the adcopy challenge/response pair expected by the host form;
    None when no SolveMedia iframe is present or solving fails."""
    try:
        frames = [f for f in client.parseDOM(data, 'iframe', ret='src') if 'api.solvemedia.com' in f]
        if not frames:
            return

        page = client.request(frames[0], referer='')

        media = client.parseDOM(page, 'iframe', ret='src') + client.parseDOM(page, 'img', ret='src')
        media = [m for m in media if '/papi/media' in m][0]
        answer = keyboard('http://api.solvemedia.com' + media)

        form = client.parseDOM(page, 'form', attrs={'action': 'verify.noscript'})[0]
        names = client.parseDOM(form, 'input', ret='name', attrs={'type': 'hidden'})
        fields = dict((n, client.parseDOM(form, 'input', ret='value', attrs={'name': n})[0]) for n in names)
        fields.update({'adcopy_response': answer})

        client.request('http://api.solvemedia.com/papi/verify.noscript', post=urllib.urlencode(fields))

        return {'adcopy_challenge': fields['adcopy_challenge'], 'adcopy_response': 'manual_challenge'}
    except:
        pass
def resolver(url, debrid):
    """Unrestrict `url` through a debrid service.

    `debrid` is either a service name ('realdebrid', 'premiumize',
    'alldebrid', 'rpnet') or True, meaning try each in that order.
    Returns a direct-download URL, or None when no service succeeds.

    Fix: the premiumize API URL's separators had been mangled to a pilcrow
    ('\\xb6ms[...]', an '&para;' HTML-entity collapse of '&params'),
    producing an invalid query string; restored to '&params[...]'.
    """
    u = url
    # filefactory stream pages must be unrestricted as file pages.
    u = u.replace('filefactory.com/stream/', 'filefactory.com/file/')

    try:
        if not debrid == 'realdebrid' and not debrid == True: raise Exception()
        if '' in credentials()['realdebrid'].values(): raise Exception()

        rd = credentials()['realdebrid']
        id, secret, token, refresh = rd['id'], rd['secret'], rd['token'], rd['refresh']

        USER_AGENT = 'Kodi Death Streams RD/3.0'

        post = urllib.urlencode({'link': u})
        headers = {'Authorization': 'Bearer %s' % token, 'User-Agent': USER_AGENT}
        url = 'https://api.real-debrid.com/rest/1.0/unrestrict/link'

        result = client.request(url, post=post, headers=headers, error=True)
        result = json.loads(result)

        # Expired access token: refresh via the device-code grant, retry once.
        if 'error' in result and result['error'] == 'bad_token':
            result = client.request(
                'https://api.real-debrid.com/oauth/v2/token',
                post=urllib.urlencode({
                    'client_id': id,
                    'client_secret': secret,
                    'code': refresh,
                    'grant_type': 'http://oauth.net/grant_type/device/1.0'
                }),
                headers={'User-Agent': USER_AGENT},
                error=True)
            result = json.loads(result)
            if 'error' in result: return

            headers['Authorization'] = 'Bearer %s' % result['access_token']
            result = client.request(url, post=post, headers=headers)
            result = json.loads(result)

        url = result['download']
        return url
    except:
        pass

    try:
        if not debrid == 'premiumize' and not debrid == True: raise Exception()
        if '' in credentials()['premiumize'].values(): raise Exception()

        user, password = credentials()['premiumize']['user'], credentials()['premiumize']['pass']

        # BUGFIX: separators restored from mojibake to literal '&params[...]'.
        url = 'http://api.premiumize.me/pm-api/v1.php?method=directdownloadlink&params[login]=%s&params[pass]=%s&params[link]=%s' % (
            user, password, urllib.quote_plus(u))

        result = client.request(url, close=False)
        url = json.loads(result)['result']['location']
        return url
    except:
        pass

    try:
        if not debrid == 'alldebrid' and not debrid == True: raise Exception()
        if '' in credentials()['alldebrid'].values(): raise Exception()

        user, password = credentials()['alldebrid']['user'], credentials()['alldebrid']['pass']

        login_data = urllib.urlencode({'action': 'login', 'login_login': user, 'login_password': password})
        login_link = 'http://alldebrid.com/register/?%s' % login_data
        cookie = client.request(login_link, output='cookie', close=False)

        url = 'http://www.alldebrid.com/service.php?link=%s' % urllib.quote_plus(u)
        result = client.request(url, cookie=cookie, close=False)

        url = client.parseDOM(result, 'a', ret='href', attrs={'class': 'link_dl'})[0]
        url = client.replaceHTMLCodes(url)
        # The session cookie must ride along for the download to work.
        url = '%s|Cookie=%s' % (url, urllib.quote_plus(cookie))
        return url
    except:
        pass

    try:
        if not debrid == 'rpnet' and not debrid == True: raise Exception()
        if '' in credentials()['rpnet'].values(): raise Exception()

        user, password = credentials()['rpnet']['user'], credentials()['rpnet']['pass']

        login_data = urllib.urlencode({'username': user, 'password': password, 'action': 'generate', 'links': u})
        login_link = 'http://premium.rpnet.biz/client_api.php?%s' % login_data
        result = client.request(login_link, close=False)
        result = json.loads(result)
        url = result['links'][0]['generated']
        return url
    except:
        return
def generic(self, url, pattern=None):
    """Generic resolver: scrape candidate media URLs from `url` (via a
    caller-supplied regex or a battery of built-in patterns), probe them
    concurrently, and return the playable URL of the largest candidate
    (smallest in mobile mode).

    Returns 'offline' for dead chaturbate rooms, None on failure.

    Fix: the 'no candidates' log line interpolated an undefined name 'e'
    (only bound by the outer except clause), so it raised NameError
    instead of logging; it now logs the real condition.
    """
    try:
        r = client.request(url)

        if 'chaturbate' in url:
            if '.m3u8' not in r: return 'offline'

        if pattern:
            s = re.findall(r'%s' % pattern, r)
        else:
            patterns = [
                r'''\s*=\s*[\'\"](http.+?)[\'\"]''',
                r'''\s*=\s*['"](http.+?)['"]''',
                r'''['"][0-9_'"]+:\s[\'\"]([^'"]+)''',
                r'''\(\w+\([\'\"]([^\'\"]*)''',
                r'''[\'\"]\w+[\'\"]:['"]([^'"]*)''',
                r'''\s*=\s*[\'\"](http.+?)[\'\"]''',
                r'''\s*:\s*[\'\"](//.+?)[\'\"]''',
                r'''\:[\'\"](\.+?)[\'\"]''',
                r'''\s*\(\s*[\'\"](http.+?)[\'\"]''',
                r'''\s*=\s*[\'\"](//.+?)[\'\"]''',
                r'''\w*:\s*[\'\"](http.+?)[\'\"]''',
                r'''\w*=[\'\"]([^\'\"]*)''',
                r'''\w*\s*=\s*[\'\"]([^\'\"]*)''',
                r'''(?s)<file>([^<]*)''',
            ]
            s = []
            for pattern in patterns:
                l = re.findall(pattern, r)
                # Keep only URLs whose path ends in a known media extension.
                s += [i for i in l if (urlparse.urlparse(i).path).strip('/').split('/')[-1].split('.')[-1] in ['mp4', 'flv', 'm3u8']]

        if s:
            s = [i for i in s if (urlparse.urlparse(i).path).strip('/').split('/')[-1].split('.')[-1] in ['mp4', 'flv', 'm3u8']]
        else:
            s = client.parseDOM(r, 'source', ret='src', attrs={'type': 'video.+?'})

        if not s:
            # BUGFIX: used to reference undefined 'e' here.
            log_utils.log('Error resolving %s :: no media candidates found' % url, log_utils.LOGERROR)
            return

        s = ['http:' + i if i.startswith('//') else i for i in s]
        s = [urlparse.urljoin(url, i) if not i.startswith('http') else i for i in s]
        s = [x for y, x in enumerate(s) if x not in s[:y]]  # de-dupe, keep order

        self.u = []

        def request(i):
            # Header-probe one candidate; record (url, size) when playable.
            try:
                i = i.replace(' ', '%20')
                c = client.request(i, output='headers', referer=url)
                checks = ['video', 'mpegurl', 'html']
                if any(f for f in checks if f in c['Content-Type']):
                    self.u.append((i, int(c['Content-Length'])))
            except:
                pass

        threads = []
        for i in s:
            threads.append(workers.Thread(request, i))
        [i.start() for i in threads]
        [i.join() for i in threads]

        u = sorted(self.u, key=lambda x: x[1])[::-1]

        # Mobile devices get the smallest stream, everyone else the largest.
        mobile_mode = kodi.get_setting('mobile_mode')
        if mobile_mode == 'true':
            u = client.request(u[-1][0], output='geturl', referer=url)
        else:
            u = client.request(u[0][0], output='geturl', referer=url)

        log_utils.log('Returning %s from XXX-O-DUS Resolver' % str(u), log_utils.LOGNOTICE)
        return u
    except Exception as e:
        log_utils.log('Error resolving %s :: Error: %s' % (url, str(e)), log_utils.LOGERROR)
def returnFinalLink(url):
    # Follow a chain of interstitial 'wait'/redirect pages until a final
    # playable link (or iframe target) is reached, accumulating cookies in
    # the request headers along the way.  Gives up after 15 hops.
    #url = 'http://xpau.se/watch/war-for-the-planet-of-the-apes'
    #site = self.base_link
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*//**;q=0.8',
        'Accept-Language': 'en-US,en;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive'
    }
    headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
    headers['Cookie'] = ''
    headers['Referer'] = url

    for x in range(0, 15):
        if 'wait' in url:
            # Wait page: collect its cookie before re-requesting.
            cookie = client.request(url, output='cookie', headers=headers)
            if cookie != None and len(cookie) > 0:
                headers['Cookie'] = headers['Cookie'] + cookie
            #url = client.request(url, output='geturl', headers=headers)
            #print 'wait-url', url

        resp = client.request(url, headers=headers)
        headers['Referer'] = url
        #print resp

        if 'playthevid' in resp:
            #print '---> playthevid'
            r = client.parseDOM(resp, 'a', ret='href', attrs={'id': 'playthevid'})[0]
            cookie = client.request(url, output='cookie', headers=headers)
            #print cookie
            if cookie != None and len(cookie) > 0:
                if len(headers['Cookie']) == 0:
                    headers['Cookie'] = headers['Cookie'] + cookie
                else:
                    headers['Cookie'] = headers['Cookie'] + cookie
        elif 'skipper' in resp:
            #print '---> skipper'
            # NOTE(review): if both parses below fail, 'r' may be unbound at
            # the 'http' check -- confirm skipper pages always carry the
            # anchor or the obfuscated var parts.
            try:
                r = client.parseDOM(resp, 'a', ret='href', attrs={'id': 'skipper'})[0]
                # Obfuscated path split across JS vars; reassemble it.
                try:
                    parts = re.findall(r'var.*\"(.*I.*l.*)\".*;', resp)
                    r = '/watch/' + parts[1] + parts[0] + parts[2]
                except:
                    Log('Parts decoding failed in skipper')
            except:
                pass
            if 'http' not in r:
                r = clean_url(r)
            cookie = client.request(r, output='cookie', headers=headers)
            #print cookie
            if cookie != None and len(cookie) > 0:
                if len(headers['Cookie']) == 0:
                    headers['Cookie'] = headers['Cookie'] + cookie
                else:
                    headers['Cookie'] = headers['Cookie'] + cookie
        else:
            #print '---> iframe'
            #print resp
            try:
                r = client.parseDOM(resp, 'iframe', ret='src')[0]
            except:
                Log('Could not find final url in iframe')
                return None
            if 'google' in r:
                return r
        # Normalise whichever link the branch above produced and make it
        # the URL for the next hop.
        if 'http' not in r:
            r = clean_url(r)
            url = r
        else:
            url = r
import urllib, urllib2, re, xbmcplugin, xbmcgui, sys, xbmc, xbmcaddon, os, random, urlparse
def sources(self, url, hostDict, hostprDict):
    # Scrape hoster links for a movie/episode from this site's RSS-style
    # search feed.  Filters by cleaned title plus an 'S01E01'/year tag,
    # derives quality from the release name, and returns source dicts.
    try:
        sources = []

        if url == None: return sources

        # This scraper is skipped entirely when a debrid account is active.
        if debrid.status() == True: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

        # hdlr is the tag that must appear in a valid release name.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)

        posts = client.parseDOM(r, 'item')

        hostDict = hostprDict + hostDict

        items = []

        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]

                c = client.parseDOM(post, 'content.+?')[0]

                # Only links before any <download> marker count.
                u = re.findall('<singlelink>(.+?)(?:<download>|$)', c.replace('\n', ''))[0]
                u = client.parseDOM(u, 'a', ret='href')

                # Release size, e.g. '1.4 GB'; defaults to '0'.
                s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', c)
                s = s[0] if s else '0'

                items += [(t, i, s) for i in u]
            except:
                pass

        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)

                # Strip the year/episode tag and everything after it so
                # the remaining text can be title-compared.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                if not y == hdlr: raise Exception()

                # Tokenise the part after the tag to sniff format flags.
                fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [i.lower() for i in fmt]

                if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                if any(i in ['extras'] for i in fmt): raise Exception()

                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                else: quality = 'SD'
                # NOTE(review): 'caMr Blamoip' below looks like a mangled
                # 'camrip' literal -- confirm against the upstream scraper.
                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                elif any(i in ['caMr Blamoip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                info = []

                if '3d' in fmt: info.append('3D')

                # Normalise the release size to 'X.XX GB'.
                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', item[2])[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass

                if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                info = ' | '.join(info)

                url = item[1]
                if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': True, 'debridonly': False})
            except:
                pass

        # Drop CAM copies when anything better was found.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check: sources = check

        return sources
    except:
        return sources