def get_links1(self, cItem):
    """Resolve the playable link(s) for a 'golink'-protected item.

    Flow: fetch cItem['url'] with a fresh session so the server drops a
    'golink' cookie, decode that cookie (URL-encoded JSON) to obtain the
    real route, fetch the route and either take the first <iframe> src or
    — when no iframe is present yet — wait 6 s and re-request the route
    via AJAX to get a JSON answer carrying 'direct_link'.

    :param cItem: item dict; reads 'url' and 'url0' (referer).
    :return: list of link dicts ({'name', 'url', ...}), possibly empty.
    """
    urlTab = []
    printDBG('0000111111')
    #try:
    URL = cItem['url']
    printDBG('url=' + URL)
    paramsUrl = dict(self.defaultParams1)
    paramsUrl['header']['Referer'] = cItem['url0']
    # drop any stale 'golink' cookie so the value read below is fresh
    self.cm.clearCookie(self.COOKIE_FILE1, removeNames=['golink'])
    paramsUrl['use_new_session'] = True
    self.getPage(URL, paramsUrl)
    paramsUrl.pop('use_new_session')
    # NOTE(review): debug line reads COOKIE_FILE while the code below uses
    # COOKIE_FILE1 — looks like a leftover; harmless (debug only).
    printDBG('444444444444ggggg' + str(self.cm.getCookieItems(self.COOKIE_FILE)))
    data = self.cm.getCookieItems(self.COOKIE_FILE1)
    if 'golink' in data:
        # the cookie holds URL-encoded JSON with the real target under 'route'
        data = json_loads(urllib.unquote(data['golink']))
        paramsUrl = dict(self.defaultParams)
        paramsUrl['header']['Referer'] = cItem['url']
        url_ = data['route']
        printDBG('11111111')
        sts, data = self.getPage(url_, paramsUrl, type_=0)
        if sts:
            cUrl = url_  #data.meta['url']
            printDBG('2222222222')
            url_dat = re.findall('<iframe[^>]+?src=[\'"]([^"^\']+?)[\'"]', data, re.S | re.IGNORECASE)
            if not url_dat:
                # no iframe yet: after a mandated delay an AJAX POST to the
                # same URL returns JSON with the direct link
                GetIPTVSleep().Sleep(6)
                paramsUrl = dict(self.defaultParams)
                paramsUrl['header'] = dict(self.AJAX_HEADER)
                paramsUrl['header']['Referer'] = cUrl
                printDBG('33333333')
                sts, data = self.getPage(cUrl, paramsUrl, {})
                if sts:
                    printDBG('2222222222333' + data + '#')
                    data = json_loads(data)
                    urlTab.append({'name': 'direct_link', 'url': data['direct_link']})
            else:
                printDBG('4444444')
                urL_ = url_dat[0]
                # normalise protocol-relative / schemeless iframe URLs
                if urL_.startswith('//'):
                    urL_ = 'http:' + urL_
                if not urL_.startswith('http'):
                    urL_ = 'http://' + urL_
                urlTab.append({'name': 'link', 'url': strwithmeta(urL_, {'Referer': cUrl}), 'need_resolve': 1})
    #except:
    #    printDBG('Erreur')
    return urlTab
def getVideos(self, videoUrl):
    """Resolve a video page URL into playable links.

    'watch/?v' URLs are delegated straight to parserVIDSTREAM('egy');
    otherwise the session is primed by fetching MAIN_URL + atob(code)
    (code is the third part of the '%%%'-packed videoUrl), then after a
    5 s wait the video page itself is fetched. The location-redirect
    branch is disabled (`if False:`) in the original.

    :param videoUrl: either a plain URL or 'url%%%referer%%%base64code'.
    :return: list of resolved links (urlTab), possibly empty.
    """
    HTTP_HEADER = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0',
        'Accept': 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate'
    }
    http_params = {'header': HTTP_HEADER, 'with_metadata': True, 'cookiefile': self.COOKIE_FILE, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True}
    printDBG(' -----------> URL = ' + videoUrl)
    urlTab = []
    referer = self.MAIN_URL
    if '%%%' in videoUrl:
        videoUrl, referer, code = videoUrl.split('%%%', 2)
    if not videoUrl.startswith('http'):
        videoUrl = self.MAIN_URL + videoUrl
    if 'watch/?v' in videoUrl:
        #try:
        printDBG('try resolve url0: ' + videoUrl)
        urlTab = self.parserVIDSTREAM(videoUrl, 'egy')
        #except Exception as e:
        #printDBG('ERREUR:'+str(e))
    else:
        # NOTE(review): 'code' is only bound when '%%%' was present — this
        # branch raises NameError for plain URLs; confirm callers always
        # pass the packed form here.
        # NOTE: addParams0 is prepared but never used below.
        addParams0 = dict(self.defaultParams)
        addParams0['header']['Referer'] = referer
        http_params['header']['Referer'] = referer
        # priming request: the decoded 'code' path sets required cookies
        sts, data = self.cm.getPage(self.MAIN_URL + '/' + atob(code), http_params)
        printDBG('data_meta0=' + str(data.meta))
        GetIPTVSleep().Sleep(5)
        sts, data = self.cm.getPage(videoUrl, http_params)
        if sts:
            printDBG('data=' + str(data))
            printDBG('data_meta1=' + str(data.meta))
            if False:  # disabled redirect-based resolution path
                URL = data.meta['location']
                VID_URL = urlparser.getDomain(URL, onlyDomain=False)
                if VID_URL.endswith('/'):
                    VID_URL = VID_URL[:-1]
                self.VID_URL = VID_URL
                printDBG('HOST vstream = ' + self.VID_URL)
                try:
                    printDBG('try resolve url1: ' + URL)
                    urlTab = self.parserVIDSTREAM(URL)
                except Exception as e:
                    printDBG('ERREUR:' + str(e))
    return urlTab
def getVideoLinks(self, url):
    """Resolve a protect-stream.com gateway page into playable video links.

    Fetches the protection page, extracts the 'k' token and the name of
    the secur*.js script, honours the countdown the script demands, then
    POSTs the token back and collects every iframe/anchor URL found in
    the answer. Returns a list of link dicts (possibly empty).
    """
    printDBG("StreamingSeriesWatch.getVideoLinks [%s]" % url)
    # Only protect-stream.com gateways are handled here.
    if 'protect-stream.com' not in url:
        return []

    sts, pageData = self.getPage(url, self.defaultParams)
    if not sts:
        return []
    cUrl = self.cm.meta['url']

    # Token and script name are embedded in the protection page.
    k = self.cm.ph.getSearchGroups(pageData, 'var\s+?k[^"]*?=[^"]*?"([^"]+?)"')[0]
    secure = self.cm.ph.getSearchGroups(pageData, '''['"/](secur[^\.]*?)\.js''')[0]

    # The secur*.js script carries a countdown; wait it out (bounded <15 s).
    try:
        sts, scriptData = self.getPage('https://www.protect-stream.com/%s.js' % secure, self.defaultParams)
        count = self.cm.ph.getSearchGroups(scriptData, 'var\s+?count\s*?=\s*?([0-9]+?);')[0]
        if int(count) < 15:
            GetIPTVSleep().Sleep(int(count))
    except Exception:
        printExc()
        return []

    # POST the token back, form-encoded, referred from the gateway,
    # deliberately without cookies.
    postHeader = dict(self.defaultParams['header'])
    postParams = dict(self.defaultParams)
    postHeader['Referer'] = cUrl
    postHeader['Content-Type'] = "application/x-www-form-urlencoded"
    postHeader['Accept-Encoding'] = 'gzip, deflate'
    postParams['header'] = postHeader
    postParams['use_cookie'] = False
    sts, pageData = self.getPage('https://www.protect-stream.com/%s.php' % secure, postParams, {'k': k})
    if not sts:
        return []

    printDBG('==========================================')
    printDBG(pageData)
    printDBG('==========================================')

    # Collect every iframe src and anchor href, then resolve each one.
    candidates = re.compile('''<iframe[^>]+?src=['"]([^'^"]+?)['"]''').findall(pageData)
    candidates += re.compile('''<a[^>]+?href=['"]([^'^"]+?)['"]''').findall(pageData)
    urlTab = []
    for videoUrl in candidates:
        urlTab.extend(self.up.getVideoLinkExt(self.getFullUrl(videoUrl)))
    return urlTab
def _unshorten_iivpl(self, baseUri):
    """Unshorten an iiv.pl short link.

    Scrapes the anti-bot tokens (salt/time/action/banner) from the page,
    mirrors the site's JavaScript by copying the custom AJAX header names
    out of framework.js, then POSTs the tokens back and extracts the
    destination href from the JSON answer. Retries once, honouring the
    wait time the page demands.

    :param baseUri: short URL; may carry a 'Referer' in strwithmeta meta.
    :return: (uri, status) — status 'OK' on success, otherwise the error
             text; on failure uri falls back to baseUri.
    """
    baseUri = strwithmeta(baseUri)
    ref = baseUri.meta.get('Referer', baseUri)
    USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'
    HTTP_HEADER = {'User-Agent': USER_AGENT, 'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate', 'Referer': ref}
    HTTP_HEADER_AJAX = {'User-Agent': USER_AGENT, 'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate', 'Referer': baseUri, 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 'X-Requested-With': 'XMLHttpRequest'}
    COOKIE_FILE = GetCookieDir('iit.pl')
    tries = 0
    retUri, retSts = '', 'KO'
    while tries < 2 and retSts != 'OK':
        tries += 1
        rm(COOKIE_FILE)  # always start from a clean cookie jar
        try:
            params = {'header': HTTP_HEADER, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': COOKIE_FILE}
            sts, data = self.cm.getPage(baseUri, params)
            sts, headers = self.cm.getPage('http://iiv.pl/modules/system/assets/js/framework.js', params)
            # the site's JS defines its custom AJAX header names here
            headers = self.cm.ph.getDataBeetwenMarkers(headers, 'headers', '}')[1]
            headers = re.compile('''['"]([^'^"]+?)['"]''').findall(headers)
            salt = self.cm.ph.getSearchGroups(data, '''data\-salt="([^"]+?)"''')[0]
            # NOTE: local 'time' shadows the time module within this scope
            time = self.cm.ph.getSearchGroups(data, '''data\-time="([^"]+?)"''')[0]
            action = self.cm.ph.getSearchGroups(data, '''data\-action="([^"]+?)"''')[0]
            banner = self.cm.ph.getSearchGroups(data, '''data\-banner="([^"]+?)"''')[0]
            component = self.cm.ph.getSearchGroups(data, '''data\-component="([^"]+?)"''')[0]  # scraped but unused
            if tries > 1:
                # on retry honour the wait the page demands
                GetIPTVSleep().Sleep(int(time))
            sts, partials = self.cm.getPage('http://iiv.pl/themes/cutso/assets/javascript/shortcut/shortcut.js', params)
            partials = self.cm.ph.getDataBeetwenMarkers(partials, 'update:', '}')[1]
            partials = self.cm.ph.getSearchGroups(partials, '''['"]([^'^"]+?)['"]''')[0]
            if partials == '':
                partials = 'shortcut/link_show'  # fallback partial name
            # fill the custom AJAX headers with the scraped values
            for header in headers:
                if 'HANDLER' in header:
                    HTTP_HEADER_AJAX[header] = action
                elif 'PARTIALS' in header:
                    HTTP_HEADER_AJAX[header] = partials
            post_data = {'salt': salt, 'banner': banner, 'blocker': 0}
            params['header'] = HTTP_HEADER_AJAX
            sts, data = self.cm.getPage(baseUri, params, post_data)
            data = json_loads(data)
            printDBG(">>>%s<<<" % data)
            # the JSON answer maps the partial name to an HTML snippet
            # containing the destination link
            uri = self.cm.ph.getSearchGroups(data[partials], '''href="(https?://[^"]+?)"''')[0]
            retUri, retSts = uri, 'OK'
        except Exception as e:
            retUri, retSts = baseUri, str(e)
            printExc()
    return retUri, retSts
def getPage(self, baseUrl, addParams=None, post_data=None):
    """Fetch ``baseUrl`` through the Cloudflare-protection wrapper.

    Adds this host's cookie file and User-Agent to ``cloudflare_params``
    and retries (with a 5 s pause) while the server keeps returning its
    'class="loading"' interstitial page.

    :param baseUrl:   IRI/URL to fetch (converted to a URI first).
    :param addParams: optional request params; defaults to a copy of
                      ``self.defaultParams``. The caller's dict is copied,
                      never mutated (the original mutated it in place).
    :param post_data: optional POST payload forwarded unchanged.
    :return: (sts, data) tuple from getPageCFProtection.
    """
    # Avoid the shared mutable-default-argument pitfall, and never mutate
    # the caller's dict when one is supplied.
    if not addParams:
        addParams = dict(self.defaultParams)
    else:
        addParams = dict(addParams)
    addParams['cloudflare_params'] = {'cookie_file': self.COOKIE_FILE, 'User-Agent': self.USER_AGENT}
    url = self.cm.iriToUri(baseUrl)  # hoisted: conversion is loop-invariant
    sts, data = False, None
    # Bounded retry: the original looped forever when the site kept
    # serving its "loading" page; cap it so the GUI cannot hang.
    for _ in range(10):
        sts, data = self.cm.getPageCFProtection(url, addParams, post_data)
        printDBG(str(sts))
        if sts and 'class="loading"' in data:
            GetIPTVSleep().Sleep(5)
            continue
        break
    return sts, data
def processCaptcha(self, sitekey, referer=''):
    """Solve a reCaptcha v2 through the 9kw.eu captcha service.

    Uploads the sitekey, then polls usercaptchacorrectdata until an
    answer arrives or the 300 s budget runs out. Shows an error dialog
    when no token could be obtained.

    :param sitekey: the data-sitekey value from the protected page.
    :param referer: page URL the captcha belongs to.
    :return: the response token, or '' on failure.
    """
    sleepObj = None
    token = ''
    errorMsgTab = []
    apiKey = config.plugins.iptvplayer.api_key_9kweu.value
    apiUrl = self.getFullUrl('/index.cgi?apikey=') + apiKey + '&action=usercaptchaupload&interactive=1&json=1&file-upload-01=' + sitekey + '&oldsource=recaptchav2&pageurl=' + urllib.quote(referer)
    try:
        token = ''
        sts, data = self.cm.getPage(apiUrl)
        if sts:
            printDBG('API DATA:\n%s\n' % data)
            data = json_loads(data)
            if 'captchaid' in data:
                captchaid = data['captchaid']
                sleepObj = GetIPTVSleep()
                sleepObj.Sleep(300, False)  # 300 s overall budget (non-blocking)
                tries = 0
                while True:
                    tries += 1
                    # first poll after 10 s, then every 5 s while budget remains
                    timeout = sleepObj.getTimeout()
                    if tries == 1:
                        timeout = 10
                    elif timeout > 10:
                        timeout = 5
                    time.sleep(timeout)
                    apiUrl = self.getFullUrl('/index.cgi?apikey=') + apiKey + '&action=usercaptchacorrectdata&json=1&id=' + captchaid
                    sts, data = self.cm.getPage(apiUrl)
                    if not sts:
                        continue
                        # maybe simple continue here ?
                        # NOTE(review): the two lines below are unreachable
                        # (dead code preserved from the original)
                        errorMsgTab.append(_('Network failed %s.') % '2')
                        break
                    else:
                        printDBG('API DATA:\n%s\n' % data)
                        data = json_loads(data)
                        token = data['answer']
                        if token != '':
                            break
                    if sleepObj.getTimeout() == 0:
                        errorMsgTab.append(_('%s timeout.') % self.getMainUrl())
                        break
            else:
                errorMsgTab.append(data['error'])
        else:
            errorMsgTab.append(_('Network failed %s.') % '1')
    except Exception as e:
        errorMsgTab.append(str(e))
        printExc()
    if sleepObj != None:
        sleepObj.Reset()
    if token == '':
        self.sessionEx.waitForFinishOpen(MessageBox, (_('Resolving reCaptcha with %s failed!\n\n') % self.getMainUrl()) + '\n'.join(errorMsgTab), type = MessageBox.TYPE_ERROR, timeout = 10)
    return token
def processCaptcha(self, sitekey, referer='', action='verify'):
    """Solve a reCaptcha v3 through the 2captcha service.

    Submits the sitekey via /in.php (userrecaptcha, v3, min_score 0.3),
    then polls /res.php until an answer arrives or the 300 s budget runs
    out. Shows an error dialog when no token could be obtained.

    :param sitekey: the googlekey value from the protected page.
    :param referer: page URL the captcha belongs to.
    :param action:  reCaptcha v3 action name passed to the service.
    :return: the response token, or '' on failure.
    """
    sleepObj = None
    token = ''
    errorMsgTab = []
    apiKey = config.plugins.iptvplayer.api_key_2captcha.value
    apiUrl = self.getFullUrl('/in.php?key=') + apiKey + '&method=userrecaptcha&version=v3&action=' + action + '&min_score=0.3&googlekey=' + sitekey + '&json=1&pageurl=' + urllib.quote(referer)
    try:
        token = ''
        sts, data = self.cm.getPage(apiUrl)
        if sts:
            printDBG('API DATA:\n%s\n' % data)
            data = json_loads(data, '', True)
            if data['status'] == '1':
                captchaid = data['request']
                sleepObj = GetIPTVSleep()
                sleepObj.Sleep(300, False)  # 300 s overall budget (non-blocking)
                tries = 0
                while True:
                    tries += 1
                    # first poll after 10 s, then every 5 s while budget remains
                    timeout = sleepObj.getTimeout()
                    if tries == 1:
                        timeout = 10
                    elif timeout > 10:
                        timeout = 5
                    time.sleep(timeout)
                    apiUrl = self.getFullUrl('/res.php?key=') + apiKey + '&action=get&json=1&id=' + captchaid
                    sts, data = self.cm.getPage(apiUrl)
                    if not sts:
                        continue
                        # maybe simple continue here ?
                        # NOTE(review): the two lines below are unreachable
                        # (dead code preserved from the original)
                        errorMsgTab.append(_('Network failed %s.') % '2')
                        break
                    else:
                        printDBG('API DATA:\n%s\n' % data)
                        data = json_loads(data, '', True)
                        if data['status'] == '1' and data['request'] != '':
                            token = data['request']
                            break
                    if sleepObj.getTimeout() == 0:
                        errorMsgTab.append(_('%s timeout.') % self.getMainUrl())
                        break
            else:
                errorMsgTab.append(data['request'])
        else:
            errorMsgTab.append(_('Network failed %s.') % '1')
    except Exception as e:
        errorMsgTab.append(str(e))
        printExc()
    if sleepObj != None:
        sleepObj.Reset()
    if token == '':
        self.sessionEx.waitForFinishOpen(MessageBox, (_('Resolving reCaptcha with %s failed!\n\n') % self.getMainUrl()) + '\n'.join(errorMsgTab), type = MessageBox.TYPE_ERROR, timeout = 10)
    return token
def getPage(self, baseUrl, addParams=None, post_data=None):
    """Fetch a page, working around the site's Cloudflare JS challenge.

    Tries a plain fetch first. When the answer contains the obfuscated
    JS-challenge marker ('!![]+!![]'), it retries with the "Vstream"
    method (cRequestHandler plus cookies imported from GestionCookie);
    if that raises, it falls back to the e2iplayer getPageCFProtection
    method.

    Fixes vs. the original:
      * ``return sts, data`` restored — the original fell off the end and
        implicitly returned None although every caller unpacks two values
        (all sibling getPage variants return the pair);
      * cookie items are split on the FIRST '=' only, so values that
        themselves contain '=' (e.g. base64) no longer raise ValueError;
      * mutable default argument removed; the caller's dict is copied.

    :return: (sts, data) tuple.
    """
    if not addParams:
        addParams = dict(self.defaultParams)
    else:
        addParams = dict(addParams)  # do not mutate the caller's dict
    sts, data = self.cm.getPage(baseUrl, addParams, post_data)
    if not data:
        data = ''
    if '!![]+!![]' in data:  # obfuscated Cloudflare-challenge marker
        try:
            # start from a clean cookie jar before solving the challenge
            if os.path.exists(self.COOKIE_FILE):
                os.remove(self.COOKIE_FILE)
                printDBG('cookie removed')
            printDBG('Start CLoudflare Vstream methode')
            oRequestHandler = cRequestHandler(baseUrl)
            if post_data:
                # urlencode by hand — the project handler takes a raw line
                post_data_vstream = '&'.join('%s=%s' % (key, post_data[key]) for key in post_data)
                oRequestHandler.setRequestType(cRequestHandler.REQUEST_TYPE_POST)
                oRequestHandler.addParametersLine(post_data_vstream)
            data = oRequestHandler.request()
            sts = True
            printDBG('cook_vstream_file=' + self.up.getDomain(baseUrl).replace('.', '_'))
            cook = GestionCookie().Readcookie(self.up.getDomain(baseUrl).replace('.', '_'))
            printDBG('cook_vstream=' + cook)
            # NOTE: without ';' the raw string is iterated per character and
            # no item contains '=', so nothing is imported (original quirk).
            if ';' in cook:
                cook_tab = cook.split(';')
            else:
                cook_tab = cook
            cj = self.cm.getCookie(self.COOKIE_FILE)
            for item in cook_tab:
                if '=' in item:
                    printDBG('item=' + item)
                    # split only on the first '=' — cookie values may contain '='
                    cookieKey, cookieValue = item.split('=', 1)
                    cookieItem = cookielib.Cookie(version=0, name=cookieKey, value=cookieValue, port=None, port_specified=False, domain='.' + self.cm.getBaseUrl(baseUrl, True), domain_specified=True, domain_initial_dot=True, path='/', path_specified=True, secure=False, expires=time.time() + 3600 * 48, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
                    cj.set_cookie(cookieItem)
            cj.save(self.COOKIE_FILE, ignore_discard=True)
            GetIPTVSleep().Sleep(2)
            printDBG('Cookie saved')
        except Exception as e:
            printDBG('ERREUR:' + str(e))
            # fallback: the generic e2iplayer Cloudflare solver
            printDBG('Start CLoudflare E2iplayer methode')
            addParams['cloudflare_params'] = {'domain': self.up.getDomain(baseUrl), 'cookie_file': self.COOKIE_FILE, 'User-Agent': self.USER_AGENT}
            sts, data = self.cm.getPageCFProtection(baseUrl, addParams, post_data)
    return sts, data
def start(self, cItem):
    """Entry point of the host: reset cached state, then dispatch on mode.

    Clears the saved search marker and the cached tmdb artwork, empties
    the current list, then routes to the menu builder selected by
    cItem['mode'].
    """
    # NOTE(review): reconstructed formatting — the 5 s pause is assumed to
    # belong to the search-marker cleanup branch (an unconditional pause on
    # every dispatch would stall navigation); confirm against upstream.
    if os.path.exists(self.path_listing + '.search'):
        os.remove(self.path_listing + '.search')
        GetIPTVSleep().Sleep(5)
    # drop all cached tmdb artwork files
    files = glob.glob(self.MyPath + 'tmdb/*')
    for f in files:
        os.remove(f)
    self.currList = []
    mode = cItem.get('mode', None)
    printDBG('Start:' + str(cItem))
    # independent if-chain (not elif) kept from the original; only one
    # mode matches per call anyway
    if mode == '00':
        self.showmenu(cItem)
        #self.showmenuHome(cItem)
    if mode == '01':
        self.showmenu0(cItem)
    if mode == '02':
        self.searchGlobal(cItem)
    if mode == '03':
        self.showmenuHome(cItem)
    if mode == '10':
        self.showmenu1(cItem)
def getPage(self, baseUrl, addParams={}, post_data=None):
    """Fetch baseUrl through getPageCFProtection, retrying while the site
    serves its 'class="loading"' interstitial (5 s pause between tries).

    Passes this host's domain, cookie file, User-Agent and a relative-URL
    resolver in 'cloudflare_params'. NOTE: the retry loop is unbounded in
    the original; it only loops while the loading page keeps coming back.

    :return: (sts, data) from getPageCFProtection.
    """
    while True:
        if addParams == {}:
            addParams = dict(self.defaultParams)
        origBaseUrl = baseUrl  # kept from the original (unused)
        baseUrl = self.cm.iriToUri(baseUrl)

        def _getFullUrl(url):
            # resolve relative challenge-form actions against baseUrl
            if self.cm.isValidUrl(url):
                return url
            else:
                return urlparse.urljoin(baseUrl, url)

        addParams['cloudflare_params'] = {'domain': self.up.getDomain(baseUrl), 'cookie_file': self.COOKIE_FILE, 'User-Agent': self.USER_AGENT, 'full_url_handle': _getFullUrl}
        sts, data = self.cm.getPageCFProtection(baseUrl, addParams, post_data)
        # 'return_data' False means data is not page text; skip the check then
        if sts and addParams.get('return_data', True) and 'class="loading"' in data:
            GetIPTVSleep().Sleep(5)
            continue
        break
    return sts, data
def getPageCFProtection(self, baseUrl, params={}, post_data=None):
    """Fetch a page, solving Cloudflare protection when it blocks us.

    When the fetch fails and the error body contains a reCaptcha
    ('sitekey') or a JS challenge ('challenge'), this either:
      * solves the reCaptcha via UnCaptchaReCaptcha and re-submits the
        page's form with 'g-recaptcha-response', or
      * extracts the setTimeout/submit() script, runs it against a
        minimal DOM shim (the base64 blob below) through
        iptv_js_execute, waits the mandated timeout, and requests the
        verification URL (jschl_vc / pass / jschl_answer).
    Up to 5 attempts. NOTE: mutates 'params' in place (cookie/header
    setup) — behaviour kept from the original.

    :param params: request params; reads 'cloudflare_params'
                   (User-Agent, cookie_file, full_url_handle[2]).
    :return: (sts, data) of the final request; (False, None) when the
             captcha could not be solved.
    """
    cfParams = params.get('cloudflare_params', {})

    def _getFullUrlEmpty(url):
        return url

    _getFullUrl = cfParams.get('full_url_handle', _getFullUrlEmpty)
    _getFullUrl2 = cfParams.get('full_url_handle2', _getFullUrlEmpty)
    url = baseUrl
    header = {'Referer': url, 'User-Agent': cfParams.get('User-Agent', ''), 'Accept-Encoding': 'text'}
    header.update(params.get('header', {}))
    params.update({'with_metadata': True, 'use_cookie': True, 'save_cookie': True, 'load_cookie': True, 'cookiefile': cfParams.get('cookie_file', ''), 'header': header})
    sts, data = self.getPage(url, params, post_data)
    current = 0
    while current < 5:
        #if True:
        if not sts and None != data:
            start_time = time.time()
            current += 1
            doRefresh = False  # kept from the original (unused)
            try:
                # 'data' is the error response object; read the raw body
                domain = self.getBaseUrl(data.fp.geturl())
                verData = data.fp.read()
                if data.fp.info().get('Content-Encoding', '') == 'gzip':
                    verData = DecodeGzipped(verData)
                printDBG("------------------")
                printDBG(verData)
                printDBG("------------------")
                if 'sitekey' not in verData and 'challenge' not in verData:
                    break  # not a Cloudflare block; give up
                printDBG(">>")
                printDBG(verData)
                printDBG("<<")
                sitekey = self.ph.getSearchGroups(verData, 'data-sitekey="([^"]+?)"')[0]
                id = self.ph.getSearchGroups(verData, 'data-ray="([^"]+?)"')[0]
                if sitekey != '':
                    # --- reCaptcha variant ---
                    from Plugins.Extensions.IPTVPlayer.libs.recaptcha_v2 import UnCaptchaReCaptcha # google captcha
                    recaptcha = UnCaptchaReCaptcha(lang=GetDefaultLang())
                    recaptcha.HTTP_HEADER['Referer'] = baseUrl
                    if '' != cfParams.get('User-Agent', ''):
                        recaptcha.HTTP_HEADER['User-Agent'] = cfParams['User-Agent']
                    token = recaptcha.processCaptcha(sitekey)
                    if token == '':
                        return False, None
                    sts, tmp = self.ph.getDataBeetwenMarkers(verData, '<form', '</form>', caseSensitive=False)
                    if not sts:
                        return False, None
                    url = self.ph.getSearchGroups(tmp, 'action="([^"]+?)"')[0]
                    if url != '':
                        url = _getFullUrl(url)
                    else:
                        url = data.fp.geturl()
                    actionType = self.ph.getSearchGroups(tmp, 'method="([^"]+?)"', 1, True)[0].lower()
                    # replay all hidden form inputs plus the captcha token
                    post_data2 = dict(re.findall(r'<input[^>]*name="([^"]*)"[^>]*value="([^"]*)"[^>]*>', tmp))
                    #post_data2['id'] = id
                    if '' != token:
                        post_data2['g-recaptcha-response'] = token
                    else:
                        continue
                    params2 = dict(params)
                    params2['header'] = dict(params['header'])
                    params2['header']['Referer'] = baseUrl
                    if actionType == 'get':
                        # GET form: move the fields into the query string
                        if '?' in url:
                            url += '&'
                        else:
                            url += '?'
                        url += urllib.urlencode(post_data2)
                        post_data2 = None
                    sts, data = self.getPage(url, params2, post_data2)
                    printDBG("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
                    printDBG(sts)
                    printDBG("------------------------------------------------------------------")
                    printDBG(data)
                    printDBG("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
                else:
                    # --- JS challenge variant ---
                    # pick the inline script that performs setTimeout + submit()
                    dat = self.ph.getAllItemsBeetwenNodes(verData, ('<script', '>'), ('</script', '>'), False)
                    for item in dat:
                        if 'setTimeout' in item and 'submit()' in item:
                            dat = item
                            break
                    decoded = ''
                    # base64 blob = minimal DOM/jQuery shim so the challenge
                    # script can run inside the embedded JS interpreter
                    jscode = base64.b64decode('''ZnVuY3Rpb24gc2V0VGltZW91dCh0LGUpe2lwdHZfcmV0LnRpbWVvdXQ9ZSx0KCl9dmFyIGlwdHZfcmV0PXt9LGlwdHZfZnVuPW51bGwsZG9jdW1lbnQ9e30sd2luZG93PXRoaXMsZWxlbWVudD1mdW5jdGlvbih0KXt0aGlzLl9uYW1lPXQsdGhpcy5fc3JjPSIiLHRoaXMuX2lubmVySFRNTD0iIix0aGlzLl9wYXJlbnRFbGVtZW50PSIiLHRoaXMuc2hvdz1mdW5jdGlvbigpe30sdGhpcy5hdHRyPWZ1bmN0aW9uKHQsZSl7cmV0dXJuInNyYyI9PXQmJiIjdmlkZW8iPT10aGlzLl9uYW1lJiZpcHR2X3NyY2VzLnB1c2goZSksdGhpc30sdGhpcy5maXJzdENoaWxkPXtocmVmOmlwdHZfZG9tYWlufSx0aGlzLnN0eWxlPXtkaXNwbGF5OiIifSx0aGlzLnN1Ym1pdD1mdW5jdGlvbigpe3ByaW50KEpTT04uc3RyaW5naWZ5KGlwdHZfcmV0KSl9LE9iamVjdC5kZWZpbmVQcm9wZXJ0eSh0aGlzLCJzcmMiLHtnZXQ6ZnVuY3Rpb24oKXtyZXR1cm4gdGhpcy5fc3JjfSxzZXQ6ZnVuY3Rpb24odCl7dGhpcy5fc3JjPXR9fSksT2JqZWN0LmRlZmluZVByb3BlcnR5KHRoaXMsImlubmVySFRNTCIse2dldDpmdW5jdGlvbigpe3JldHVybiB0aGlzLl9pbm5lckhUTUx9LHNldDpmdW5jdGlvbih0KXt0aGlzLl9pbm5lckhUTUw9dH19KSxPYmplY3QuZGVmaW5lUHJvcGVydHkodGhpcywidmFsdWUiLHtnZXQ6ZnVuY3Rpb24oKXtyZXR1cm4iIn0sc2V0OmZ1bmN0aW9uKHQpe2lwdHZfcmV0LmFuc3dlcj10fX0pfSwkPWZ1bmN0aW9uKHQpe3JldHVybiBuZXcgZWxlbWVudCh0KX07ZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQ9ZnVuY3Rpb24odCl7cmV0dXJuIG5ldyBlbGVtZW50KHQpfSxkb2N1bWVudC5jcmVhdGVFbGVtZW50PWZ1bmN0aW9uKHQpe3JldHVybiBuZXcgZWxlbWVudCh0KX0sZG9jdW1lbnQuYXR0YWNoRXZlbnQ9ZnVuY3Rpb24oKXtpcHR2X2Z1bj1hcmd1bWVudHNbMV19Ow==''')
                    jscode = "var location = {hash:''}; var iptv_domain='%s';\n%s\n%s\niptv_fun();" % (domain, jscode, dat)  #cfParams['domain']
                    printDBG("+ CODE +")
                    printDBG(jscode)
                    printDBG("++++++++")
                    ret = iptv_js_execute(jscode)
                    decoded = byteify(json.loads(ret['data'].strip()))
                    verData = self.ph.getDataBeetwenReMarkers(verData, re.compile('<form[^>]+?id="challenge-form"'), re.compile('</form>'), False)[1]
                    printDBG(">>")
                    printDBG(verData)
                    printDBG("<<")
                    verUrl = _getFullUrl(self.ph.getSearchGroups(verData, 'action="([^"]+?)"')[0])
                    get_data = dict(re.findall(r'<input[^>]*name="([^"]*)"[^>]*value="([^"]*)"[^>]*>', verData))
                    get_data['jschl_answer'] = decoded['answer']
                    verUrl += '?'
                    for key in get_data:
                        verUrl += '%s=%s&' % (key, get_data[key])
                    # NOTE(review): verUrl built just above is immediately
                    # overwritten below (kept from the original)
                    verUrl = _getFullUrl(self.ph.getSearchGroups(verData, 'action="([^"]+?)"')[0]) + '?jschl_vc=%s&pass=%s&jschl_answer=%s' % (get_data['jschl_vc'], get_data['pass'], get_data['jschl_answer'])
                    verUrl = _getFullUrl2(verUrl)
                    params2 = dict(params)
                    params2['load_cookie'] = True
                    params2['save_cookie'] = True
                    params2['header'] = dict(params.get('header', {}))
                    params2['header'].update({'Referer': url, 'User-Agent': cfParams.get('User-Agent', ''), 'Accept-Encoding': 'text'})
                    printDBG("Time spent: [%s]" % (time.time() - start_time))
                    # wait out the challenge's mandated delay before verifying
                    if current == 1:
                        GetIPTVSleep().Sleep(1 + (decoded['timeout'] / 1000.0) - (time.time() - start_time))
                    else:
                        GetIPTVSleep().Sleep((decoded['timeout'] / 1000.0))
                    printDBG("Time spent: [%s]" % (time.time() - start_time))
                    printDBG("Timeout: [%s]" % decoded['timeout'])
                    sts, data = self.getPage(verUrl, params2, post_data)
            except Exception:
                printExc()
                break
        else:
            break
    return sts, data
def getPage(self, baseUrl, addParams={}, post_data=None):
    """Fetch a page, solving the site's 'DDoS' JS protection when present.

    On the first 'DDoS' answer the cookie file is dropped and the request
    simply retried; on later ones the inline challenge script (the one
    containing 'xhr.open') is run through the bundled cinemaxx1/cinemaxx2
    byte-compiled JS helpers, the resulting cookies are written to the
    cookie jar, the mandated delay is slept off, and the fetch retried
    (max 4 attempts overall).

    NOTE(review): control flow reconstructed from a collapsed source —
    the pairing of the two trailing 'else: break' branches (inner result
    check vs. the 'DDoS' check) should be confirmed against upstream.

    :return: (sts, data); on success self.cm.meta['url'] is restored to
             the first successfully fetched URL (cUrl).
    """
    tries = 0
    cUrl = ''
    while tries < 4:
        tries += 1
        if addParams == {}:
            addParams = dict(self.defaultParams)
        sts, data = self.cm.getPage(baseUrl, addParams, post_data)
        if not sts:
            return sts, data
        cUrl = self.cm.meta['url']
        if 'DDoS' in data:
            if tries == 1:
                # first hit: drop cookies and simply retry
                rm(self.COOKIE_FILE)
                continue
            timestamp = time.time() * 1000
            # pick the inline script that performs the challenge XHR
            jscode = ''
            tmp = ph.findall(data, ('<script', '>'), '</script>', flags=0)
            for item in tmp:
                if 'xhr.open' in item:
                    jscode = item
                    break
            js_params = [{'path': GetJSScriptFile('cinemaxx1.byte')}]
            js_params.append({'code': jscode})
            ret = js_execute_ext(js_params)
            if ret['sts'] and 0 == ret['code']:
                try:
                    # helper prints: JSON on line 1, sleep time (ms) on line 2
                    tmp = ret['data'].split('\n', 1)
                    sleep_time = int(float(tmp[1]))
                    tmp = json_loads(tmp[0])
                    url = self.getFullUrl(tmp['1'], cUrl)
                    params = dict(addParams)
                    params['header'] = MergeDicts(self.HTTP_HEADER, {'Referer': cUrl})
                    sts2, data2 = self.cm.getPage(url, params)
                    if not sts2:
                        break
                    js_params = [{'path': GetJSScriptFile('cinemaxx2.byte')}]
                    js_params.append({'code': data2 + 'print(JSON.stringify(e2iobj));'})
                    ret = js_execute_ext(js_params)
                    if ret['sts'] and 0 == ret['code']:
                        # import the cookies computed by the JS helper
                        cj = self.cm.getCookie(self.COOKIE_FILE)
                        for item in json_loads(ret['data'])['cookies']:
                            for cookieKey, cookieValue in item.iteritems():  # Python 2 dict API
                                cookieItem = cookielib.Cookie(version=0, name=cookieKey, value=cookieValue, port=None, port_specified=False, domain='.' + self.cm.getBaseUrl(cUrl, True), domain_specified=True, domain_initial_dot=True, path='/', path_specified=True, secure=False, expires=time.time() + 3600 * 48, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
                                cj.set_cookie(cookieItem)
                        cj.save(self.COOKIE_FILE, ignore_discard=True)
                        # sleep off whatever remains of the mandated delay
                        sleep_time -= time.time() * 1000 - timestamp
                        if sleep_time > 0:
                            GetIPTVSleep().Sleep(int(math.ceil(sleep_time / 1000.0)))
                        continue
                    else:
                        break
                except Exception:
                    printExc()
        else:
            break
    if sts and cUrl:
        self.cm.meta['url'] = cUrl
    return sts, data
sPattern = "(\s*eval\s*\(\s*function(?:.|\s)+?)<\/script>" aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): print 'code Dean Edwards Packer' sUnpacked = cPacker().unpack(aResult[1][0]) sPattern = '\("src", *"([^\)"<>]+?)"\)' aResult = oParser.parse(sUnpacked, sPattern) if (aResult[0] == True): api_call = aResult[1][0] #Troisieme test, lien non code if not api_call: sPattern = '<source src="([^"]+)" type="video[^"]*"\/>' aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): print 'non code' api_call = aResult[1][0] #print 'url : ' + api_call if (api_call): api_call = api_call + '|User-Agent=' + UA + '&Referer=' + self.__sUrl GetIPTVSleep().Sleep(6) #xbmc.sleep(6000) return True, api_call return False, False
def parserVIDSTREAM(self, url, hst='vidstream'):
    """Resolve a vidstream/egybest player page into (url, quality) links.

    If the page already exposes a <source src=...> it is used directly.
    Otherwise the page's anti-bot 'function...' script is handed to the
    VidStream() helper, which yields OUT = [verify-page path, cv path,
    post data]; those are replayed (page fetch, then AJAX POST) and when
    the server answers 'ok' the player page is re-fetched with '?r='/'&r='
    appended, which now carries the real <source>.

    :param url: player page URL.
    :param hst: 'vidstream' (uses VID_URL + its own cookie file) or any
                other value (e.g. 'egy': uses MAIN_URL + host cookies).
    :return: list of (url, quality_id) tuples; None when the first page
             fetch fails (original returns bare).
    """
    if hst == 'vidstream':
        COOKIE_FILE = GetCookieDir('vidstream55.cookie')
        main_url = self.VID_URL
    else:
        COOKIE_FILE = self.COOKIE_FILE
        main_url = self.MAIN_URL
    printDBG('parserVIDSTREAM_Tsiplayer_egybest baseUrl[%s]' % url)
    HTTP_HEADER = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0',
        'Accept': 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate'
    }
    http_params = {'header': HTTP_HEADER, 'with_metadata': True, 'cookiefile': COOKIE_FILE, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True}
    sts, data = self.cm.getPage(url, http_params)
    if not sts:
        return
    url2 = re.findall("<source src=[\"'](.*?)[\"']", data)
    #printDBG('Data0='+data)
    if (not url2) or ('/' not in url2[0]):
        # look for javascript
        script = ''
        tmp_script = re.findall("<script.*?>(.*?)</script>", data, re.S)
        for s in tmp_script:
            if s.startswith('function'):
                script = s
                break
        if script:
            printDBG(script)
            printDBG("------------ Gettttttt")
            # project helper: decodes the anti-bot script into
            # [verify path, cv path, post data] or an 'ERR:...' string
            OUT = VidStream(script)
            printDBG("------------ OUT" + str(OUT))
            if 'ERR' in str(OUT):
                printDBG('Error: %s' % OUT.replace('ERR:', ''))
            else:
                AJAX_HEADER = {
                    'Accept': '*/*',
                    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                    'Origin': self.cm.getBaseUrl(url),
                    'Referer': url,
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0',
                    'X-Requested-With': 'XMLHttpRequest'
                }
                printDBG('OUT = %s' % str(OUT))
                cookies_ = self.cm.getCookieItems(self.COOKIE_FILE)
                #printDBG('cookies_1='+str(cookies_))
                # load the verification page (sets the required cookies)
                sts, data = self.cm.getPage(main_url + OUT[0], http_params)
                #sts, data = self.cm.getPage('https://s.vidstream.to/style.css', http_params)
                printDBG('cookies_2=' + str(cookies_))
                # NOTE: zone/rb are scraped but not used below (kept as-is)
                zone = self.cm.ph.getSearchGroups(data, '''name=['"]zone['"] value=['"]([^'^"]+?)['"]''')[0]
                rb = self.cm.ph.getSearchGroups(data, '''name=['"]rb['"] value=['"]([^'^"]+?)['"]''')[0]
                #printDBG("------------ zone[%s] rb[%s]" % (zone, rb))
                cv_url = main_url + OUT[1]
                # AJAX POST of the decoded verification payload
                sts, ret = self.cm.getPage(cv_url, {'header': AJAX_HEADER, 'cookiefile': COOKIE_FILE, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True}, OUT[2])
                if sts:
                    printDBG("------------ ret[%s]" % ret)
                    if 'ok' in ret:
                        if '?' in url:
                            url2 = url + "&r="
                        else:
                            url2 = url + "?r="
                        # retry to load the page
                        GetIPTVSleep().Sleep(1)
                        http_params['header']['Referer'] = url
                        sts, data = self.cm.getPage(url2, http_params)
                        printDBG('Data1=' + data)
    urlTab = []
    url3 = re.findall("<source src=[\"'](.*?)[\"']", data)
    if url3:
        printDBG("------------ url3 %s" % url3)
        url3 = self.cm.getFullUrl(url3[0], self.cm.getBaseUrl(url))
        # '0' = direct mp4, '3' = needs further resolving (project convention)
        if 'mp4' in url3:
            urlTab.append((url3, '0'))
        else:
            urlTab.append((url3, '3'))
    return urlTab
referer = self.MAIN_URL if '%%%' in videoUrl: videoUrl,referer,code = videoUrl.split('%%%',2) if not videoUrl.startswith('http'): videoUrl=self.MAIN_URL+videoUrl if 'watch/?v' in videoUrl: try: printDBG('try resolve url0: '+videoUrl) urlTab = self.parserVIDSTREAM(videoUrl,'egy') except Exception, e: printDBG('ERREUR:'+str(e)) else: addParams0 = dict(self.defaultParams) addParams0['header']['Referer']=referer http_params['header']['Referer']=referer sts, data = self.cm.getPage(self.MAIN_URL+'/'+atob(code),http_params) printDBG('data_meta0='+str(data.meta)) GetIPTVSleep().Sleep(5) sts, data = self.cm.getPage(videoUrl,http_params) if sts: printDBG('data='+str(data)) printDBG('data_meta1='+str(data.meta)) if False: URL = data.meta['location'] VID_URL = urlparser.getDomain(URL, onlyDomain=False) if VID_URL.endswith('/'): VID_URL = VID_URL[:-1] self.VID_URL = VID_URL printDBG('HOST vstream = '+self.VID_URL) try: printDBG('try resolve url1: '+URL) urlTab = self.parserVIDSTREAM(URL) except Exception, e:
def parserVIDSTREAM(self, url, hst='vidstream'):
    """Resolve a vidstream/egybest player page into (url, quality) links.

    Older variant that deobfuscates the page's packed anti-bot script by
    hand: it extracts the rotation step, the obfuscated word list and the
    three character lists, rebuilds the verification string ('bigString'),
    POSTs it to /cv.php and — on 'ok' — re-fetches the player page with
    '?r'/'&r' appended to reveal the real <source>.

    WARNING: uses eval()/exec() on text scraped from the remote page.

    :param url: player page URL.
    :param hst: 'vidstream' (VID_URL + own cookie file) or other
                (MAIN_URL + host cookies).
    :return: list of (url, quality_id) tuples; None on early failure.
    """
    if hst == 'vidstream':
        COOKIE_FILE = GetCookieDir('vidstream5.cookie')
        main_url = self.VID_URL
    else:
        COOKIE_FILE = self.COOKIE_FILE
        main_url = self.MAIN_URL
    printDBG('parserVIDSTREAM_Tsiplayer_egybest baseUrl[%s]' % url)
    HTTP_HEADER = {
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0',
        'Accept': 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate'
    }
    http_params = {'header': HTTP_HEADER, 'cookiefile': COOKIE_FILE, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True}
    sts, data = self.cm.getPage(url, http_params)
    if not sts:
        return
    url2 = re.findall("<source src=[\"'](.*?)[\"']", data)
    #printDBG('Data0='+data)
    if (not url2) or ('/' not in url2[0]):
        # look for javascript
        script = ''
        tmp_script = re.findall("<script.*?>(.*?)</script>", data, re.S)
        for s in tmp_script:
            if s.startswith('function'):
                script = s
                break
        if script:
            printDBG(script)
            printDBG("------------")
            # model for step }(a, 0x1b4));
            # search for big list of words
            tmpStep = re.findall("}\(" + self.varconst + "a ?,(0x[0-9a-f]{1,3})\)\);", script)
            if tmpStep:
                step = eval(tmpStep[0])  # hex literal like 0x1b4
            else:
                step = 128
            # NOTE(review): tmpStep[0] raises IndexError when the default
            # branch above (step = 128) was taken — latent bug kept as-is.
            printDBG("----> step: %s -> %s" % (tmpStep[0], step))
            post_key = re.findall("'data':{'(_[0-9a-zA-Z]{10,20})':'ok'", script)
            if post_key:
                post_key = post_key[0]
                printDBG("post_key : '%s'" % post_key)
            else:
                printDBG("Not found post_key ... check code")
                return
            tmpVar = re.findall("(var " + self.varconst + "a=\[.*?\];)", script)
            if tmpVar:
                wordList = []
                # turn the JS array literal into a Python assignment and exec it
                var_list = tmpVar[0].replace('var ' + self.varconst + 'a=', 'wordList=').replace("];", "]").replace(";", "|")
                printDBG("-----var_list-------")
                printDBG(var_list)
                exec(var_list)
                printDBG(script)
                # search for second list of vars
                tmpVar2 = re.findall(";" + self.varconst + "c\(\);(var .*?)\$\('\*'\)", script, re.S)
                if tmpVar2:
                    printDBG("------------")
                    printDBG(tmpVar2[0])
                    # rename the three obfuscated list vars to charList0..2
                    threeListNames = re.findall("var (_[a-zA-z0-9]{4,8})=\[\];", tmpVar2[0])
                    printDBG(str(threeListNames))
                    for n in range(0, len(threeListNames)):
                        tmpVar2[0] = tmpVar2[0].replace(threeListNames[n], "charList%s" % n)
                    printDBG("-------tmpVar2-----")
                    printDBG(tmpVar2[0])
                    # substitutions of terms from first list
                    printDBG("------------ len(wordList) %s" % len(wordList))
                    for i in range(0, len(wordList)):
                        r = self.varconst + "b('0x{0:x}')".format(i)
                        printDBG('rrrrrrrrrrrrrr=' + r)
                        # rotate the index by 'step', wrapping around
                        j = i + step
                        while j >= len(wordList):
                            j = j - len(wordList)
                        tmpVar2[0] = tmpVar2[0].replace(r, "'%s'" % wordList[j])
                    var2_list = tmpVar2[0].split(';')
                    printDBG("------------ var2_list %s" % str(var2_list))
                    # dicts so charListN[<int>]=... assignments from exec work
                    charList0 = {}
                    charList1 = {}
                    charList2 = {}
                    for v in var2_list:
                        if v.startswith('charList'):
                            exec(v)
                    # rebuild the verification string from the char maps
                    bigString = ''
                    for i in range(0, len(charList2)):
                        if charList2[i] in charList1:
                            bigString = bigString + charList1[charList2[i]]
                    printDBG("------------ bigString %s" % bigString)
                    sts, data = self.cm.getPage(main_url + "/cv.php", http_params)
                    # NOTE: zone/rb scraped but not used below (kept as-is)
                    zone = self.cm.ph.getSearchGroups(data, '''name=['"]zone['"] value=['"]([^'^"]+?)['"]''')[0]
                    rb = self.cm.ph.getSearchGroups(data, '''name=['"]rb['"] value=['"]([^'^"]+?)['"]''')[0]
                    printDBG("------------ zone[%s] rb[%s]" % (zone, rb))
                    cv_url = main_url + "/cv.php?verify=" + bigString
                    postData = {post_key: 'ok'}
                    AJAX_HEADER = {
                        'Accept': '*/*',
                        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                        'Origin': self.cm.getBaseUrl(url),
                        'Referer': url,
                        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
                        'X-Requested-With': 'XMLHttpRequest'
                    }
                    sts, ret = self.cm.getPage(cv_url, {'header': AJAX_HEADER, 'cookiefile': COOKIE_FILE, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True}, postData)
                    if sts:
                        printDBG("------------ ret[%s]" % ret)
                        if 'ok' in ret:
                            if '?' in url:
                                url2 = url + "&r"
                            else:
                                url2 = url + "?r"
                            # retry to load the page
                            GetIPTVSleep().Sleep(1)
                            http_params['header']['Referer'] = url
                            sts, data = self.cm.getPage(url2, http_params)
                            #printDBG('Data1='+data)
    urlTab = []
    url3 = re.findall("<source src=[\"'](.*?)[\"']", data)
    if url3:
        printDBG("------------ url3 %s" % url3)
        url3 = self.cm.getFullUrl(url3[0], self.cm.getBaseUrl(url))
        # '0' = direct mp4, '3' = needs further resolving (project convention)
        if 'mp4' in url3:
            urlTab.append((url3, '0'))
        else:
            urlTab.append((url3, '3'))
    return urlTab
def showhosts(self,cItem):
    # Build the host/stream selection list for one movie/episode item and
    # emit each accepted entry via self.addVideo().  No return value.
    #
    # cItem keys read: 'lng' ('eng'/'ar', default 'eng'), 'elm' (metadata
    # dict with 'streams'/'tvdb_id'/'title'), 'icon', 'title', 'import',
    # optional 'year'.
    # NOTE(review): the distributed file has collapsed line breaks; the
    # indentation below is the reconstructed structure - verify against a
    # pristine copy of the plugin.
    urlTab = []  # NOTE(review): never used in this method
    lng=cItem.get('lng','eng')
    elm=cItem['elm']
    if lng=='eng':
        printDBG('start showhosts'+str(elm))
        streams=elm.get('streams',[])
        if streams==[]:
            # No inline stream list - fetch one from the API by tvdb id.
            streams=elm.get('tvdb_id','')
            URL=self.MAIN_URL+'/episode/'+streams
            sts, data = self.getPage(URL,self.defaultParams)
            if sts:
                data = json_loads(data)
                streams = data.get('streams',[])
        Image = cItem['icon']
        stream_lst=[]  # stream URLs already emitted (dedup)
        titre_ = cItem['title'].replace('>> ','')
        # Drop the colored " - ..." suffix from the display title, if any.
        if ' - '+tscolor('\c0000????') in titre_:
            titre_=titre_.split(' - '+tscolor('\c0000????'))[0]
        for elm1 in streams:
            stream = elm1['stream']
            name = elm1.get('source','!!')
            type_ = elm1.get('type',0)
            if (stream not in stream_lst):
                stream_lst.append(stream)
                # Map the numeric stream type to a display name, resolver
                # tag (Type_) and icon.  Type_ == 'Non' disables the entry.
                if type_ in [9004]:
                    Name = 'MOMOMESH [Multi]'
                    Url = stream
                    Type_ = 'Momomesh'
                    Desc = 'Multi Hosts'
                    Image = 'https://momomesh.tv/wp-content/uploads/2020/07/momomesh.logo_.hd_.png'
                elif type_ in [9000]:
                    Name = 'GDDB [Multi]'
                    Url = stream
                    Type_ = 'GDDB'
                    Desc = 'Multi Hosts'
                    Image = 'https://cdn.domestika.org/c_fill,dpr_2.0,h_96,t_base_params.format_jpg,w_96/v1311703747/avatars/000/054/289/54289-original.jpg'
                elif type_ in [9001]:
                    Name = 'OpenloadMovies [Multi]'
                    Url = stream
                    Type_ = 'openloadmovies'
                    Desc = 'Multi Hosts'
                    Image = 'https://openloadmovies.cam/wp-content/uploads/2020/02/openload-movie-desktop.png'
                elif type_ in [61,62,63]:
                    Name = 'Gomostream [Multi]'
                    Url = stream
                    Type_ = 'Gomostream'
                    Desc = 'Multi Hosts'
                    Image = 'https://i.ibb.co/TH2kJm6/putstream.png'
                elif type_ in [98,99,64,93]:
                    Name = 'VIP ['+name.replace('VIP','').strip()+']'
                    Url = stream
                    Type_ = 'VIP'
                    # NOTE(review): the next line immediately overrides
                    # 'VIP' - the entry looks deliberately disabled.
                    Type_ = 'Non'
                    Desc = 'Direct Links'
                    Image = 'https://i.ibb.co/L6vctvL/VIP-1024x672.png'
                elif type_ in [39]:
                    Name = 'Vidcloud [Multi]'
                    Url = stream
                    Type_ = 'Vidcloud'
                    Desc = 'Multi Hosts'
                    Image = 'https://vidcloud.icu/img/logo_vid.png?1'
                elif type_ in [27,28,29,30,31]:
                    Name = 'Youtube'
                    Url = stream
                    Type_ = 'Resolve'
                    Desc = 'Youtube'
                    # NOTE(review): no Image assigned here - the previous
                    # icon is reused.
                elif type_ in [77]:
                    Name = '123Movies [Multi]'
                    Url = stream
                    Type_ = '123Movies'
                    Desc = 'Multi Hosts'
                    Image = 'https://i.ibb.co/kQGDxLc/x123.png'
                elif type_ in [20]:
                    Name = 'Vidlink [Multi]'
                    Url = stream
                    Type_ = 'Vidlink'
                    Desc = 'Multi Hosts\nM3u8 server\nWith subs'
                    Image = 'https://i.ibb.co/bJwn22Q/vidlink.png'
                elif type_ in [71]:
                    Name = 'Seehd.pl [Multi]'
                    Url = stream
                    Type_ = 'Seehd'
                    Desc = 'Multi Hosts'
                    Image = 'https://i.ibb.co/mJymy06/seehd.png'
                elif type_ in [73]:
                    Name = 'Chillax [Multi]'
                    Url = stream
                    Type_ = 'Chillax'
                    Desc = '---- > '+tscolor('\c00????00')+'Work only in eplayer3 '+tscolor('\c0000??00')+'WITH BUFFERING '+tscolor('\c00??????')+'<----'
                    Desc = Desc + '\n'+tscolor('\c00??????')+'Good M3u8 Server\nMust wait for cloudflare bypass'
                    Image = 'https://i.ibb.co/r2kKX5s/chillax.png'
                elif type_ in [11]:
                    Name = 'DB-Media [Multi]'
                    Url = stream
                    Type_ = 'DB-Media'
                    Desc = 'Multi Hosts, Need To Resolve captcha'
                    Image = 'https://i.ibb.co/6tTVtx5/Logo-d-B-Media.png'
                elif type_ in [2,4,9,6]:
                    # Plain hoster link - resolved later by the URL parser.
                    printDBG('eeeeeeeeeeeeeee'+str(elm1))
                    Name = gethostname(stream).capitalize()
                    Url = stream
                    Type_ = 'Resolve'
                    Desc = stream
                elif type_ in [48,43,]: #48:pl ,43:kisscartoon,49:tvz | Cloudflare
                    Name = '|'+str(type_)+'| '+name
                    Url = stream
                    Type_ = 'Non'#'na'
                    Desc = stream
                else:
                    # Unknown type: only exposed when the tsiplayer plugin
                    # is installed on the box, otherwise disabled.
                    printDBG('eeeeeeeeeeeeeee'+str(elm1))
                    Name = '|'+str(type_)+'| '+name
                    Url = stream
                    if os.path.exists('/usr/lib/enigma2/python/Plugins/tsiplayer'):
                        Type_ = 'na'
                        Name = Name + ' <---------- NEW'
                        Image = 'https://i.ibb.co/Q8ZRP0X/yaq9y3ab.png'
                    else:
                        Type_ = 'Non'#'na'
                    Desc = stream
                if Type_ != 'Non':
                    printDBG('elm'+str(elm1))
                    self.addVideo({'import':cItem['import'],'category' : 'host2','url': Url,'title':titre_,'desc':tscolor('\c00????00')+Name+tscolor('\c00??????')+'\n'+Desc,'icon':Image,'hst':'tshost','Type_':Type_} )
    elif lng=='ar':
        # Arabic catalogue: query every Arabic host module for results.
        hsts = ['host_egybest','host_faselhd','host_akoam','host_akwam','host_movs4u','host_cima4u','host_arablionz','host_arabseed']
        hsts1 = []  # NOTE(review): never used
        if IsPython3():
            # Fire one lookup thread per host, then give them 5 s to run.
            for hst in hsts:
                Extra = 'from tsiplayer.'+hst+' import '
                str_ch = elm.get('title','')
                year = cItem.get('year','')
                _thread.start_new_thread( self.get_results, (Extra,str_ch,year,) )
            GetIPTVSleep().Sleep(5)
        else:
            # Python 2 path: custom thread wrapper with a 4 s join timeout.
            threads = []
            for hst in hsts:
                Extra = 'from tsiplayer.'+hst+' import '
                str_ch = elm.get('title','')
                year = cItem.get('year','')
                threads.append(TsThread(self.get_results,Extra,str_ch,year))
            for i in threads:
                i.start()
                i.join(timeout=4)
def getVideoLinks(self, baseUrl):
    """
    Resolve one selected link entry into playable video URLs.

    Marks the matching entry in self.cacheLinks as used ('*name*'),
    follows the site's 'golink' cookie redirection when the URL is not
    handled directly by a hoster parser (including the countdown page
    that yields a 'direct_link' via AJAX), then delegates to
    self.up.getVideoLinkExt().  Returns a list of {'name', 'url'} dicts.
    """
    printDBG("AkoAm.getVideoLinks [%s]" % baseUrl)
    baseUrl = strwithmeta(baseUrl)
    urlTab = []

    # Mark the requested link as the used one (first match per list only).
    if self.cacheLinks:
        for key in self.cacheLinks:
            for entry in self.cacheLinks[key]:
                if baseUrl not in entry['url']:
                    continue
                if not entry['name'].startswith('*'):
                    entry['name'] = '*' + entry['name'] + '*'
                break

    if self.up.checkHostSupport(baseUrl) != 1:
        httpParams = {'header': dict(self.HTTP_HEADER)}
        httpParams['header']['Referer'] = baseUrl.meta.get('Referer', self.getMainUrl())
        httpParams['max_data_size'] = 0  # headers/cookies only, body not needed
        try:
            # Fresh session so only this request's 'golink' cookie is seen.
            self.cm.clearCookie(self.COOKIE_FILE, removeNames=['golink'])
            httpParams['use_new_session'] = True
            self.getPage(baseUrl, httpParams)
            httpParams.pop('use_new_session')
            cUrl = self.cm.meta['url']
            cookies = self.cm.getCookieItems(self.COOKIE_FILE)
            if 'golink' in cookies:
                rawGo = cookies['golink']
                printDBG(rawGo)
                goInfo = byteify(json.loads(urllib.unquote(rawGo)))
                printDBG(goInfo)
                # The cookie carries the real route to follow.
                baseUrl = goInfo['route']
                httpParams = dict(self.defaultParams)
                httpParams['header'] = dict(self.HTTP_HEADER)
                httpParams['header']['Referer'] = cUrl
                sts, pageData = self.getPage(baseUrl, httpParams)
                if sts:
                    cUrl = pageData.meta['url']
                    frameSrc = self.cm.ph.getSearchGroups(pageData, '''<iframe[^>]+?src=['"]([^"^']+?)['"]''', 1, True)[0]
                    url = self.getFullUrl(frameSrc)
                    if url != '':
                        # Hoster iframe found - let the generic resolver handle it.
                        baseUrl = strwithmeta(url, {'Referer': cUrl})
                    else:
                        # Countdown page: wait the advertised delay, then
                        # re-request the same URL via AJAX for the JSON answer.
                        waitStr = self.cleanHtmlStr(self.cm.ph.getDataBeetwenNodes(pageData, ('<div', '>', 'timerHolder'), ('</div', '>'), False)[1])
                        GetIPTVSleep().Sleep(int(waitStr))
                        httpParams = dict(self.defaultParams)
                        httpParams['header'] = dict(self.AJAX_HEADER)
                        httpParams['header']['Referer'] = cUrl
                        sts, pageData = self.getPage(cUrl, httpParams, {})
                        if sts:
                            printDBG(pageData)
                            answer = byteify(json.loads(pageData))
                            urlTab.append({
                                'name': 'direct_link',
                                'url': self.getFullUrl(answer['direct_link'])
                            })
        except Exception:
            printExc()

    urlTab.extend(self.up.getVideoLinkExt(baseUrl))
    return urlTab
def _unshorten_linkbucks(self, uri):
    '''
    (Attempt) to decode linkbucks content. HEAVILY based on the OSS
    jDownloader codebase. This has necessidated a license change.

    Returns a 2-tuple: (resolved target URL, HTTP status code) on
    success, or (original uri / placeholder, error message string) on
    failure.
    '''
    r = requests.get(uri, headers=HTTP_HEADER, timeout=self._timeout)
    firstGet = time.time()  # the service enforces a ~5 s delay before releasing the target
    baseloc = r.url
    # BUGFIX: this alternation pattern was previously tested with the `in`
    # operator (a literal substring check), so it could never match.  It is
    # a regex and must be evaluated with re.search().
    if "/notfound/" in r.url or \
            re.search(r"(>Link Not Found<|>The link may have been deleted by the owner|To access the content, you must complete a quick survey\.)", r.text):
        return uri, 'Error: Link not found or requires a survey!'

    link = None
    content = r.text
    # Known locations of the target URL in the page, tried in order until
    # one yields a usable link (self.inValidate() is truthy while `link`
    # is still missing/unusable).
    regexes = [
        r"<div id=\"lb_header\">.*?/a>.*?<a.*?href=\"(.*?)\".*?class=\"lb",
        r"AdBriteInit\(\"(.*?)\"\)",
        r"Linkbucks\.TargetUrl = '(.*?)';",
        r"Lbjs\.TargetUrl = '(http://[^<>\"]*?)'",
        r"src=\"http://static\.linkbucks\.com/tmpl/mint/img/lb\.gif\" /></a>.*?<a href=\"(.*?)\"",
        r"id=\"content\" src=\"([^\"]*)",
    ]
    for regex in regexes:
        if self.inValidate(link):
            link = find_in_text(regex, content)

    if self.inValidate(link):
        # Frame-based layout fallback.
        # NOTE(review): `[0-9+]` matches a single digit-or-plus character;
        # possibly intended as `[0-9]+` - kept as-is (jDownloader heritage).
        match = find_in_text(r"noresize=\"[0-9+]\" src=\"(http.*?)\"", content)
        if match:
            link = find_in_text(r"\"frame2\" frameborder.*?src=\"(.*?)\"", content)

    if self.inValidate(link):
        # Script-based layout: replay the javascript token handshake.
        scripts = re.findall(
            "(<script type=\"text/javascript\">[^<]+</script>)", content)
        if not scripts:
            return uri, "No script bodies found?"
        js = False
        for script in scripts:
            # cleanup: strip // line comments before matching
            script = re.sub(r"[\r\n\s]+\/\/\s*[^\r\n]+", "", script)
            if re.search(
                    r"\s*var\s*f\s*=\s*window\['init'\s*\+\s*'Lb'\s*\+\s*'js'\s*\+\s*''\];[\r\n\s]+",
                    script):
                js = script
        if not js:
            return uri, "Could not find correct script?"

        token = find_in_text(r"Token\s*:\s*'([a-f0-9]{40})'", js)
        if not token:
            token = find_in_text(r"\?t=([a-f0-9]{40})", js)
        # BUGFIX: a bare `assert token` stood here.  It is stripped under
        # python -O and raised AssertionError instead of reporting failure;
        # the combined check below already covers a missing token.
        authKeyMatchStr = r"A(?:'\s*\+\s*')?u(?:'\s*\+\s*')?t(?:'\s*\+\s*')?h(?:'\s*\+\s*')?K(?:'\s*\+\s*')?e(?:'\s*\+\s*')?y"
        l1 = find_in_text(
            r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s*(\d+?);", js)
        l2 = find_in_text(
            r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s?params\['" +
            authKeyMatchStr + r"'\]\s*\+\s*(\d+?);", js)
        if not (l1 and l2 and token):
            return uri, "Missing required tokens?"
        authkey = int(l1) + int(l2)

        # Handshake: /director and the jquery beacon must both be fetched
        # (with the session cookies) before the target may be requested.
        p1_url = urljoin(baseloc, "/director/?t={tok}".format(tok=token))
        r2 = requests.get(p1_url,
                          headers=HTTP_HEADER,
                          timeout=self._timeout,
                          cookies=r.cookies)
        p1_url = urljoin(
            baseloc,
            "/scripts/jquery.js?r={tok}&{key}".format(tok=token, key=l1))
        # Beacon request only - the response body is never used.
        requests.get(p1_url,
                     headers=HTTP_HEADER,
                     timeout=self._timeout,
                     cookies=r.cookies)
        # Honour the mandatory ~5 s interstitial delay, minus time already spent.
        time_left = 5.033 - (time.time() - firstGet)
        GetIPTVSleep().Sleep(max(time_left, 0))
        p3_url = urljoin(
            baseloc,
            "/intermission/loadTargetUrl?t={tok}&aK={key}&a_b=false".
            format(tok=token, key=str(authkey)))
        r3 = requests.get(p3_url,
                          headers=HTTP_HEADER,
                          timeout=self._timeout,
                          cookies=r2.cookies)
        resp_json = json_loads(r3.text)
        if "Url" in resp_json:
            return resp_json['Url'], r3.status_code
    elif not self.inValidate(link):
        # BUGFIX: a link recovered by the regex/frame fallbacks above was
        # previously discarded and the function fell through to the
        # failure return; hand it back to the caller instead.
        return link, r.status_code
    return "Wat", "wat"