def get_media_url(self, host, media_id):
    """Resolve an eporner-style page to a direct MP4 stream URL.

    Scrapes the vid/hash pair from the page JS, derives the XHR hash by
    base-36 encoding each 8-hex-digit chunk of the 32-char hash, queries
    the xhr/video API and picks the best mp4 source.

    :param host: hostname the link was matched on
    :param media_id: site-specific video id
    :returns: stream URL with playback headers appended
    :raises ResolverError: when no playable file is found
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.RAND_UA}
    html = self.net.http_GET(web_url, headers=headers).content
    if html:
        try:
            pattern = r"""{\s*vid:\s*'([^']+)',\s*hash\s*:\s*["\']([\da-f]{32})"""
            # renamed from id/hash so the builtins are not shadowed
            vid, vhash = re.findall(pattern, html)[0]
            # API hash: each 8-hex-digit chunk re-encoded in base 36
            hash_code = ''.join(self.encode_base_n(int(vhash[lb:lb + 8], 16), 36)
                                for lb in range(0, 32, 8))
            load_url = 'https://www.eporner.com/xhr/video/%s?hash=%s&device=generic&domain=www.eporner.com&fallback=false&embed=false&supportedFormats=mp4' % (vid, hash_code)
            headers.update({'Referer': web_url})
            r = self.net.http_GET(load_url, headers=headers).content
            r = r.replace("\\/", "/")
            r = json.loads(r).get("sources", {}).get('mp4', {})
            sources = [(i, r[i].get("src")) for i in r]
            if len(sources) > 1:
                try:
                    # best quality first, sorting on the numeric part of the label
                    sources.sort(key=lambda x: int(re.sub(r"\D", "", x[0])), reverse=True)
                except Exception:
                    common.logger.log_debug('Scrape sources sort failed |int(re.sub("\\D", "", x[0])|')
            return helpers.pick_source(sources) + helpers.append_headers(headers)
        except Exception:
            raise ResolverError('File not found')
    raise ResolverError('File not found')
def get_media_url(self, host, media_id):
    """Resolve by reassembling a `quality_<N>p` JS concatenation expression.

    The page defines `quality_XXXp = a + '*/literal' + b ...`; only the
    '*/'-prefixed terms are kept, and each stripped term is substituted by
    a matching `var name = value;` assignment when one exists.

    :raises ResolverError: when no quality links are present in the page
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.RAND_UA}
    html = self.net.http_GET(web_url, headers=headers).content
    if html:
        # renamed from `vars` so the builtin is not shadowed
        js_vars = re.findall(r'var\s+(.+?)\s*=\s*(.+?);', html)
        links = re.findall(r'quality_(\d+)p\s*=\s*(.+?);', html)
        if links:
            sources = []
            for source in links:
                try:
                    parts = [i.strip() for i in source[1].split('+')]
                    parts = [i for i in parts if i.startswith('*/')]
                    parts = [re.sub(r'^\*/', '', i) for i in parts]
                    # substitute a var's value for each token when one was scraped
                    parts = [(i, [x[1] for x in js_vars if x[0] == i]) for i in parts]
                    parts = [i[1][0] if i[1] else i[0] for i in parts]
                    link = ''.join(parts)
                    link = re.sub(r'\s|\+|\'|\"', '', link)
                    sources.append([source[0], link])
                except Exception:
                    continue  # best effort: skip malformed quality entries
            return helpers.pick_source(sources)
    raise ResolverError('File not found')
def get_media_url(self, host, media_id):
    """Collect DOWNLOAD anchor links with their quality labels, best first.

    :raises nothing itself; helpers.pick_source handles the empty case
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.FF_USER_AGENT, 'Referer': web_url}
    response = self.net.http_GET(web_url, headers=headers)
    html = response.content
    sources = []
    for r in re.finditer('''href=["']?(?P<url>[^"']+)["']?>DOWNLOAD <span>(?P<label>[^<]+)''', html, re.DOTALL):
        match = r.groupdict()
        # unescape HTML-encoded ampersands (original had a no-op '&'->'&' replace)
        stream_url = match['url'].replace('&amp;', '&')
        label = match.get('label', '0')
        sources.append([label, stream_url])
    if len(sources) > 1:
        try:
            # labels may be non-numeric; don't let the sort kill resolution
            sources.sort(key=lambda x: int(re.sub(r"\D", "", x[0])), reverse=True)
        except ValueError:
            pass
    return helpers.pick_source(sources) + helpers.append_headers(headers)
def get_media_url(self, host, media_id):
    """Parse the page's flashvars JSON and pick a mediaDefinition source URL.

    :raises ResolverError: if the page or the flashvars block is missing
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.RAND_UA}
    html = self.net.http_GET(web_url, headers=headers).content
    if html:
        try:
            headers.update({'Referer': web_url})
            # flashvars is a JSON object literal embedded in the page JS
            flashvars = re.search('''var flashvars\s*=\s*({.+?});''', html).groups()[0]
            # un-escape JS-style "\/" before JSON parsing
            r = json.loads(flashvars.replace("\/", "/")).get("mediaDefinition", {})
            sources = [(i.get("quality"), i.get("videoUrl")) for i in r]
            if sources:
                return helpers.pick_source(sources) + helpers.append_headers(headers)
        except:
            raise ResolverError('File not found')
    raise ResolverError('File not found')
def get_media_url(self, host, media_id):
    """Find the embedded playvideo.php id, unpack the packed player JS and scrape the file URL.

    :raises ResolverError: on unpack failure or when no source is found
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.RAND_UA}
    html = self.net.http_GET(web_url, headers=headers).content
    if html:
        video_id = re.search("""playvideo\.php\?id=(\d+)""", html)
        if video_id:
            video_url = 'http://%s/jwplayer/playvideo.php?id=%s' % (host, video_id.group(1))
            headers.update({'Referer': web_url})
            _html = self.net.http_GET(video_url, headers=headers).content
            if _html:
                try:
                    # the player page is P.A.C.K.E.R.-obfuscated JS
                    _html = jsunpack.unpack(_html)
                except Exception as e:
                    raise ResolverError(e)
                sources = helpers.scrape_sources(_html, patterns=['''file:\s*["'](?P<url>http[^"']+)'''])
                if sources:
                    return helpers.pick_source(sources) + helpers.append_headers(headers)
    raise ResolverError('File not found')
def get_media_url(self, host, media_id):
    """Resolve a page optionally protected by a Sucuri JS challenge, then scrape <source> tags.

    :raises ResolverError: when no live source is found
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.RAND_UA}
    html = self.net.http_GET(web_url, headers=headers).content
    if html:
        if 'sucuri_cloudproxy_js' in html:
            # solve the Sucuri challenge and refetch with its cookie
            cookie = self.sucuri(html)
            headers.update({'Referer': web_url, 'Cookie': cookie})
            html = self.net.http_GET(web_url, headers=headers).content
        sources = re.findall('''<source\s*.+?label=['"](\w+)['"]\s*src=['"]([^'"]+)''', html)
        # the site marks removed qualities with a literal "dead_link" URL
        sources = [(i[0], i[1]) for i in sources if not i[1] == "dead_link"]
        if sources:
            try:
                # best quality first; labels may be non-numeric, so failures are ignored
                sources.sort(key=lambda x: int(re.sub("\D", "", x[0])), reverse=True)
            except:
                pass
            return helpers.pick_source(sources) + helpers.append_headers(headers)
    raise ResolverError('File not found')
def get_media_url(self, host, media_id):
    """Follow an embed page's `link:` target when needed, scrape file/type pairs, resolve redirects.

    :raises ResolverError: wrapping any scrape failure, or when nothing is found
    """
    headers = {'User-Agent': common.RAND_UA}
    web_url = self.get_url(host, media_id)
    html = self.net.http_GET(web_url, headers=headers).content
    if html:
        try:
            if media_id.startswith('embed/'):
                # embed pages only carry a pointer to the real watch page
                web_url = re.search('''link:\s*["']([^"']+)''', html).groups()[0]
                html = self.net.http_GET(web_url, headers=headers).content
            sources = re.findall('''['"]?file['"]?:\s*['"]([^'"]+).+?['"]?type['"]?:\s*['"]([^'"]+)''', html)
            if sources:
                # reorder to (type, url); the follow-up GET resolves any redirect chain
                sources = [(i[1], i[0]) for i in sources]
                return self.net.http_GET(helpers.pick_source(sources), headers=headers).get_url() + helpers.append_headers(headers)
        except Exception as e:
            raise ResolverError(e)
    raise ResolverError('File not found')
def get_media_url(self, host, media_id):
    """Extract the token-endpoint id and quality list from inline JS, POST for tokens, scrape sources.

    :raises ResolverError: on any scrape failure or missing file
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.RAND_UA}
    html = self.net.http_GET(web_url, headers=headers).content
    if html:
        try:
            # matches the trailing IIFE call: ...ajax(url,opts);}})(<id>,<n>,[<quals>]);
            pattern = r"""ajax\(url,opts\);}}\)\(([\d]+),[\d]+,\[([\d,]+)\]\);"""
            url_id, quals = re.findall(pattern, html)[0]
            quals = quals.replace(',', '+')
            headers.update({'Referer': web_url, 'Origin': host})
            post_url = 'https://tkn.kodicdn.com/0000000%s/desktop/%s' % (url_id, quals)
            html = self.net.http_POST(post_url, headers=headers, form_data='').content
            if html:
                sources = helpers.scrape_sources(html, patterns=["""['"](?P<label>\d+)['"]:{[\w":,]+token['"]:['"](?P<url>[^'"]+)"""])
                if sources:
                    return helpers.pick_source(sources) + helpers.append_headers(headers)
        except:
            raise ResolverError('File not found')
    raise ResolverError('File not found')
def get_media_url(self, host, media_id):
    """Find the sexix.net iframe, fetch its playlist and pick the best labelled source.

    :raises ResolverError: when the iframe or playlist yields nothing
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.RAND_UA}
    html = self.net.http_GET(web_url, headers=headers).content
    if html:
        iframe_url = re.search("""<iframe.+?src=["'](http://sexix\.net/v\.php\?(u=.+?))['"]""", html)
        if iframe_url:
            # the playlist endpoint takes the same u=... query as the iframe
            playlist_url = 'http://sexix.net/qaqqew/playlist.php?%s' % iframe_url.group(2)
            headers.update({'Referer': iframe_url.group(1)})
            _html = self.net.http_GET(playlist_url, headers=headers).content
            if _html:
                sources = re.findall("""source file=["']([^"']+).+?label=["']([^"']+)""", _html)
                if sources:
                    sources = [(i[1], i[0]) for i in sources]
                    try:
                        # best quality first; ignore non-numeric labels
                        sources.sort(key=lambda x: int(re.sub("\D", "", x[0])), reverse=True)
                    except:
                        pass
                    headers.update({'Referer': web_url})
                    return helpers.pick_source(sources) + helpers.append_headers(headers)
    raise ResolverError('File not found')
def get_media_url(self, host, media_id):
    """Build the drtuber player_config request (params + md5 pkey) and scrape the CDATA video URL.

    :raises ResolverError: on any scrape failure or missing file
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.RAND_UA}
    html = self.net.http_GET(web_url, headers=headers).content
    if html:
        try:
            # reassemble the JS-concatenated query string ("params += '...' + '...';")
            params = "".join([x.replace("' + '", "") for x in self.between(html, "params += '", "';")])
            vkey = params.split('=')[-1]
            # pkey = md5(vkey + site salt); encode so hashlib works on Python 3 too
            m = hashlib.md5((vkey + 'PT6l13umqV8K827').encode('utf-8'))
            params += '&pkey=%s' % m.hexdigest()
            # py2/py3 compatible unquote (was py2-only urllib.unquote)
            params = urllib_parse.unquote(params)
            url = 'http://www.drtuber.com/player_config/?' + params
            sources_html = self.net.http_GET(url, headers=headers).content
            if sources_html:
                sources = helpers.scrape_sources(sources_html, patterns=["""video_file>\<\!\[CDATA\[(?P<url>[^\]]+)"""])
                if sources:
                    return helpers.pick_source(sources) + helpers.append_headers(headers)
        except Exception:
            raise ResolverError('File not found')
    raise ResolverError('File not found')
def get_media_url(self, host, media_id):
    """Resolve via a fembed-style /api/source endpoint, following domain redirects.

    :raises ResolverError: when the API reports no playable mp4
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.RAND_UA}
    r = self.net.http_GET(web_url, headers=headers)
    if r.get_url() != web_url:
        # the site redirected to another domain; rebuild all URLs against it
        host = re.findall(r'(?://|\.)([^/]+)', r.get_url())[0]
        web_url = self.get_url(host, media_id)
    headers.update({'Referer': web_url})
    api_url = 'https://{0}/api/source/{1}'.format(host, media_id)
    r = self.net.http_POST(api_url, form_data={'r': '', 'd': host}, headers=headers)
    if r.get_url() != api_url:
        # some mirrors only answer on the www. subdomain
        api_url = 'https://www.{0}/api/source/{1}'.format(host, media_id)
        r = self.net.http_POST(api_url, form_data={'r': '', 'd': host}, headers=headers)
    js_result = r.content
    if js_result:
        js_data = json.loads(js_result)
        if js_data.get('success'):
            sources = [(i.get('label'), i.get('file')) for i in js_data.get('data') if i.get('type') == 'mp4']
            common.logger.log(sources)
            sources = helpers.sort_sources_list(sources)
            rurl = helpers.pick_source(sources)
            # HEAD request resolves any redirect to the final stream URL
            str_url = self.net.http_HEAD(rurl, headers=headers).get_url()
            return str_url + helpers.append_headers(headers)
    raise ResolverError('Video not found')
def get_media_url(self, host, media_id):
    """Resolve Dailymotion metadata: read qualities, fetch the auto HLS manifest, pick by label.

    :raises ResolverError: with the API's error title, or when no qualities exist
    """
    web_url = self.get_url(host, media_id)
    headers = {
        'User-Agent': common.RAND_UA,
        'Origin': 'https://www.dailymotion.com',
        'Referer': 'https://www.dailymotion.com/'
    }
    js_result = json.loads(self.net.http_GET(web_url, headers=headers).content)
    if js_result.get('error'):
        raise ResolverError(js_result.get('error').get('title'))
    quals = js_result.get('qualities')
    if quals:
        # the "auto" entry is an HLS master playlist that lists progressive variants
        mbtext = self.net.http_GET(quals.get('auto')[0].get('url'), headers=headers).content
        sources = re.findall('NAME="(?P<label>[^"]+)",PROGRESSIVE-URI="(?P<url>[^#]+)', mbtext)
        return helpers.pick_source(helpers.sort_sources_list(sources)) + helpers.append_headers(headers)
    raise ResolverError('No playable video found.')
def get_media_url(self, host, media_id):
    """Query the streamlare video API and pick the highest-resolution source.

    :raises ResolverError: when the API returns no sources
    """
    web_url = self.get_url(host, media_id)
    api_url = 'https://streamlare.com/api/video/get'
    headers = {
        'User-Agent': common.FF_USER_AGENT,
        'Referer': web_url,
        'X-Requested-With': 'XMLHttpRequest'
    }
    data = {'id': media_id}
    html = self.net.http_POST(api_url, headers=headers, form_data=data, jdata=True).content
    items = json.loads(html).get('result')
    # 'Original' carries no pixel label; treat it as 540p so sorting works
    sources = [('540p' if item == 'Original' else item, items.get(item).get('src')) for item in items.keys()]
    if sources:
        headers.pop('X-Requested-With')
        # labels look like '720p'; strip the trailing 'p' and sort best-first
        sources.sort(key=lambda x: int(x[0][:-1]), reverse=True)
        return helpers.pick_source(sources) + helpers.append_headers(headers)
    raise ResolverError('File Not Found or removed')
def get_media_url(self, host, media_id):
    """Submit the site's free-download form and scrape the resulting stream sources.

    :raises ResolverError: when the response yields no sources
    """
    page_url = self.get_url(host, media_id)
    referer = 'https://{0}/'.format(host)
    request_headers = {
        'User-Agent': common.FF_USER_AGENT,
        'Origin': referer[:-1],
        'Referer': referer
    }
    form = {
        'op': 'download1',
        'usr_login': '',
        'id': media_id,
        'referer': referer,
        'method_free': 'Free Download'
    }
    page = self.net.http_POST(page_url, form_data=form, headers=request_headers).content
    found = helpers.scrape_sources(page)
    if not found:
        raise ResolverError('File Not Found or removed')
    return helpers.pick_source(found) + helpers.append_headers(request_headers)
def get_media_url(self, host, media_id):
    """POST the video API, decrypt the returned playlist with jscrypto, pick from m3u8 variants.

    :raises ResolverError: when the API returns no playlist or no variants
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.RAND_UA, 'Referer': web_url, 'Origin': 'https://{0}'.format(host), 'X-Requested-With': 'XMLHttpRequest'}
    data = {'id': media_id}
    api_url = 'https://{0}/api/video/get'.format(host)
    html = self.net.http_POST(api_url, data, headers=headers, jdata=True).content
    r = json.loads(html).get('result').get('playlist')
    if r:
        data = json.loads(r)
        ct = data.get('ct')
        # 's' is a hex-encoded salt; the passphrase is the literal string '2021'
        salt = codecs.decode(data.get('s'), 'hex')
        murl = json.loads(jscrypto.decode(ct, '2021', salt))
        headers.pop('X-Requested-With')
        html = self.net.http_GET(murl, headers=headers).content
        sources = re.findall(r'RESOLUTION=\d+x(?P<label>[\d]+).*\n(?!#)(?P<url>[^\n]+)', html, re.IGNORECASE)
        if sources:
            # variant URIs may be relative to the manifest URL
            stream_url = urllib_parse.urljoin(murl, helpers.pick_source(helpers.sort_sources_list(sources)))
            return stream_url + helpers.append_headers(headers)
    raise ResolverError('File not found')
def get_media_url(self, host, media_id):
    """Ask the API for the upload server, list the files and pick by size.

    :raises ResolverError: when the server lookup fails or no files exist
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.FF_USER_AGENT, 'Referer': web_url}
    download_serv = json.loads(self.net.http_GET('https://apiv2.' + host + '/getServer?c=' + media_id, headers=headers).content)
    if (download_serv['status'] == 'ok'):
        download_url = json.loads(self.net.http_GET('https://' + download_serv['data']['server'] + '.' + host + '/getUpload?c=' + media_id, headers=headers).content)
        sources = []
        if (download_url['data']['files']):
            for file_index in download_url['data']['files']:
                # percent-encode the link, keeping ':' and '/' intact
                url = urllib_parse.quote(download_url['data']['files'][file_index]['link'], ':/')
                size = download_url['data']['files'][file_index]['size']
                sources += [(size, url)]
            # False: sources are (size, url), not quality-labelled
            return helpers.pick_source(sources, False)
    raise ResolverError('Unable to locate video')
def get_media_url(self, host, media_id):
    """Resolve an uptobox link; if not yet authorized, run the pin-pairing countdown first.

    :raises ResolverError: when the API does not report Success
    """
    url = self.get_url(host, media_id)
    result = json.loads(self.net.http_GET(url, headers=self.headers).content)
    if result.get('message') == 'Success':
        js_data = result.get('data')
        if 'streamLinks' in js_data.keys():
            # already authorized: the first response carries the links
            js_result = result
        else:
            # device not paired yet: show pin + countdown, polling the check URL
            heading = i18n('uptobox_auth_header')
            line1 = i18n('auth_required')
            line2 = i18n('upto_link').format(js_data.get('base_url'))
            line3 = i18n('upto_pair').format(js_data.get('pin'))
            with common.kodi.CountdownDialog(heading, line1, line2, line3, True, js_data.get('expired_in'), 10) as cd:
                js_result = cd.start(self.__check_auth, [js_data.get('check_url')])
            if js_result.get('data').get('token'):
                # persist the token so future calls skip the pairing dialog
                self.set_setting('token', js_result.get('data').get('token'))
                self.set_setting('premium', 'true')
        if js_result:
            js_result = js_result.get('data').get('streamLinks')
            sources = [(key, list(js_result.get(key).values())[0]) for key in list(js_result.keys())]
            return helpers.pick_source(helpers.sort_sources_list(sources)) + helpers.append_headers(self.headers)
    raise ResolverError('The requested video was not found or may have been removed.')
def get_media_url(self, host, media_id):
    """Fetch playlistUrl from the page JS, then its JSON sources, then the m3u8 variants.

    :raises ResolverError: when any stage yields nothing
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.FF_USER_AGENT}
    html = self.net.http_GET(web_url, headers=headers).content
    headers.update({'Referer': web_url})
    match = re.search(r"playlistUrl='([^']+)", html)
    if match:
        web_url = 'https://{0}{1}'.format(host, match.group(1))
        html2 = self.net.http_GET(web_url, headers=headers).content
        r = json.loads(html2)[0].get('sources', None)
        if r:
            # the first source entry points at the HLS master playlist
            html = self.net.http_GET(r[0].get('file'), headers=headers).content
            sources = re.findall(r'RESOLUTION=\d+x(?P<label>[\d]+).*\n(?!#)(?P<url>[^\n]+)', html, re.IGNORECASE)
            if sources:
                return helpers.pick_source(helpers.sort_sources_list(sources)) + helpers.append_headers(headers)
    raise ResolverError('Video Link Not Found')
def get_media_url(self, host, media_id):
    """Read the XHR video JSON and return the picked source with de/en query tokens appended.

    :raises ResolverError: when the JSON carries no sources
    """
    web_url = self.get_url(host, media_id)
    headers = {
        'User-Agent': common.FF_USER_AGENT,
        'Referer': 'https://{0}/videos/{1}'.format(host, media_id),
        'X-Requested-With': 'XMLHttpRequest'
    }
    js_data = json.loads(self.net.http_GET(web_url, headers=headers).content)
    if js_data.get('video').get('sources'):
        sources = []
        for item in js_data.get('video').get('sources'):
            sources.append((item.get('label'), item.get('file')))
        source = helpers.pick_source(helpers.sort_sources_list(sources))
        headers.pop('X-Requested-With')
        # the stream server expects a Range header on playback requests
        headers.update({"Range": "bytes=0-"})
        # de/en are per-request tokens that must ride on the query string
        de = js_data.get('de')
        en = js_data.get('en')
        return '{0}?de={1}&en={2}{3}'.format(source, de, en, helpers.append_headers(headers))
    raise ResolverError('Stream not found')
def get_media_url(self, host, media_id):
    """Try the download_video path (solving recaptcha via helpers.girc); fall back to the embed stream API.

    :raises ResolverError: when neither path yields a URL
    """
    web_url = self.get_url(host, media_id)
    rurl = 'https://{0}/'.format(host)
    headers = {'User-Agent': common.RAND_UA, 'Referer': rurl}
    html = self.net.http_GET(web_url, headers=headers).content
    sources = re.findall(r'download_video([^"]+)[^\d]+\d+x(\d+)', html)
    if sources:
        # sort by height, then relabel as (..p, args)
        sources.sort(key=lambda x: int(x[1]), reverse=True)
        sources = [(x[1] + 'p', x[0]) for x in sources]
        # picked value is a JS tuple literal "('code','mode','hash')"
        # NOTE(review): eval of scraped page text — only as trustworthy as the site itself
        code, mode, hash = eval(helpers.pick_source(sources))
        dl_url = 'https://{0}/dl?op=download_orig&id={1}&mode={2}&hash={3}'.format(host, code, mode, hash)
        html = self.net.http_GET(dl_url, headers=headers).content
        # base64 of "origin:443" without '=' padding, as the recaptcha helper expects
        domain = base64.b64encode((rurl[:-1] + ':443').encode('utf-8')).decode('utf-8').replace('=', '')
        token = helpers.girc(html, rurl, domain)
        if token:
            payload = helpers.get_hidden(html)
            payload.update({'g-recaptcha-response': token})
            req = self.net.http_POST(dl_url, form_data=payload, headers=headers).content
            r = re.search('href="([^"]+)">Direct', req)
            if r:
                return r.group(1) + helpers.append_headers(headers)
    else:
        # no download links: fall back to the streamsb embed API
        eurl = self.get_embedurl(host, media_id)
        headers.update({'watchsb': 'streamsb'})
        html = self.net.http_GET(eurl, headers=headers).content
        data = json.loads(html).get("stream_data", {})
        strurl = data.get('file') or data.get('backup')
        if strurl:
            headers.pop('watchsb')
            return strurl + helpers.append_headers(headers)
    raise ResolverError('Video not found')
def get_media_url(self, host, media_id):
    """Decode JuicyCodes-packed player JS when present, then scrape sources from the page.

    :raises ResolverError: for removed/processing files or when no source is found
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.FF_USER_AGENT}
    html = self.net.http_GET(web_url, headers=headers).content
    if 'Not Found' in html:
        raise ResolverError('File Removed')
    if 'Video is processing' in html:
        raise ResolverError('File still being processed')
    packed = re.search(r"JuicyCodes\.Run\((.+?)\)", html, re.I)
    if packed:
        from base64 import b64decode
        # the JuicyCodes argument is quoted base64 chunks joined with '+'
        packed = packed.group(1).replace('"', '').replace('+', '')
        packed = b64decode(packed.encode('ascii'))
        # append the decoded player JS so scrape_sources can see it
        html += '%s</script>' % packed.decode('latin-1').strip()
    sources = helpers.scrape_sources(html)
    if sources:
        headers.update({'Referer': web_url, 'Range': 'bytes=0-'})
        return helpers.pick_source(sources) + helpers.append_headers(headers)
    raise ResolverError('Video not found')
def get_media_url(self, host, media_id):
    """Decode AAEncoded player JS when present, otherwise scrape sources directly.

    :raises ResolverError: when no source is found
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.RAND_UA, 'Referer': web_url}
    html = self.net.http_GET(web_url, headers=headers).content
    # Py2 regex modules want bytes here
    html = html.encode('utf-8') if helpers.PY2 else html
    # NOTE(review): the ')' after ';' looks unbalanced for a regex group — the original
    # pattern likely used a fullwidth '）' literal that was mangled in transit; verify upstream.
    aa_text = re.search(r"""(゚ω゚ノ\s*=\s*/`m´\s*)\s*ノ.+?;)\s*</script""", html, re.I)
    if aa_text:
        aa_decoded = aadecode.decode(aa_text.group(1))
        sources = helpers.scrape_sources(aa_decoded)
    else:
        sources = helpers.scrape_sources(html, patterns=[r'''sources:\s*\[{(?:src|file):\s*"(?P<url>[^"]+)'''])
    if sources:
        headers.update({'Referer': web_url})
        return helpers.pick_source(sources) + helpers.append_headers(headers)
    raise ResolverError('Video not found')
def get_media_url(self, host, media_id):
    """Resolve a rutube video_balancer entry: prefer the m3u8 variants, else the json playlist.

    :raises ResolverError: when neither path yields a URL
    """
    headers = {'User-Agent': common.FF_USER_AGENT}
    web_url = self.get_url(host, media_id)
    html = self.net.http_GET(web_url, headers=headers).content
    json_data = json.loads(html).get('video_balancer', {})
    if json_data:
        url = json_data.get('m3u8', None)
        if url:
            headers.update({'Origin': 'http://rutube.ru'})
            # master playlist lists variants as RESOLUTION=WxH lines followed by URIs
            mbtext = self.net.http_GET(url, headers=headers).content
            sources = re.findall('RESOLUTION=(?P<label>[^x]+).+\n(?P<url>[^?]+)', mbtext, re.IGNORECASE)
            return helpers.pick_source(helpers.sort_sources_list(sources)) + helpers.append_headers(headers)
        # no m3u8: fall back to the balancer's json playlist endpoint
        json_url = json_data.get('json')
        html = self.net.http_GET(json_url, headers=headers).content
        js_data = json.loads(html)
        if js_data.get('results', False):
            return js_data.get('results')[0] + helpers.append_headers(headers)
    raise ResolverError('No playable video found.')
def get_media_url(self, host, media_id, retry=False, cached_only=False):
    """Resolve a link (or magnet) through Real-Debrid.

    Magnets are added as torrents, converted/selected/downloaded with
    progress dialogs, and the resulting cloud link is then unrestricted.
    On a 401 the token is refreshed once (retry=True guards recursion).

    :param retry: internal flag — True on the single post-refresh retry
    :param cached_only: force cached-torrent-only behavior regardless of settings
    :raises ResolverError: on cancellation, stalls, timeouts or API errors
    """
    try:
        self.headers.update({'Authorization': 'Bearer %s' % self.get_setting('token')})
        if media_id.lower().startswith('magnet:'):
            cached = self.__check_cache(media_id)
            if not cached and (self.get_setting('cached_only') == 'true' or cached_only):
                raise ResolverError('Real-Debrid: Cached torrents only allowed to be initiated')
            torrent_id = self.__add_magnet(media_id)
            if not torrent_id == "":
                torrent_info = self.__torrent_info(torrent_id)
                heading = 'Resolve URL Real-Debrid Transfer'
                line1 = torrent_info.get('filename')
                status = torrent_info.get('status')
                if status == 'magnet_conversion':
                    # poll conversion with a countdown dialog until done/stalled/timeout
                    line2 = 'Converting MAGNET...'
                    line3 = '%s seeders' % torrent_info.get('seeders')
                    _TIMEOUT = 100  # seconds
                    with common.kodi.ProgressDialog(heading, line1, line2, line3) as cd:
                        while status == 'magnet_conversion' and _TIMEOUT > 0:
                            cd.update(_TIMEOUT, line1=line1, line3=line3)
                            if cd.is_canceled():
                                self.__delete_torrent(torrent_id)
                                raise ResolverError('Real-Debrid: Torrent ID %s canceled by user' % torrent_id)
                            elif any(x in status for x in STALLED):
                                self.__delete_torrent(torrent_id)
                                raise ResolverError('Real-Debrid: Torrent ID %s has stalled | REASON: %s' % (torrent_id, status))
                            _TIMEOUT -= INTERVALS
                            common.kodi.sleep(1000 * INTERVALS)
                            torrent_info = self.__torrent_info(torrent_id)
                            status = torrent_info.get('status')
                            line1 = torrent_info.get('filename')
                            line3 = '%s seeders' % torrent_info.get('seeders')
                    if status == 'magnet_conversion':
                        self.__delete_torrent(torrent_id)
                        raise ResolverError('Real-Debrid Error: MAGNET Conversion exceeded time limit')
                if status == 'waiting_files_selection':
                    # select the largest file with a known video extension
                    _videos = []
                    for _file in torrent_info.get('files'):
                        if any(_file.get('path').lower().endswith(x) for x in FORMATS):
                            _videos.append(_file)
                    try:
                        _video = max(_videos, key=lambda x: x.get('bytes'))
                        file_id = _video.get('id', 0)
                    except ValueError:
                        # max() on an empty list: no video files in the torrent
                        self.__delete_torrent(torrent_id)
                        raise ResolverError('Real-Debrid Error: Failed to locate largest video file')
                    file_selected = self.__select_file(torrent_id, file_id)
                    if not file_selected:
                        self.__delete_torrent(torrent_id)
                        raise ResolverError('Real-Debrid Error: Failed to select file')
                    else:
                        torrent_info = self.__torrent_info(torrent_id)
                        status = torrent_info.get('status')
                        if not status == 'downloaded':
                            # poll the cloud transfer with a progress dialog
                            file_size = round(float(_video.get('bytes')) / (1000 ** 3), 2)
                            if cached:
                                line2 = 'Getting torrent from the Real-Debrid Cloud'
                            else:
                                line2 = 'Saving torrent to the Real-Debrid Cloud'
                            line3 = status
                            with common.kodi.ProgressDialog(heading, line1, line2, line3) as pd:
                                while not status == 'downloaded':
                                    common.kodi.sleep(1000 * INTERVALS)
                                    torrent_info = self.__torrent_info(torrent_id)
                                    line1 = torrent_info.get('filename')
                                    status = torrent_info.get('status')
                                    if status == 'downloading':
                                        line3 = 'Downloading %s GB @ %s mbps from %s peers, %s %% completed' % (file_size, round(float(torrent_info.get('speed')) / (1000**2), 2), torrent_info.get("seeders"), torrent_info.get('progress'))
                                    else:
                                        line3 = status
                                    logger.log_debug(line3)
                                    pd.update(int(float(torrent_info.get('progress'))), line1=line1, line3=line3)
                                    if pd.is_canceled():
                                        self.__delete_torrent(torrent_id)
                                        raise ResolverError('Real-Debrid: Torrent ID %s canceled by user' % torrent_id)
                                    elif any(x in status for x in STALLED):
                                        self.__delete_torrent(torrent_id)
                                        raise ResolverError('Real-Debrid: Torrent ID %s has stalled | REASON: %s' % (torrent_id, status))
                    # swap the magnet for the finished transfer's cloud link
                    media_id = torrent_info.get('links')[0]
                    self.__delete_torrent(torrent_id)
            if media_id.lower().startswith('magnet:'):
                # still a magnet: the transfer never produced a link
                self.__delete_torrent(torrent_id)  # clean up just in case
                raise ResolverError('Real-Debrid Error: Failed to transfer torrent to/from the cloud')
        url = '%s/%s' % (rest_base_url, unrestrict_link_path)
        data = {'link': media_id}
        result = self.net.http_POST(url, form_data=data, headers=self.headers).content
    except urllib_error.HTTPError as e:
        if not retry and e.code == 401:
            if self.get_setting('refresh'):
                # expired token: refresh once and retry
                self.refresh_token()
                return self.get_media_url(host, media_id, retry=True)
            else:
                self.reset_authorization()
                raise ResolverError('Real Debrid Auth Failed & No Refresh Token')
        else:
            try:
                js_result = json.loads(e.read())
                if 'error' in js_result:
                    msg = js_result['error']
                else:
                    msg = 'Unknown Error (1)'
            except:
                msg = 'Unknown Error (2)'
            raise ResolverError('Real Debrid Error: %s (%s)' % (msg, e.code))
    except Exception as e:
        raise ResolverError('Unexpected Exception during RD Unrestrict: %s' % e)
    else:
        # unrestrict succeeded: collect the primary and any alternative links
        js_result = json.loads(result)
        links = []
        link = self.__get_link(js_result)
        if link is not None:
            links.append(link)
        if 'alternative' in js_result:
            for alt in js_result['alternative']:
                link = self.__get_link(alt)
                if link is not None:
                    links.append(link)
        return helpers.pick_source(links)
def get_media_url(self, host, media_id):
    """Fetch the page via XHR and pick from the JSON-embedded source list.

    :returns: picked stream URL with playback headers appended
    """
    request_headers = {
        'User-Agent': common.RAND_UA,
        'X-Requested-With': 'XMLHttpRequest'
    }
    page = self.net.http_GET(self.get_url(host, media_id), headers=request_headers).content
    matches = re.findall(r'"source(?:_bk)?":\[{"file":"([^"]+)","label":"([^"]+)"', page)
    sources = []
    for stream_url, quality in matches:
        # JSON escapes slashes as "\/"; undo that for a usable URL
        sources.append((quality, stream_url.replace('\\/', '/')))
    request_headers.update({'Referer': host})
    return helpers.pick_source(sources) + helpers.append_headers(request_headers)
def get_media_url(self, host, media_id):
    """POST the host discriminator and pick from the returned JSON sources.

    :returns: picked stream URL with playback headers appended
    """
    request_headers = {'User-Agent': common.RAND_UA}
    body = self.net.http_POST(self.get_url(host, media_id), {'d': host}, headers=request_headers).content
    pairs = re.findall(r'"([^"]+)","label":"([^"]+)"', body)
    # undo JSON's "\/" slash escaping and reorder to (label, url)
    sources = [(quality, link.replace('\\/', '/')) for (link, quality) in pairs]
    return helpers.pick_source(sources) + helpers.append_headers(request_headers)
def get_media_url(self, host, media_id, cached_only=False):
    """Resolve a link (or magnet) through AllDebrid.

    Magnets are checked against the cache; uncached magnets are only
    transferred when settings allow.  The largest video link of the
    finished transfer is unlocked via /link/unlock, with a follow-up
    /link/streaming call for 'stream' hosts.

    :param cached_only: force cached-torrent-only behavior regardless of settings
    :raises ResolverError: on HTTP errors or API-reported errors
    """
    try:
        if media_id.lower().startswith('magnet:'):
            r = re.search('''magnet:.+?urn:([a-zA-Z0-9]+):([a-zA-Z0-9]+)''', media_id, re.I)
            if r:
                _hash = r.group(2)
                if self.__check_cache(_hash):
                    logger.log_debug('AllDebrid: BTIH {0} is readily available to stream'.format(_hash))
                    transfer_id = self.__create_transfer(_hash)
                else:
                    if self.get_setting('cached_only') == 'true' or cached_only:
                        raise ResolverError('AllDebrid: Cached torrents only allowed to be initiated')
                    else:
                        transfer_id = self.__create_transfer(_hash)
                        self.__initiate_transfer(transfer_id)
                transfer_info = self.__list_transfer(transfer_id)
                # keep only links whose filename has a known video extension
                sources = [(link.get('size'), link.get('link'))
                           for link in transfer_info.get('links')
                           if any(link.get('filename').lower().endswith(x) for x in FORMATS)]
                # largest file wins; it replaces the magnet as the link to unlock
                media_id = max(sources)[1]
                self.__delete_transfer(transfer_id)
        url = '{0}/link/unlock?agent={1}&apikey={2}&link={3}'.format(
            api_url, urllib_parse.quote_plus(AGENT),
            self.get_setting('token'), urllib_parse.quote_plus(media_id))
        result = self.net.http_GET(url, headers=self.headers).content
    except urllib_error.HTTPError as e:
        try:
            js_result = json.loads(e.read())
            if 'error' in js_result:
                msg = '{0} ({1})'.format(js_result.get('error'), js_result.get('errorCode'))
            else:
                msg = 'Unknown Error (1)'
        except:
            msg = 'Unknown Error (2)'
        raise ResolverError('AllDebrid Error: {0} ({1})'.format(msg, e.code))
    else:
        js_result = json.loads(result)
        logger.log_debug('AllDebrid resolve: [{0}]'.format(js_result))
        if 'error' in js_result:
            e = js_result.get('error')
            raise ResolverError('AllDebrid Error: {0} ({1})'.format(e.get('message'), e.get('code')))
        elif js_result.get('status', False) == "success":
            if js_result.get('data').get('link'):
                return js_result.get('data').get('link')
            elif js_result.get('data').get('host') == "stream":
                # no direct link: pick a stream id and ask /link/streaming for it
                sources = js_result.get('data').get('streams')
                fid = js_result.get('data').get('id')
                sources = [(str(source.get("quality")), source.get("id"))
                           for source in sources if '+' not in source.get("id")]
                sid = helpers.pick_source(helpers.sort_sources_list(sources))
                url = '{0}/link/streaming?agent={1}&apikey={2}&id={3}&stream={4}' \
                    .format(api_url, urllib_parse.quote_plus(AGENT),
                            self.get_setting('token'), fid, sid)
                result = self.net.http_GET(url, headers=self.headers).content
                js_data = json.loads(result)
                if js_data.get('data').get('link'):
                    return js_data.get('data').get('link')
    raise ResolverError('AllDebrid: no stream returned')