def get_media_url(self, host, media_id):
    """Resolve a hoster page into a playable media URL.

    Repeatedly re-submits the page's hidden form together with a solved
    captcha until the download-button markup appears, then returns the
    extracted link with playback headers appended.

    Raises:
        ResolverError: if no link is found within MAX_TRIES attempts.
    """
    web_url = self.get_url(host, media_id)
    headers = {
        'User-Agent': common.FF_USER_AGENT,
        'Referer': web_url
    }
    html = self.net.http_GET(web_url, headers=headers).content
    tries = 0
    while tries < MAX_TRIES:
        # Hidden form fields plus the captcha answer form the POST body.
        data = helpers.get_hidden(html)
        data.update(captcha_lib.do_captcha(html))
        html = self.net.http_POST(web_url, data, headers=headers).content
        r = re.search('''class="downloadbtn"[^>]+onClick\s*=\s*\"window\.open\('([^']+)''', html)
        if r:
            return r.group(1) + helpers.append_headers(headers)
        if tries > 0:
            # Back off briefly before re-posting the form.
            common.kodi.sleep(1000)
        tries = tries + 1
    raise ResolverError('Unable to locate link')
def get_media_url(self, host, media_id):
    """Return the direct file URL scraped from the hoster's embed page."""
    page_url = self.get_url(host, media_id)
    page = self.net.http_GET(page_url).content
    # Unpack every packed eval(function...) script and append the result so
    # the source regexes below can see the deobfuscated markup as well.
    packed_scripts = re.findall('(eval\(function.*?)</script>', page.replace('\n', ''))
    for script in packed_scripts:
        try:
            page += jsunpack.unpack(script)
        except:
            pass
    source_blocks = re.findall('''["']?sources['"]?\s*:\s*\[(.*?)\]''', page)
    if source_blocks:
        file_urls = re.findall('''['"]?file['"]?\s*:\s*['"]?([^'"]+)''', source_blocks[0])
        if file_urls:
            # Keep the last match, as the original implementation did.
            return file_urls[-1]
    raise ResolverError('File Not Found or removed')
def __get_sources(self, oid, video_id):
    """Fetch available stream sources for a VK video.

    Queries VK's inline-player endpoint and extracts (quality, url) pairs
    from the first player-params dict found in the payload. Falls back to
    the HLS manifest labelled '360' when no direct .mp4 urls are present.

    Args:
        oid: owner-id portion of the video identifier.
        video_id: video-id portion of the identifier.

    Returns:
        list of (quality_label, url) tuples.

    Raises:
        ResolverError: when the payload contains no player data.
    """
    sources_url = 'https://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (oid, video_id)
    html = self.net.http_GET(sources_url).content
    # The endpoint prefixes its JSON with an HTML comment opener.
    if html.startswith('<!--'):
        html = html[4:]
    js_data = json.loads(html)
    payload = []
    sources = []
    # `or []` guards against a missing 'payload' key (was a TypeError);
    # isinstance replaces the non-idiomatic type(...) == list/dict checks.
    for item in js_data.get('payload') or []:
        if isinstance(item, list):
            payload = item
    if payload:
        for entry in payload:
            if isinstance(entry, dict):
                js_data = entry.get('player').get('params')[0]
                # Player params expose urls under url240/url360/... keys.
                for key in list(js_data.keys()):
                    if key.startswith('url') and '.mp4' in js_data.get(key):
                        sources.append((key[3:], js_data.get(key)))
                if not sources:
                    # No direct mp4s; fall back to the HLS manifest.
                    sources = [('360', js_data.get('hls'))]
                return sources
    raise ResolverError('No video found')
def get_media_url(self, host, media_id):
    """Resolve the hoster page to a tokenised stream URL.

    Scrapes the page's sources list, sorts it best-first, then authorises
    this client's IP to obtain a video token ('vt'). Falls back to the raw
    source url when the auth endpoint rejects the request with an HTTP
    error.

    Raises:
        ResolverError: when the auth endpoint returns no video token.
    """
    web_url = self.get_url(host, media_id)
    headers = {
        'Referer': web_url
    }
    headers.update(self.headers)
    html = self.net.http_GET(web_url, headers=headers).content
    sources = helpers.parse_sources_list(html)
    if sources:
        if len(sources) > 1:
            try:
                # Sort by the numeric part of the quality label, highest first.
                sources.sort(key=lambda x: int(re.sub("\D", '', x[0])), reverse=True)
            except:
                common.logger.log_debug('Scrape sources sort failed |int(re.sub(r"""\D""", '', x[0])|')
        # try/except/else: the else raises only when __auth_ip completed
        # without an HTTPError but returned a falsy token.
        try:
            vt = self.__auth_ip(media_id)
            if vt:
                params = {'direct': 'false', 'ua': 1, 'vt': vt}
                return helpers.pick_source(sources) + '?' + urllib.urlencode(params) + helpers.append_headers(self.headers)
        except urllib2.HTTPError:
            # Auth endpoint refused us; return the plain source url instead.
            source = helpers.pick_source(sources)
            return source
        else:
            raise ResolverError('Video Token Missing')
def get_media_url(self, host, media_id):
    """Resolve a wholecloud page to a stream url.

    Primary method: pick a random <source> url out of the page's <video>
    tag. Fallback: read the 'fkzd' key and ask the player API for the url.

    Raises:
        ResolverError: when neither method yields a stream url.
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.FF_USER_AGENT}
    html = self.net.http_GET(web_url, headers=headers).content
    stream_url = ''
    match = re.search('<video.*?</video>', html, re.DOTALL)
    if match:
        links = re.findall('<source[^>]+src="([^"]+)', match.group(0), re.DOTALL)
        if links:
            # Any of the mirrors works; pick one at random.
            stream_url = random.choice(links)
    if not stream_url:
        match = re.search('fkzd="([^"]+)', html)
        if match:
            # Parameters the flash player would normally send; user/pass are
            # masked placeholders expected by the API.
            query = {
                'pass': '******',
                'key': match.group(1),
                'cid3': 'undefined',
                'cid': 0,
                'numOfErrors': 0,
                'file': media_id,
                'cid2': 'undefined',
                'user': '******'
            }
            api_url = 'http://www.wholecloud.net//api/player.api.php?' + urllib.urlencode(
                query)
            html = self.net.http_GET(api_url, headers=headers).content
            match = re.search('url=([^&]+)', html)
            if match:
                stream_url = match.group(1)
    if stream_url:
        headers.update({
            'Referer': web_url,
        })
        return stream_url + helpers.append_headers(headers)
    else:
        raise ResolverError('File Not Found or removed')
def get_media_url(self, host, media_id):
    """Resolve an iframe embed whose source urls are obfuscated.

    The page ships an obfuscation array (var _0x...=[...]); each scraped
    source is decoded against that array via the S helper before playback.

    Raises:
        ResolverError: when no sources or no obfuscation array is found.
    """
    web_url = self.get_url(host, media_id)
    headers = {
        'User-Agent': common.RAND_UA,
        # The site checks that the iframe was reached from its preview page.
        'Referer': web_url.replace("iframe-", "preview-")
    }
    html = self.net.http_GET(web_url, headers=headers).content
    if html:
        var = re.search('var _0x[0-f]+=\[([^;]+)\];', html)
        sources = helpers.scrape_sources(
            html, patterns=['''src\s*:\s*["'](?P<url>http[^"']+\.mp4)["']'''])
        if sources and var:
            headers.update({'Referer': web_url})
            # Decode each scraped url with the page's obfuscation array.
            sources = [(source[0], S(var.group(1)).decode(source[1]))
                       for source in sources]
            return helpers.pick_source(sources) + helpers.append_headers(
                headers)
    raise ResolverError('File not found')
def get_media_url(self, host, media_id):
    """Resolve an embed whose url paths are encrypted with a page key.

    The key is rebuilt from hex-encoded fragments of the page's fourth
    JS array literal; the first path segment of every scraped source is
    then decrypted with self.decrypt before playback.

    Raises:
        ResolverError: when no sources or key material is found.
    """
    web_url = self.get_url(host, media_id)
    headers = {
        'User-Agent': common.RAND_UA,
        # The site checks that the iframe was reached from its preview page.
        'Referer': web_url.replace("iframe-", "preview-")
    }
    html = self.net.http_GET(web_url, headers=headers).content
    if html:
        sources = helpers.scrape_sources(
            html, patterns=['''src\s*:\s*["'](?P<url>[^"']+)'''])
        data = re.findall("""_[^=]+=\[([^\]]+)\];""", html, re.DOTALL)
        if sources and data:
            # Key material lives in the fourth array; entries are
            # hex-encoded strings ("\x..") that decode to text fragments.
            data = data[3].replace('\\x', '').split(",")
            data = [
                x.replace('"', '').replace(' ', '').decode("hex")
                for x in data
            ]
            key = "".join(data[7:9])
            if key.startswith("embed"):
                # Rotate the 'embed' prefix to the back of the key.
                key = key[6:] + key[:6]
            i = 0
            headers.update({'Referer': web_url})
            for source in sources:
                try:
                    # Decrypt only the first path segment of the url.
                    src = urlparse.urlparse(source[1])
                    l = list(src)
                    b = l[2].split("/")[1:]
                    b[0] = self.decrypt(b[0], key)
                    l[2] = "/".join(b)
                    sources[i] = (source[0], urlparse.urlunparse(l))
                    i += 1
                except:
                    # Leave an undecryptable source untouched, keep indexing.
                    i += 1
            return helpers.pick_source(sources) + helpers.append_headers(
                headers)
    raise ResolverError('File not found')
def _parse_google(self, link):
    """Dispatch a Google video link to the matching sub-parser.

    Supports get.google, plus.google, drive/docs.google and
    youtube.googleapis.com urls.

    Returns:
        (response, sources): the HTTP response object used (or None when
        no branch matched) and the list of scraped sources (possibly
        empty).

    Raises:
        ResolverError: when a googleapis link carries no cid id.
    """
    sources = []
    response = None
    if re.match('https?://get[.]', link):
        if link.endswith('/'):
            link = link[:-1]
        vid_id = link.split('/')[-1]
        response = self.net.http_GET(link)
        sources = self.__parse_gget(vid_id, response.content)
    elif re.match('https?://plus[.]', link):
        response = self.net.http_GET(link)
        sources = self.__parse_gplus(response.content)
    elif 'drive.google' in link or 'docs.google' in link:
        # The /edit page exposes the metadata the gdocs parser needs.
        link = link.replace("/preview", "/edit")
        response = self.net.http_GET(link)
        sources = self._parse_gdocs(response.content)
    elif 'youtube.googleapis.com' in link:
        cid = re.search('cid=([\w]+)', link)
        try:
            # Fixed: was cid.groups(1), which only worked by accident
            # (a 1-tuple fed to %-formatting); group(1) is the id itself.
            link = 'https://drive.google.com/file/d/%s/edit' % cid.group(1)
        except AttributeError:
            # cid is None when the regex found no id in the link.
            raise ResolverError('ID not found')
        response = self.net.http_GET(link)
        sources = self._parse_gdocs(response.content)
    return response, sources
def get_media_url(self, host, media_id):
    """Post the page's hidden form and return the first data-url stream."""
    page_url = self.get_url(host, media_id)
    page = self.net.http_GET(page_url, headers={'Referer': page_url}).content
    form = helpers.get_hidden(page)
    ajax_headers = {'Referer': page_url, 'X-Requested-With': 'XMLHttpRequest'}
    page = self.net.http_POST(page_url, form, headers=ajax_headers).content
    # Sanity-check the expected marker before scraping the attribute value.
    if re.search(r'class="stream-content" data-url', page) is None:
        raise ResolverError('page structure changed')
    urls = re.findall(r'data-url="?(.+?)"', page)
    ua_suffix = urllib.urlencode({'User-Agent': common.IE_USER_AGENT})
    return urls[0] + '|' + ua_suffix
def get_media_url(self, host, media_id):
    """Scrape the page's sources array and return a url with media headers."""
    page_url = self.get_url(host, media_id)
    resp = self.net.http_GET(page_url, headers=self.desktopHeaders)
    if resp._response.code == 200:
        found = helpers.scrape_sources(
            resp.content,
            generic_patterns=False,
            patterns=['''sources.*?\[['"](?P<url>.*?)['"]''']
        )
        if found:
            # Carry over every session cookie the server just set.
            cookie_bits = []
            for hdr in resp.get_headers():
                if hdr.startswith('Set-Cookie'):
                    cookie_bits.append(hdr.replace('Set-Cookie: ', '').split(';', 1)[0])
            final_url = urlparse(resp.get_url())
            # Headers for requesting media (copied from Firefox).
            media_headers = {
                'User-Agent': self.net.get_user_agent(),
                'Accept': 'video/webm,video/ogg,video/*;q=0.9,application/ogg;q=0.7,audio/*;q=0.6,*/*;q=0.5',
                'Referer': '%s://%s/' % (final_url.scheme, final_url.netloc),
                'Cookie': '; '.join(cookie_bits)
            }
            return helpers.pick_source(found) + helpers.append_headers(media_headers)
    raise ResolverError('Unable to locate video')
def get_media_url(self, host, media_id):
    """Unpack any packed player script and return the chosen file url."""
    page_url = self.get_url(host, media_id)
    html = self.net.http_GET(page_url).content
    if html:
        packed = re.search('(eval\(function.*?)\s*</script>', html, re.DOTALL)
        script = jsunpack.unpack(packed.group(1)) if packed else html
        matches = re.findall('''file\s*:\s*["']([^"']+\.(?:(m3u8|mp4)))''', script)
        if matches:
            # findall yields (url, extension); pick_source wants (label, url).
            labelled = [(ext, url) for url, ext in matches]
            play_headers = {'User-Agent': common.RAND_UA, 'Referer': page_url}
            return helpers.pick_source(labelled) + helpers.append_headers(play_headers)
    raise ResolverError('No playable video found.')
def get_media_url(self, host, media_id):
    """Resolve a wholecloud page via its player API, with an embed fallback.

    First method: read flashvars.filekey (resolving an indirect variable
    reference if present) and ask the player API for the url. Second
    method: scrape the #player element's src directly. The fallback only
    runs when the first method produced nothing, so a successful API
    result is never clobbered (previously it could be overwritten).

    Raises:
        ResolverError: when neither method yields a stream url.
    """
    web_url = self.get_url(host, media_id)
    stream_url = ''
    html = self.net.http_GET(web_url).content
    try:
        r = re.search('flashvars.filekey=(.+?);', html)
        if r:
            r = r.group(1)
            try:
                # filekey may name another variable; resolve it to its value.
                filekey = re.compile('\s+%s="(.+?)"' % r).findall(html)[-1]
            except IndexError:
                filekey = r
            player_url = 'http://www.wholecloud.net/api/player.api.php?key=%s&file=%s' % (
                filekey, media_id)
            html = self.net.http_GET(player_url).content
            r = re.search('url=(.+?)&', html)
            if r:
                stream_url = r.group(1)
    except Exception:
        # Was a Python-2-only print; route through the addon logger instead.
        common.logger.log_debug('no embedded urls found using first method')
    if not stream_url:
        try:
            r = re.search('id="player".*?src="(.*?)"', html, re.DOTALL)
            if r:
                stream_url = r.group(1)
        except Exception:
            common.logger.log_debug('no embedded urls found using second method')
    if stream_url:
        return '%s%s' % (stream_url, '|Referer=' + web_url)
    else:
        raise ResolverError('File Not Found or removed')
def get_media_url(self, host, media_id):
    """Resolve via the site's /api/source endpoint, following mirror moves.

    If the landing GET redirects, the host is rebuilt from the final url;
    if the api POST redirects, it is retried on the www. subdomain. The
    chosen mp4 url is then HEAD-resolved to its final location.

    Raises:
        ResolverError: when the api reports no playable sources.
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.RAND_UA}
    r = self.net.http_GET(web_url, headers=headers)
    if r.get_url() != web_url:
        # Redirected to a mirror domain; rebuild urls against it.
        host = re.findall(r'(?://|\.)([^/]+)', r.get_url())[0]
        web_url = self.get_url(host, media_id)
    headers.update({'Referer': web_url})
    api_url = 'https://{0}/api/source/{1}'.format(host, media_id)
    r = self.net.http_POST(api_url, form_data={
        'r': '',
        'd': host
    }, headers=headers)
    if r.get_url() != api_url:
        # The api itself redirected; retry on the www. subdomain.
        api_url = 'https://www.{0}/api/source/{1}'.format(host, media_id)
        r = self.net.http_POST(api_url, form_data={
            'r': '',
            'd': host
        }, headers=headers)
    js_result = r.content
    if js_result:
        js_data = json.loads(js_result)
        if js_data.get('success'):
            sources = [(i.get('label'), i.get('file'))
                       for i in js_data.get('data') if i.get('type') == 'mp4']
            common.logger.log(sources)
            sources = helpers.sort_sources_list(sources)
            rurl = helpers.pick_source(sources)
            # HEAD request follows redirects to the final file location.
            str_url = self.net.http_HEAD(rurl, headers=headers).get_url()
            return str_url + helpers.append_headers(headers)
    raise ResolverError('Video not found')
def get_media_url(self, host, media_id):
    """Pick the highest-quality link on the page and resolve its player.

    The quality page is chosen by sorting the scraped anchors on their
    link text; the player page is then parsed either for a plain 'file'
    entry or, failing that, a packed script containing an mp4 src.

    Raises:
        ResolverError: when no playable link can be extracted.
    """
    web_url = self.get_url(host, media_id)
    headers = {'Referer': web_url, 'User-Agent': common.CHROME_USER_AGENT}
    player_headers = {
        'Cookie': 'PHPSESSID=1',
        'Referer': web_url,
        'User-Agent': common.CHROME_USER_AGENT
    }
    player_headers.update(headers)
    html = self.net.http_GET(web_url, headers=headers).content
    try:
        html = html.encode('utf-8')
    except:
        pass
    match = re.findall('data-quality="(.*?)" href="(.*?)".*?>(.*?)</a>',
                       html, re.DOTALL)
    if match:
        # Sort by anchor text; last entry is the one we fetch.
        mylinks = sorted(match, key=lambda x: x[2])
        html = self.net.http_GET(mylinks[-1][1], headers=headers).content
        from HTMLParser import HTMLParser
        match = re.search('''['"]file['"]:\s*['"](.+?)['"]''',
                          HTMLParser().unescape(html))
        if match:
            # Strip js escaping from the url before validating it.
            mylink = match.group(1).replace("\\", "")
            return self.__check_vid(mylink) + helpers.append_headers(
                player_headers)
        # Fallback: unpack the packed player script and read the mp4 src.
        html = jsunpack.unpack(
            re.search("eval(.*?)\{\}\)\)", html, re.DOTALL).group(1))
        match = re.search('src="(.*?\.mp4)"', html)
        if match:
            return self.__check_vid(
                match.group(1)) + helpers.append_headers(player_headers)
    raise ResolverError('Video Link Not Found')
def get_media_url(self, host, media_id):
    """Resolve a free-download hoster page behind a captcha form.

    Re-posts the hidden form (with the 'Free+Download' method flag and a
    solved captcha) until the download-link page is generated.

    Raises:
        ResolverError: if no link is found within MAX_TRIES attempts.
    """
    web_url = self.get_url(host, media_id)
    html = self.net.http_GET(web_url).content
    tries = 0
    while tries < MAX_TRIES:
        data = helpers.get_hidden(html)
        data['method_free'] = 'Free+Download'
        data.update(captcha_lib.do_captcha(html))
        headers = {
            'Referer': web_url
        }
        html = self.net.http_POST(web_url, data, headers=headers).content
        if tries > 0:
            # Wait out the hoster's retry delay before checking again.
            xbmc.sleep(6000)
        if '>File Download Link Generated<' in html:
            r = re.search("onClick\s*=\s*\"window\.open\('([^']+)", html)
            if r:
                return r.group(1) + '|' + urllib.urlencode({'User-Agent': common.IE_USER_AGENT})
        tries = tries + 1
    raise ResolverError('Unable to locate link')
def get_media_url(self, host, media_id):
    """Parse the page's videoObject JSON and return the tallest stream."""
    page_url = self.get_url(host, media_id)
    req_headers = {'User-Agent': common.RAND_UA}
    page = self.net.http_GET(page_url, headers=req_headers).content
    found = re.search('videoObject\s*=\s*(.*?});', page)
    if not found:
        raise ResolverError('JSON Not Found')
    try:
        video_obj = json.loads(found.group(1))
    except:
        video_obj = {}
    variants = []
    for _key, entry in video_obj.get('files', {}).iteritems():
        stream_url = entry['url']
        if stream_url.startswith('//'):
            # Protocol-relative url; force https.
            stream_url = 'https:' + stream_url
        variants.append((entry.get('height', 'Unknown'), stream_url))
    variants.sort(key=lambda pair: pair[0], reverse=True)
    return helpers.pick_source(variants) + helpers.append_headers(req_headers)
def sucuri(self, html):
    """Solve a Sucuri anti-bot challenge and return its session cookie.

    The challenge ships a base64 blob of javascript that computes a
    document.cookie assignment. The js string operations are rewritten
    into python equivalents and the result exec'd to capture the cookie.

    NOTE(review): exec of page-derived code is inherently risky; the
    regex rewrites constrain it, but treat the input as untrusted.

    Raises:
        ResolverError: when the challenge cannot be decoded.
    """
    try:
        import base64
        self.cookie = None
        s = re.compile("S\s*=\s*'([^']+)").findall(html)[0]
        s = base64.b64decode(s)
        s = s.replace(' ', '')
        # Translate js idioms into python source, expression by expression.
        s = re.sub('String\.fromCharCode\(([^)]+)\)', r'chr(\1)', s)
        s = re.sub('\.slice\((\d+),(\d+)\)', r'[\1:\2]', s)
        s = re.sub('\.charAt\(([^)]+)\)', r'[\1]', s)
        s = re.sub('\.substr\((\d+),(\d+)\)', r'[\1:\1+\2]', s)
        s = re.sub(';location.reload\(\);', '', s)
        s = re.sub(r'\n', '', s)
        s = re.sub(r'document\.cookie', 'cookie', s)
        cookie = ''
        # exec assigns the translated script's result into `cookie`.
        exec(s)
        self.cookie = re.compile('([^=]+)=(.*)').findall(cookie)[0]
        self.cookie = '%s=%s' % (self.cookie[0], self.cookie[1])
        return self.cookie
    except:
        raise ResolverError('Could not decode sucuri')
def get_media_url(self, host, media_id):
    """Query the JSON api and return the chosen source with de/en tokens."""
    api_url = self.get_url(host, media_id)
    req_headers = {
        'User-Agent': common.FF_USER_AGENT,
        'Referer': 'https://{0}/videos/{1}'.format(host, media_id),
        'X-Requested-With': 'XMLHttpRequest'
    }
    payload = json.loads(
        self.net.http_GET(api_url, headers=req_headers).content)
    video_sources = payload.get('video').get('sources')
    if video_sources:
        pairs = [(entry.get('label'), entry.get('file'))
                 for entry in video_sources]
        chosen = helpers.pick_source(helpers.sort_sources_list(pairs))
        # Playback headers differ from the api request: drop the ajax
        # marker and add an explicit byte range.
        req_headers.pop('X-Requested-With')
        req_headers.update({"Range": "bytes=0-"})
        return '{0}?de={1}&en={2}{3}'.format(
            chosen, payload.get('de'), payload.get('en'),
            helpers.append_headers(req_headers))
    raise ResolverError('Stream not found')
def get_media_url(self, host, media_id):
    """Wait out the page's countdown, post its form, and scrape sources.

    The countdown length is read from the page's 'count = N;' script
    (default 10s) and honoured before the POST, since the server rejects
    early submissions.

    Raises:
        ResolverError: when the page reports the file as removed.
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.FF_USER_AGENT}
    response = self.net.http_GET(web_url, headers=headers)
    html = response.content
    if re.search('>(File Not Found)<', html):
        raise ResolverError('File Not Found or removed')
    cnt = 10
    match = re.search('count\s*=\s*(\d+);', html)
    if match:
        cnt = int(match.group(1))
        # One extra second of margin over the advertised countdown.
        cnt += 1
    data = helpers.get_hidden(html)
    headers.update({'Referer': web_url})
    common.kodi.sleep(cnt * 1000)
    # POST to the final (possibly redirected) url of the GET.
    html = self.net.http_POST(response.get_url(),
                              form_data=data,
                              headers=headers).content
    sources = helpers.scrape_sources(
        html, patterns=['''file\s*:\s*["'](?P<url>[^"']+)'''])
    return helpers.pick_source(sources) + helpers.append_headers(headers)
def get_media_url(self, host, media_id):
    """Resolve a free-download hoster page behind a captcha form.

    Re-posts the hidden form (with the 'Free+Download+>>' method flag and
    a solved captcha) until the download-link page is generated.

    Raises:
        ResolverError: if no link is found within MAX_TRIES attempts.
    """
    web_url = self.get_url(host, media_id)
    html = self.net.http_GET(web_url).content
    tries = 0
    while tries < MAX_TRIES:
        data = helpers.get_hidden(html)
        data['method_free'] = 'Free+Download+>>'
        data.update(captcha_lib.do_captcha(html))
        headers = {'Referer': web_url}
        common.log_utils.log_debug(data)
        html = self.net.http_POST(web_url, data, headers=headers).content
        if tries > 0:
            # Wait out the hoster's retry delay before checking again.
            xbmc.sleep(6000)
        if 'File Download Link Generated' in html:
            r = re.search('href="([^"]+)[^>]>Download<', html, re.I)
            if r:
                return r.group(1) + '|' + urllib.urlencode(
                    {'User-Agent': common.IE_USER_AGENT})
        tries = tries + 1
    raise ResolverError('Unable to locate link')
def get_media_url(self, host, media_id):
    """Resolve a kingfiles page behind a captcha form.

    Re-posts the hidden form with a solved captcha; the file link is read
    either from a hidden 'src' input (also searched in unpacked scripts)
    or from an anchor inside a span.

    Raises:
        ResolverError: if no link is found within MAX_TRIES attempts.
    """
    web_url = self.get_url(host, media_id)
    html = self.net.http_GET(web_url).content
    tries = 0
    while tries < MAX_TRIES:
        data = helpers.get_hidden(html)
        data.update(captcha_lib.do_captcha(html))
        html = self.net.http_POST(web_url, form_data=data).content
        # Append unpacked script content so both regexes can match it too.
        html += helpers.get_packed_data(html)
        match = re.search('name="src"\s*value="([^"]+)', html)
        if match:
            return match.group(1)
        # try to find source in html
        match = re.search('<span[^>]*>\s*<a\s+href="([^"]+)', html,
                          re.DOTALL)
        if match:
            return match.group(1)
        tries += 1
    raise ResolverError('Unable to resolve kingfiles link. Filelink not found.')
def get_media_url(self, host, media_id):
    """Submit the free-download form and return the direct oneload link."""
    page_url = self.get_url(host, media_id)
    post_headers = {'User-Agent': common.FF_USER_AGENT, 'Referer': page_url}
    download_form = {
        'op': 'download2',
        'id': media_id,
        'rand': '',
        'referer': page_url,
        'method_free': 'Free Download',
        'method_premium': '',
        'adblock_detected': '0'
    }
    page = self.net.http_POST(page_url,
                              form_data=download_form,
                              headers=post_headers).content
    if page:
        # The anchor text must repeat the file name (backreference \2).
        link = re.search(
            r"""href=["'](.+?oneload.co:\d+/d/\w+/([^"']+)).+?>\2</a>""",
            page)
        if link:
            return link.group(1) + helpers.append_headers(post_headers)
    raise ResolverError('Video not found')
def refresh_token(self):
    """Exchange the stored refresh token for a new Debrid-Link access token.

    On success the new token is persisted and installed as the Bearer
    Authorization header. On any failure (including a response without an
    access_token, whose ResolverError is caught by the outer handler) all
    auth settings are reset so the next use triggers a fresh authorize.

    Returns:
        bool: True when the token was refreshed, False otherwise.
    """
    REFRESH_TOKEN = self.get_setting('refresh')
    logger.log_debug('Refreshing Expired Debrid-Link Token: |{0}|'.format(REFRESH_TOKEN))
    try:
        url = '{0}/oauth/token'.format(api_url[:-3])
        data = {'client_id': CLIENT_ID, 'refresh_token': REFRESH_TOKEN, 'grant_type': 'refresh_token'}
        # A stale Authorization header would shadow the refresh grant.
        if self.headers.get('Authorization', False):
            self.headers.pop('Authorization')
        js_result = json.loads(self.net.http_POST(url, form_data=data, headers=self.headers).content)
        if js_result.get('access_token', False):
            self.set_setting('token', js_result.get('access_token'))
            self.headers.update({'Authorization': 'Bearer {0}'.format(self.get_setting('token'))})
            return True
        else:
            # empty all auth settings to force a re-auth on next use
            self.reset_authorization()
            raise ResolverError('Unable to Refresh Debrid-Link Token')
    except Exception as e:
        self.reset_authorization()
        logger.log_debug('Debrid-Link Authorization Failed: {0}'.format(e))
        return False
def get_media_url(self, host, media_id):
    """Follow nested iframes (bounded by MAX_TRIES) to the player page,
    then return its 'file' url with a Referer header appended."""
    start_url = self.get_url(host, media_id)
    # Resolve any initial redirect to the real landing url.
    current_url = self.net.http_GET(start_url)._response.url
    referer_headers = {'Referer': current_url}
    for _ in range(MAX_TRIES):
        flat_html = self.net.http_GET(current_url).content.replace('\n', '')
        frame = re.search('<iframe\s+src\s*=\s*"([^"]+)', flat_html)
        if not frame:
            break
        current_url = frame.group(1)
    player_html = self.net.http_GET(current_url, headers=referer_headers).content
    file_match = re.search('file\s*:\s*"([^"]+)', player_html)
    if file_match:
        return file_match.group(1) + helpers.append_headers({'Referer': current_url})
    raise ResolverError(
        'Unable to resolve youwatch link. Filelink not found.')
def get_media_url(self, host, media_id):
    """Resolve an Italian-language free-download page behind a captcha.

    Submits the hidden form once, then loops posting it with a solved
    captcha (after the site's mandatory 15s wait) until the direct_link
    anchor appears.

    Raises:
        ResolverError: if no link is found within MAX_TRIES attempts.
    """
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.FF_USER_AGENT,
               'Origin': 'https://www.{0}'.format(host),
               'Referer': web_url}
    html = self.net.http_GET(web_url, headers=headers).content
    data = helpers.get_hidden(html)
    data.update({"method_free": "Download Gratuito >>"})
    html = self.net.http_POST(web_url, form_data=data, headers=headers).content
    tries = 0
    while tries < MAX_TRIES:
        data = helpers.get_hidden(html)
        data.update({"method_free": "Download Gratuito >>"})
        data.update(captcha_lib.do_captcha(html))
        # The site enforces a countdown before accepting the form.
        common.kodi.sleep(15000)
        html = self.net.http_POST(web_url, form_data=data, headers=headers).content
        r = re.search(r'''id="direct_link".+?href="([^"]+)''', html, re.DOTALL)
        if r:
            # headers.update({'verifypeer': 'false'})
            # Percent-encode spaces so Kodi accepts the url.
            return r.group(1).replace(' ', '%20') + helpers.append_headers(headers)
        tries += 1
    raise ResolverError('Unable to locate link')
def get_media_url(self, host, media_id):
    """Resolve the page's sources and append a video token ('vt').

    Authorises this client's IP to obtain the token; on an HTTP error
    from the auth endpoint, falls back to the plain source url.

    NOTE(review): when __auth_ip succeeds but returns a falsy token this
    implicitly returns None — confirm callers tolerate that.

    Raises:
        ResolverError: when no sources are found on the page.
    """
    web_url = self.get_url(host, media_id)
    headers = {'Referer': web_url}
    headers.update(self.headers)
    html = self.net.http_GET(web_url, headers=headers).content
    sources = self.__parse_sources_list(html)
    if sources:
        try:
            vt = self.__auth_ip(media_id)
            if vt:
                source = helpers.pick_source(
                    sources, self.get_setting('auto_pick') == 'true')
                return '%s?direct=false&ua=1&vt=%s' % (
                    source, vt) + helpers.append_headers(
                        {'User-Agent': common.SMU_USER_AGENT})
        except urllib2.HTTPError:
            # Auth endpoint refused us; return the plain source url instead.
            source = helpers.pick_source(
                sources, self.get_setting('auto_pick') == 'true')
            return source
    else:
        raise ResolverError('Unable to locate links')
def get_media_url(self, host, media_id):
    """Fetch the flash player's config XML and return its <src> url."""
    page_url = self.get_url(host, media_id)
    page_resp = self.net.http_GET(page_url)
    resp_headers = dict(page_resp._response.info().items())
    # Replay the session cookie the page handed us on follow-up requests.
    req_headers = {
        'Cookie': resp_headers['set-cookie'],
        'User-Agent': common.FF_USER_AGENT
    }
    swf = re.search('player.swf\?f=(.*?)"', page_resp.content)
    if not swf:
        raise ResolverError('no file located')
    xml_url = swf.group(1)
    req_headers['Referer'] = 'http://www.apnasave.club/media/player/player.swf?f=%s' % xml_url
    config_xml = self.net.http_GET(xml_url, headers=req_headers).content
    src = re.search('<src>(.*?)</src>', config_xml)
    return src.group(1) + helpers.append_headers(req_headers)
def get_media_url(self, host, media_id):
    """Return the page's first 'video' JSON url plus the site's ID cookie.

    The site requires its UniversalUserID cookie (read back from the
    cookie jar populated by the GET) on the media request.

    Raises:
        ResolverError: when no video JSON is present on the page.
    """
    web_url = self.get_url(host, media_id)
    html = self.net.http_GET(web_url).content
    # Regexes below expect a byte string (Python 2).
    if isinstance(html, unicode):
        html = html.encode('utf-8', 'ignore')
    match = re.search('''['"]video['"]\s*:\s*(\[[^\]]+\])''', html,
                      re.I | re.M | re.DOTALL)
    if not match:
        raise ResolverError('File Not Found or removed')
    # First entry with a non-empty url wins.
    sources = [
        i.get('url') for i in json.loads(match.group(1)) if i.get('url')
    ][0]
    uuid = self.net.get_cookies().get('.' + host).get('/').get(
        'UniversalUserID').value
    return sources + helpers.append_headers(
        {
            'Cookie': 'UniversalUserID=%s' % uuid,
            'User-Agent': common.RAND_UA
        })
def get_media_url(self, host, media_id):
    """Post the hidden form after a short wait and return a sources url."""
    page_url = self.get_url(host, media_id)
    request_headers = {'User-Agent': common.RAND_UA}
    landing = self.net.http_GET(page_url, headers=request_headers).content
    form_fields = helpers.get_hidden(landing)
    request_headers.update({'Referer': page_url})
    # The site rejects instant submissions; mimic a human pause.
    common.kodi.sleep(3000)
    result = self.net.http_POST(page_url,
                                headers=request_headers,
                                form_data=form_fields).content
    if result:
        unpacked = helpers.get_packed_data(result)
        block = re.search("""sources:\s*\[([^\]]+)""", unpacked)
        if block:
            urls = re.findall("""["']([^"'\s,]+)""", block.group(1))
            if urls:
                from urlparse import urlparse
                labelled = []
                for url in urls:
                    # Label each source with its trailing file name.
                    labelled.append((urlparse(url).path.split('/')[-1], url))
                return helpers.pick_source(labelled) + helpers.append_headers(request_headers)
    raise ResolverError('File not found')
def get_media_url(self, host, media_id):
    """Build an rtmp 'streamer playpath=...' url from the nosvideo SMIL."""
    page_url = self.get_url(host, media_id)
    page = self.net.http_GET(page_url).content
    if 'File Not Found' in page:
        raise ResolverError('File Not Found')
    player_url = 'http://nosvideo.com/vj/video.php?u=%s&w=&h=530' % media_id
    player_page = self.net.http_GET(player_url).content
    # Pick the first quoted value that points at a .smil playlist.
    candidates = re.compile('\':\'(.+?)\'').findall(player_page)
    smil_link = [c for c in candidates if '.smil' in c][0]
    smil = self.net.http_GET(smil_link).content
    rtmp_base = re.findall('base\s*=\s*"(.+?)"', smil)[0]
    play_path = re.findall('src\s*=\s*"(.+?)"', smil)[0]
    return '%s playpath=%s' % (rtmp_base, play_path)