def get_media_url(self, host, media_id):
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.FF_USER_AGENT}
    html = self.net.http_GET(web_url, headers=headers).content
    if html:
        # Quality links point back at the same page with an added &q= parameter.
        srcs = re.findall(r'href="(%s&q=[^"]+)' % web_url, html, re.I)
        if srcs:
            sources = []
            for src in srcs:
                shtml = self.net.http_GET(src, headers=headers).content
                strurl = helpers.parse_html5_source_list(shtml)
                if strurl:
                    sources.append(strurl[0])
            if len(sources) > 1:
                # Prefer a numeric quality sort; fall back to an alphabetic sort.
                try:
                    sources.sort(key=lambda x: int(re.sub(r"\D", "", x[0])), reverse=True)
                except:
                    common.logger.log_debug('Scrape sources sort failed |int(re.sub(r"\D", "", x[0]))|')
                    try:
                        sources.sort(key=lambda x: re.sub("[^a-zA-Z]", "", x[0]))
                    except:
                        common.logger.log_debug('Scrape sources sort failed |re.sub("[^a-zA-Z]", "", x[0])|')
        else:
            sources = helpers.parse_html5_source_list(html)
        return helpers.pick_source(sources) + helpers.append_headers(headers)
    raise ResolverError("Video not found")
def get_media_url(self, host, media_id):
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.FF_USER_AGENT}
    try:
        html = self.net.http_GET(web_url, headers=headers).content
    except urllib2.HTTPError as e:
        if e.code == 404:
            raise ResolverError("Video not found")
        raise  # re-raise anything other than a 404 so html is never left undefined
    # Quality links point back at the same page with an added &q= parameter.
    srcs = re.findall(r'href="(%s&q=[^"]+)' % web_url, html, re.I)
    if srcs:
        sources = []
        for src in srcs:
            shtml = self.net.http_GET(src, headers=headers).content
            strurl = helpers.parse_html5_source_list(shtml)
            if strurl:
                sources.append(strurl[0])
    else:
        sources = helpers.parse_html5_source_list(html)
    if len(sources) > 0:
        sources = helpers.sort_sources_list(sources)
        return helpers.pick_source(sources) + helpers.append_headers(headers)
    else:
        raise ResolverError("Video not found")
def get_media_url(self, host, media_id):
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.FF_USER_AGENT}
    try:
        html = self.net.http_GET(web_url, headers=headers).content
    except urllib2.HTTPError as e:
        if e.code == 404:
            raise ResolverError("Video not found")
        raise  # re-raise anything other than a 404 so html is never left undefined
    srcs = re.findall(r'href="(%s&q=[^"]+)' % web_url, html, re.I)
    if srcs:
        sources = []
        for src in srcs:
            shtml = self.net.http_GET(src, headers=headers).content
            strurl = helpers.parse_html5_source_list(shtml)
            if strurl:
                sources.append(strurl[0])
        sources = helpers.sort_sources_list(sources)
    else:
        sources = helpers.parse_html5_source_list(html)
    if len(sources) > 0:
        return helpers.pick_source(sources) + helpers.append_headers(headers)
    else:
        raise ResolverError("Video not found")
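# The two resolvers above hand the quality ordering to helpers.sort_sources_list.
# A minimal sketch of what that helper is assumed to do (mirroring the inline
# sort in the first resolver): order (label, url) pairs by the numeric part of
# the label, highest first, and leave the list unchanged when labels are not
# numeric. _sort_sources_sketch is an illustrative name, not part of helpers.
def _sort_sources_sketch(sources):
    try:
        return sorted(sources, key=lambda x: int(re.sub(r'\D', '', x[0])), reverse=True)
    except (TypeError, ValueError):
        return sources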
def get_media_url(self, host, media_id):
    web_url = self.get_url(host, media_id)
    html = self.net.http_GET(web_url).content
    try:
        # Old flash player path: resolve the filekey and query the player API.
        r = re.search('flashvars.filekey=(.+?);', html)
        if r is None:
            raise Exception()
        r = r.group(1)
        try:
            # filekey may be a variable name; look up its last assigned value.
            filekey = re.compile(r'\s+%s="(.+?)"' % r).findall(html)[-1]
        except:
            filekey = r
        player_url = 'http://www.auroravid.to/api/player.api.php?key=%s&file=%s' % (filekey, media_id)
        html = self.net.http_GET(player_url).content
        r = re.search('url=(.+?)&', html)
        if r:
            stream_url = r.group(1)
            return stream_url
    except:
        # Fall back to the HTML5 player sources on the original page.
        sources = helpers.parse_html5_source_list(html)
        source = helpers.pick_source(sources, self.get_setting('auto_pick') == 'true')
        return source + helpers.append_headers({'User-Agent': common.FF_USER_AGENT})
    raise ResolverError('File Not Found or removed')
def get_media_url(self, host, media_id):
    web_url = self.get_url(host, media_id)
    html = self.net.http_GET(web_url).content
    try:
        r = re.search('flashvars.filekey=(.+?);', html)
        if r is None:
            raise Exception()
        r = r.group(1)
        try:
            filekey = re.compile(r'\s+%s="(.+?)"' % r).findall(html)[-1]
        except:
            filekey = r
        player_url = 'http://www.auroravid.to/api/player.api.php?key=%s&file=%s' % (filekey, media_id)
        html = self.net.http_GET(player_url).content
        r = re.search('url=(.+?)&', html)
        if r:
            stream_url = r.group(1)
            return stream_url
    except:
        sources = helpers.parse_html5_source_list(html)
        source = helpers.pick_source(sources)
        return source + helpers.append_headers({'User-Agent': common.FF_USER_AGENT})
    raise ResolverError('File Not Found or removed')
def get_media_url(self, host, media_id):
    try:
        web_url = self.get_url(host, media_id)
        self.headers['Referer'] = web_url
        html = self.net.http_GET(web_url, headers=self.headers).content
        if isinstance(html, unicode):
            html = html.encode('utf-8', 'ignore')
        if 'Uptobox.com is not available in your country' in html:
            raise ResolverError('Unavailable in your country')
        r = re.search('(You have to wait (?:[0-9]+ minute[s]*, )*[0-9]+ second[s]*)', html)
        if r:
            raise ResolverError('Cooldown in effect')
        # Re-submit the hidden form fields; retry a few times before giving up.
        data = helpers.get_hidden(html)
        for _ in range(0, 3):
            try:
                html = self.net.http_POST(web_url, data, headers=self.headers).content
                if isinstance(html, unicode):
                    html = html.encode('utf-8', 'ignore')
                stream_url = re.search(r'<a\shref\s*=[\'"](.+?)[\'"]\s*>\s*<span\sclass\s*=\s*[\'"]button_upload green[\'"]\s*>', html).group(1)
                return stream_url
            except:
                xbmc.sleep(1000)
    except:
        pass

    try:
        # Fall back to the streaming page and its HTML5 sources.
        web_url = self.get_stream_url(host, media_id)
        self.headers['Referer'] = web_url
        html = self.net.http_GET(web_url, headers=self.headers).content
        if isinstance(html, unicode):
            html = html.encode('utf-8', 'ignore')
        if 'Uptobox.com is not available in your country' in html:
            raise ResolverError('Unavailable in your country')
        '''
        r = re.search('(You have reached the limit of *[0-9]+ minute[s]*)', html)
        if r:
            raise Exception()
        '''
        sources = helpers.parse_html5_source_list(html)
        try:
            sources.sort(key=lambda x: x[0], reverse=True)
        except:
            pass
        source = helpers.pick_source(sources)
        if source.startswith('//'):
            source = 'http:' + source
        return source
    except:
        pass

    raise ResolverError('File not found')
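# helpers.get_hidden supplies the form data POSTed back in the resolver above. A
# minimal sketch of the assumed behaviour: collect the name/value pairs of the
# hidden <input> fields on the waiting page so they can be re-submitted
# unchanged. The regex assumes a type/name/value attribute order, and the name
# _get_hidden_sketch is illustrative only, not the real helper.
def _get_hidden_sketch(html):
    data = {}
    pattern = r'<input[^>]+type=["\']hidden["\'][^>]+name=["\']([^"\']+)["\'][^>]+value=["\']([^"\']*)'
    for name, value in re.findall(pattern, html, re.I):
        data[name] = value
    return data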
def get_media_url(self, host, media_id):
    headers = {'User-Agent': common.IOS_USER_AGENT}
    web_url = self.get_url(host, media_id)
    html = self.net.http_GET(web_url, headers=headers).content
    sources = helpers.parse_html5_source_list(html)
    source = helpers.pick_source(sources, self.get_setting('auto_pick') == 'true')
    return source + helpers.append_headers(headers)
def get_media_url(self, host, media_id):
    web_url = self.get_url(host, media_id)
    html = self.net.http_GET(web_url).content
    if 'File was deleted' in html:
        raise ResolverError('File was deleted')
    sources = helpers.parse_html5_source_list(html)
    source = helpers.pick_source(sources, self.get_setting('auto_pick') == 'true')
    return source + helpers.append_headers({'User-Agent': common.FF_USER_AGENT})
def get_media_url(self, host, media_id):
    web_url = self.get_url(host, media_id)
    headers = {'User-Agent': common.FF_USER_AGENT}
    response = self.net.http_GET(web_url, headers=headers)
    html = response.content
    # Look for jwplayer-style source entries carrying explicit url/label keys.
    sources = re.findall(r'''['"]?url['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', html)
    if sources:
        common.logger.log_debug('Scraped source: %s' % sources[-1][0].replace('\\', ''))
        return sources[-1][0].replace('\\', '')
    # Otherwise fall back to the generic HTML5 <source> parser.
    sources = helpers.parse_html5_source_list(html)
    common.logger.log_debug('HTML5 sources: %s' % sources)
    return helpers.pick_source(sources) + helpers.append_headers(headers)
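# For illustration, the url/label pattern above targets jwplayer-style source
# lists; the fragment below is hypothetical markup showing what re.findall
# returns and why the escaped slashes are stripped afterwards.
_SAMPLE_SETUP = '''sources: [{"url":"https:\\/\\/example.com\\/lo.mp4","label":"360p"},
                            {"url":"https:\\/\\/example.com\\/hi.mp4","label":"720p"}]'''
# re.findall(pattern, _SAMPLE_SETUP) yields one (url, label) tuple per entry;
# taking sources[-1][0].replace('\\', '') gives 'https://example.com/hi.mp4'.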
def get_media_url(self, host, media_id):
    web_url = self.get_url(host, media_id)
    url = 'http://www.%s/' % host
    resp = self.net.http_GET(url)
    headers = resp.get_headers(as_dict=True)
    headers = {
        'Cookie': headers.get('set-cookie', ''),
        'User-Agent': common.FF_USER_AGENT,
        'Referer': web_url
    }
    html = self.net.http_GET(web_url, headers=headers).content
    sources = helpers.parse_html5_source_list(html)
    source = helpers.pick_source(sources, self.get_setting('auto_pick') == 'true')
    return source + helpers.append_headers(headers)
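# Most resolvers above return pick_source(...) + helpers.append_headers(headers).
# A minimal sketch of the assumed behaviour: Kodi-style playable URLs carry their
# request headers after a '|' separator as a urlencoded query string, so the
# player sends them with the request. _append_headers_sketch is illustrative only.
import urllib

def _append_headers_sketch(headers):
    return '|%s' % urllib.urlencode(headers)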