def resolve(url):
    m = _regex(url)
    if m:
        items = []
        quality = "???"
        vurl = m.group('url')
        vurl = re.sub('\&[^$]*', '', vurl)
        util.init_urllib()
        req = urllib2.Request('http://api.video.mail.ru/videos/' + vurl + '.json')
        resp = urllib2.urlopen(req)
        data = resp.read()
        vkey = []
        for cookie in re.finditer('(video_key=[^\;]+)', resp.headers.get('Set-Cookie'),
                                  re.IGNORECASE | re.DOTALL):
            vkey.append(cookie.group(1))
        headers = {'Cookie': vkey[-1]}
        item = util.json.loads(data)
        for qual in item[u'videos']:
            if qual == 'sd':
                quality = "480p"
            elif qual == "hd":
                quality = "640p"
            else:
                quality = "???"
            link = item[u'videos'][qual]
            items.append({'quality': quality, 'url': link, 'headers': headers})
        return items
def resolve(url):
    m = _regex(url)
    if m:
        items = []
        vurl = m.group('url')
        vurl = re.sub('\&[^$]*', '', vurl)
        vurl = re.sub('/embed', '', vurl)
        vurl = 'http://videoapi.my.mail.ru/' + vurl + '.json'
        util.init_urllib()
        req = urllib2.Request(vurl)
        req.add_header('User-Agent', util.UA)
        resp = urllib2.urlopen(req)
        data = resp.read()
        vkey = []
        for cookie in re.finditer('(video_key=[^\;]+)', resp.headers.get('Set-Cookie'),
                                  re.IGNORECASE | re.DOTALL):
            vkey.append(cookie.group(1))
        headers = {'Cookie': vkey[-1]}
        item = util.json.loads(data)
        for v in item[u'videos']:
            quality = v['key']
            link = v['url']
            items.append({'quality': quality, 'url': link, 'headers': headers})
        return items
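# The mail.ru resolvers above return the required video_key cookie in a
# separate 'headers' key, while other resolvers in this collection (e.g. the
# mojevideo and myvi ones further down) bake the headers straight into the URL
# using Kodi's 'url|Header=value' convention. A minimal, illustrative sketch of
# that conversion follows; the helper name is hypothetical and not part of the
# original code.
import urllib


def with_inline_headers(stream):
    # append the resolver-supplied headers to the URL after a '|',
    # urlencoded, so the player passes them along when fetching the stream
    headers = stream.get('headers') or {}
    if not headers:
        return stream['url']
    return stream['url'] + '|' + urllib.urlencode(headers)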
def list(self, url, filter=None):
    if url.find('ucet/favourites') >= 0 and self.login():
        return self.list_favourites(url)
    url = self._url(url)
    util.init_urllib()
    page = util.request(url)
    adult = '0'
    if __settings__('hellspy_adult') == 'true':
        adult = '1'
    if page.find('adultWarn-') > 0:
        page = util.request(url + '&adultControl-state=' + adult + '&do=adultControl-confirmed')
    data = util.substr(page, '<div class=\"file-list file-list-horizontal', '<div id=\"layout-push')
    result = []
    for m in re.finditer('<div class=\"file-entry.+?<div class="preview.+?<div class=\"data.+?</div>',
                         data, re.IGNORECASE | re.DOTALL):
        entry = m.group(0)
        item = self.video_item()
        murl = re.search('<[hH]3><a href=\"(?P<url>[^\"]+)[^>]+>(?P<name>[^<]+)', entry)
        if murl:
            item['url'] = murl.group('url')
            item['title'] = murl.group('name')
        mimg = re.search('<img src=\"(?P<img>[^\"]+)', entry)
        if mimg:
            item['img'] = mimg.group('img')
        msize = re.search('<span class=\"file-size[^>]+>(?P<size>[^<]+)', entry)
        if msize:
            item['size'] = msize.group('size').strip()
        mtime = re.search('<span class=\"duration[^>]+>(?P<time>[^<]+)', entry)
        if mtime:
            item['length'] = mtime.group('time').strip()
        self._filter(result, item)
    # page navigation
    data = util.substr(page, '<div class=\"paginator', '</div')
    mprev = re.search('<li class=\"prev[^<]+<a href=\"(?P<url>[^\"]+)', data)
    if mprev:
        item = self.dir_item()
        item['type'] = 'prev'
        item['url'] = mprev.group('url')
        result.append(item)
    mnext = re.search('<li class=\"next[^<]+<a href=\"(?P<url>[^\"]+)', data)
    if mnext:
        item = self.dir_item()
        item['type'] = 'next'
        item['url'] = mnext.group('url').replace('&amp;', '&')  # decode HTML-escaped ampersands
        result.append(item)
    return result
def __init__(self, username=None, password=None, filter=None):
    ContentProvider.__init__(self, 'sledujufilmy.cz', self.urls['Filmy'],
                             username, password, filter)
    # Work around April Fools' Day page
    util.init_urllib(self.cache)
    cookies = self.cache.get('cookies')
    if not cookies or len(cookies) == 0:
        util.request(self.base_url)
def __init__(self, username=None, password=None, filter=None, reverse_eps=False):
    ContentProvider.__init__(self, name='sosac.ph', base_url=MOVIES_BASE_URL,
                             username=username, password=password, filter=filter)
    util.init_urllib(self.cache)
    cookies = self.cache.get(util.CACHE_COOKIES)
    if not cookies or len(cookies) == 0:
        util.request(self.base_url)
    self.reverse_eps = reverse_eps
def __init__(self, username=None, password=None, filter=None, quickparser=False):
    ContentProvider.__init__(self, 'sledujufilmy.cz', self.urls['Filmy'],
                             username, password, filter)
    # Work around April Fools' Day page
    util.init_urllib(self.cache)
    self.quickparser = quickparser
    cookies = self.cache.get('cookies')
    if not cookies or len(cookies) == 0:
        util.request(self.base_url)
def resolve(self, item, captcha_cb=None, select_cb=None):
    item = item.copy()
    url = self._url(item['url']).replace('×', '%c3%97')
    data = util.substr(util.request(url), '<div id=\"content\"', '#content')
    for script in re.finditer('<script.+?src=\"([^\"]+)', data, re.IGNORECASE | re.DOTALL):
        try:
            data += util.request(script.group(1)).replace('\\\"', '\"')
        except:
            pass
    util.init_urllib()  # need to reinitialize urllib, because anyfiles could have left some cookies
    visionone_resolved, onevision_resolved, scz_resolved = [], [], []
    onevision = re.search('(?P<url>http://onevision\.ucoz\.ua/[^<]+)', data, re.IGNORECASE)
    if onevision:
        onevision_data = util.substr(util.request(onevision.group('url')),
                                     '<td class=\"eText\"', '<td class=\"rightColumn\"')
        onevision_resolved = self.findstreams(onevision_data, [
            '<embed( )src=\"(?P<url>[^\"]+)',
            '<object(.+?)data=\"(?P<url>[^\"]+)',
            '<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]',
            '<object.*?data=(?P<url>.+?)</object>'
        ])
    visionone = re.search('(?P<url>http://visionone\.ucoz\.ru/[^<]+)', data, re.IGNORECASE)
    if visionone:
        visionone_data = util.substr(util.request(visionone.group('url')),
                                     '<td class=\"eText\"', '<td class=\"rightColumn\"')
        visionone_resolved = self.findstreams(visionone_data, [
            '<embed( )src=\"(?P<url>[^\"]+)',
            '<object(.+?)data=\"(?P<url>[^\"]+)',
            '<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]',
            '<object.*?data=(?P<url>.+?)</object>'
        ])
    scz = re.search('(?P<url>http://scz\.uvadi\.cz/\?p=[\d]+)', data, re.IGNORECASE)
    if scz:
        scz_data = util.substr(util.request(scz.group('url')), '<div id=\"content\"', '#content')
        scz_resolved = self.findstreams(scz_data, [
            '<embed( )src=\"(?P<url>[^\"]+)',
            '<object(.+?)data=\"(?P<url>[^\"]+)',
            '<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]',
            '<object.*?data=(?P<url>.+?)</object>'
        ])
    serialy_resolved = self.findstreams(data, [
        '<embed( )src=\"(?P<url>[^\"]+)',
        '<object(.+?)data=\"(?P<url>[^\"]+)',
        '<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]',
        '<object.*?data=(?P<url>.+?)</object>',
        '<p><code><strong>(?P<url>http.+?)</strong></code></p>',
        '<p><code><strong><big>(?P<url>.+?)</big></strong></code></p>'
    ])
    resolved = []
    resolved += serialy_resolved or []
    resolved += visionone_resolved or []
    resolved += onevision_resolved or []
    resolved += scz_resolved or []
    if not resolved:
        return None
    if len(resolved) == 1:
        return resolved[0]
    elif len(resolved) > 1 and select_cb:
        return select_cb(resolved)
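# resolve() above hands the list of candidate streams to select_cb when more
# than one stream was found. A minimal sketch of such a callback, assuming the
# caller simply wants the first candidate (the function name is hypothetical,
# not part of the original code):
def select_first_cb(resolved):
    # each entry is a stream dict as produced by findstreams(); just pick one
    return resolved[0] if resolved else None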
def resolve(self, item, captcha_cb=None, select_cb=None):
    item = item.copy()
    util.init_urllib()
    headers, req = self._create_request(item['url'], {'ident': '', 'wst': self.token})
    data = util.post(self._url('api/file_link/'), req, headers=headers)
    xml = ET.fromstring(data)
    if not xml.find('status').text == 'OK':
        self.error('Server returned error status, response: %s' % data)
        raise ResolveException(xml.find('message').text)
    item['url'] = xml.find('link').text
    return item
def resolve(url):
    m = _regex(url)
    if m:
        util.init_urllib()
        data = util.request(url)
        if data.find('Toto video neexistuje') > 0:  # "This video does not exist"
            util.error('Video bylo smazano ze serveru')  # "The video was deleted from the server"
            return
        player = 'http://www.streamuj.tv/new-flash-player/mplugin4.swf'
        headers = {
            'User-Agent': util.UA,
            'Referer': 'http://www.streamuj.tv/mediaplayer/player.swf',
            'Cookie': ','.join("%s=%s" % (c.name, c.value) for c in util._cookie_jar)
        }
        index = 0
        result = []
        qualities = re.search(r'rn\:[^\"]*\"([^\"]*)', data, re.IGNORECASE | re.DOTALL)
        langs = re.search(r'langs\:[^\"]*\"([^\"]+)', data, re.IGNORECASE | re.DOTALL)
        languages = ['']  # pretend there is at least one language so we read the 1st stream info
        if langs:
            languages = langs.group(1).split(',')
        for language in languages:
            streams = re.search(r'res{index}\:[^\"]*\"([^\"]+)'.format(index=index),
                                data, re.IGNORECASE | re.DOTALL)
            subs = re.search(r'sub{index}\:[^\"]*\"([^\"]+)'.format(index=index),
                             data, re.IGNORECASE | re.DOTALL)
            if subs:
                subs = re.search(r'[^>]+>([^,$]+)', subs.group(1), re.IGNORECASE | re.DOTALL)
            else:
                subs = None
            if streams and qualities:
                streams = streams.group(1).split(',')
                rn = qualities.group(1).split(',')
                qindex = 0
                for stream in streams:
                    q = rn[qindex]
                    if q == 'HD':
                        q = '720p'
                    else:
                        q = 'SD'
                    item = {'url': stream, 'quality': q, 'headers': headers, 'lang': language}
                    if subs:
                        link = subs.group(1)
                        item['lang'] += ' + subs'
                        item['subs'] = link
                    result.append(item)
                    qindex += 1
            index += 1
        return result
def resolve(self, item, captcha_cb=None, select_cb=None):
    item = item.copy()
    util.init_urllib()
    url = self._url(item['url'])
    try:
        request = urllib2.Request(url)
        response = urllib2.urlopen(request)
        page = response.read()
        response.close()
    except urllib2.HTTPError, e:
        traceback.print_exc()
        return
def resolve(url):
    cookies = {}
    util.init_urllib(cookies)
    data = util.request(url)
    view = pickle.loads(util._cookie_jar.dump())['.mojevideo.sk']['/'].keys()[0]
    st = re.search(r'vHash=\[\'([^\']+)', data)
    if not st:
        return None
    st = st.group(1)
    tim = int(time.time())
    base = 'http://fs5.mojevideo.sk:8080/securevd/'
    return [{'url': base + view.replace('view', '') + '.mp4?st=%s&e=%s|Cookie=%s=1' % (st, tim, view)}]
def __init__(self, username=None, password=None, filter=None): try: ContentProvider.__init__(self, name='czsklib', base_url='/', username=username, password=password, filter=filter) util.init_urllib() self.wsuser = username self.wspass = password self.language = language.getLanguage() self.init_trans() orangelog.logDebug("init orangetv..."); self.session = None except: orangelog.logError("init orangetv failed.\n%s"%traceback.format_exc()) pass
def resolve(url):
    m = _regex(url)
    if m:
        util.init_urllib()
        data = util.request(url)
        if data.find('Toto video neexistuje') > 0:  # "This video does not exist"
            util.error('Video bylo smazano ze serveru')  # "The video was deleted from the server"
            return
        player = 'http://www.streamuj.tv/new-flash-player/mplugin4.swf'
        headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0',
                   'Referer': 'http://www.streamuj.tv/mediaplayer/player.swf',
                   'Cookie': ','.join("%s=%s" % (c.name, c.value) for c in util._cookie_jar)}
        burl = b64decode('aHR0cDovL2Z1LWNlY2gucmhjbG91ZC5jb20vcGF1dGg=')
        key = util.request('http://www.streamuj.tv/_key.php?auth=3C27f5wk6qB3g7nZ5SDYf7P7k1572rFH1QxV0QQ')
        index = 0
        result = []
        qualities = re.search('rn\:[^\"]*\"([^\"]*)', data, re.IGNORECASE | re.DOTALL)
        langs = re.search('langs\:[^\"]*\"([^\"]+)', data, re.IGNORECASE | re.DOTALL)
        languages = []
        if not langs:
            languages = ['']  # pretend there is at least one language so we read the 1st stream info
        else:
            languages = langs.group(1).split(',')
        for lang in languages:
            streams = re.search('res' + str(index) + '\:[^\"]*\"([^\"]+)', data, re.IGNORECASE | re.DOTALL)
            subs = re.search('sub' + str(index) + '\:[^\"]*\"([^\"]+)', data, re.IGNORECASE | re.DOTALL)
            if subs:
                subs = re.search('[^>]+>([^$]+)', subs.group(1), re.IGNORECASE | re.DOTALL)
            if streams and qualities:
                streams = streams.group(1).split(',')
                rn = qualities.group(1).split(',')
                qindex = 0
                for stream in streams:
                    res = json.loads(util.post_json(burl, {'link': stream, 'player': player, 'key': key}))
                    stream = res['link']
                    q = rn[qindex]
                    if q == 'HD':
                        q = '720p'
                    else:
                        q = 'SD'
                    l = ' ' + lang
                    if subs:
                        l += ' + subs'
                        s = subs.group(1)
                        s = json.loads(util.post_json(burl, {'link': s, 'player': player, 'key': key}))
                        result.append({'url': stream, 'quality': q, 'subs': s['link'],
                                       'headers': headers, 'lang': l})
                    else:
                        result.append({'url': stream, 'quality': q, 'headers': headers, 'lang': l})
                    qindex += 1
            index += 1
        return result
def __init__(self, username=None, password=None, filter=None, uid=None):
    ContentProvider.__init__(self, name='czsklib', base_url=sctop.BASE_URL,
                             username=username, password=password, filter=filter)
    self.tr = tracker.TrackerInfo().getSystemInfo()
    self.uid = uid
    util.UA = self.tr['useragent'] + ' ver' + str(sctop.addonInfo('version'))
    #util.debug("[SC] tr: %s" % str(self.tr))
    self.cache = sctop.cache
    util.debug("[SC] init cache %s" % self.cache.__class__.__name__)
    util.init_urllib(self.cache)
    cookies = self.cache.get('cookies')
    #if not cookies or len(cookies) == 0:
    #    util.request(self._url(self.base_url))
    self.ws = None
def resolve(url):
    if not _regex(url) == None:
        util.init_urllib()
        web_url = get_host(url)
        data = util.substr(util.request(web_url), '<form method=\"post', '</form>')
        # need to POST an input called 'confirm' to the url; the 'hash' and 'confirm' fields are found in the form
        m = re.search('<input(.+?)value=\"(?P<hash>[^\"]+)(.+?)name=\"hash\"(.+?)<input name=\"confirm\"(.+?)value=\"(?P<confirm>[^\"]+)',
                      data, re.IGNORECASE | re.DOTALL)
        if not m == None:
            data = util.post(web_url, {'confirm': m.group('confirm'), 'hash': m.group('hash')})
            # now we've got the (flow)player
            data = util.substr(data, 'flowplayer(', '</script>')
            n = re.search('playlist\: \'(?P<pls>[^\']+)', data, re.IGNORECASE | re.DOTALL)
            if not n == None:
                # now download the playlist
                xml = util.request('http://www.putlocker.com' + n.group('pls'))
                stream = re.search('url=\"([^\"]+)\" type=\"video', xml, re.IGNORECASE | re.DOTALL).group(1)
                return [{'url': stream}]
def resolve(url):
    cookies = {}
    util.init_urllib(cookies)
    data = util.request(url)
    view = list(pickle.loads(util._cookie_jar.dump())['.mojevideo.sk']['/'].keys())[0]
    st = re.search(r'vHash=\[\'([^\']+)', data)
    if not st:
        return None
    st = st.group(1)
    tim = int(time.time())
    base = 'http://fs5.mojevideo.sk:8080/securevd/'
    return [{'url': base + view.replace('view', '') + '.mp4?st=%s&e=%s|Cookie=%s=1' % (st, tim, view)}]
def __init__(self, username=None, password=None, filter=None, uid=None):
    ContentProvider.__init__(self, name='czsklib', base_url=sctop.BASE_URL,
                             username=username, password=password, filter=filter)
    self.tr = tracker.TrackerInfo().getSystemInfo()
    self.uid = uid
    util.UA = self.tr['useragent']
    #util.debug("[SC] tr: %s" % str(self.tr))
    util.init_urllib(self.cache)
    cookies = self.cache.get('cookies')
    if not cookies or len(cookies) == 0:
        util.request(self.base_url)
    self.ws = None
def resolve(self, item, captcha_cb=None, select_cb=None):
    item = item.copy()
    util.init_urllib()
    url = self._url(item['url'])
    page = ''
    try:
        opener = urllib2.OpenerDirector()
        opener.add_handler(urllib2.HTTPHandler())
        opener.add_handler(urllib2.UnknownHandler())
        urllib2.install_opener(opener)
        request = urllib2.Request(url)
        request.add_header('User-Agent', util.UA)
        response = urllib2.urlopen(request)
        page = response.read()
        response.close()
    except urllib2.HTTPError, e:
        traceback.print_exc()
        return
def resolve(url):
    cookies = {}
    result = []
    util.init_urllib(cookies)
    id = re.search(r'.*player/flash/(?P<url>.+)', url).group('url')
    r = util.request('http://myvi.ru/player/api/Video/Get/%s?sig' % id)
    jsondata = demjson.decode(r)
    playlist = jsondata['sprutoData']['playlist'][0]
    uuid = pickle.loads(util._cookie_jar.dump())['.myvi.ru']['/']['UniversalUserID']
    for f in playlist['video']:
        streamurl = f['url']
        streamurl += '|Cookie=UniversalUserID%3D' + urllib.quote(uuid.value)
        streamurl += '&User-Agent=' + UA
        result.append({'url': streamurl})
    if result:
        return result
    else:
        return None
def resolve(url):
    cookies = {}
    result = []
    util.init_urllib(cookies)
    id = re.search(r'.*player/flash/(?P<url>.+)', url).group('url')
    r = util.request('http://myvi.ru/player/api/Video/Get/%s?sig' % id)
    jsondata = demjson.decode(r)
    playlist = jsondata['sprutoData']['playlist'][0]
    uuid = pickle.loads(util._cookie_jar.dump())['.myvi.ru']['/']['UniversalUserID']
    for f in playlist['video']:
        streamurl = f['url']
        streamurl += '|Cookie=UniversalUserID%3D' + urllib.quote(uuid.value)
        streamurl += '&User-Agent=' + UA
        result.append({'url': streamurl})
    if result:
        return result
    else:
        return None
def __init__(self, username=None, password=None, filter=None): try: ContentProvider.__init__(self, name='czsklib', base_url='/', username=username, password=password, filter=filter) util.init_urllib() self.wsuser = username self.wspass = password self.language = language.getLanguage() self.init_trans() orangelog.logDebug("init orangetv...") self.session = None except: orangelog.logError("init orangetv failed.\n%s" % traceback.format_exc()) pass
def list(self, url, filter=None):
    if url.find('ucet/favourites') >= 0 and self.login():
        return self.list_favourites(url)
    url = self._url(url)
    util.init_urllib()
    page = util.request(url)
    data = util.substr(page, '<div class=\"file-list file-list-horizontal', '<div id=\"layout-push')
    result = []
    for m in re.finditer('<div class=\"file-entry.+?<div class="preview.+?<div class=\"data.+?</div>',
                         data, re.IGNORECASE | re.DOTALL):
        entry = m.group(0)
        item = self.video_item()
        murl = re.search('<[hH]3><a href=\"(?P<url>[^\"]+)[^>]+>(?P<name>[^<]+)', entry)
        item['url'] = murl.group('url')
        item['title'] = murl.group('name')
        mimg = re.search('<img src=\"(?P<img>[^\"]+)', entry)
        if mimg:
            item['img'] = mimg.group('img')
        msize = re.search('<span class=\"file-size[^>]+>(?P<size>[^<]+)', entry)
        if msize:
            item['size'] = msize.group('size').strip()
        mtime = re.search('<span class=\"duration[^>]+>(?P<time>[^<]+)', entry)
        if mtime:
            item['length'] = mtime.group('time').strip()
        self._filter(result, item)
    # page navigation
    data = util.substr(page, '<div class=\"paginator', '</div')
    mprev = re.search('<li class=\"prev[^<]+<a href=\"(?P<url>[^\"]+)', data)
    if mprev:
        item = self.dir_item()
        item['type'] = 'prev'
        item['url'] = mprev.group('url')
        result.append(item)
    mnext = re.search('<li class=\"next[^<]+<a href=\"(?P<url>[^\"]+)', data)
    if mnext:
        item = self.dir_item()
        item['type'] = 'next'
        item['url'] = mnext.group('url').replace('&amp;', '&')  # decode HTML-escaped ampersands
        result.append(item)
    return result
def __init__(self, username=None, password=None, filter=None, uid=None):
    ContentProvider.__init__(self, name='czsklib', base_url=sctop.BASE_URL,
                             username=username, password=password, filter=filter)
    self.tr = tracker.TrackerInfo().getSystemInfo()
    self.uid = uid
    util.UA = self.tr['useragent'] + ' ver' + str(sctop.addonInfo('version'))
    #util.debug("[SC] tr: %s" % str(self.tr))
    self.cache = sctop.cache
    self.ws = wx(sctop.getSetting('wsuser'), sctop.getSetting('wspass'), self.cache)
    util.debug("[SC] init cache %s" % self.cache.__class__.__name__)
    util.init_urllib(self.cache)
    cookies = self.cache.get('cookies')
    hasTrakt = str(sctop.getSetting('trakt.token') != '')
    util.debug('[SC] has trakt: %s' % hasTrakt)
    sctop.win.setProperty('sc.trakt', hasTrakt)
def resolve(url):
    m = _regex(url)
    if m:
        items = []
        vurl = m.group('url')
        vurl = re.sub('\&[^$]*', '', vurl)
        vurl = re.sub('/embed', '', vurl)
        vurl = 'http://videoapi.my.mail.ru/' + vurl + '.json'
        util.init_urllib()
        req = urllib2.Request(vurl)
        req.add_header('User-Agent', util.UA)
        resp = urllib2.urlopen(req)
        data = resp.read()
        vkey = []
        for cookie in re.finditer('(video_key=[^\;]+)', resp.headers.get('Set-Cookie'),
                                  re.IGNORECASE | re.DOTALL):
            vkey.append(cookie.group(1))
        headers = {'Cookie': vkey[-1]}
        item = util.json.loads(data)
        for v in item[u'videos']:
            quality = v['key']
            link = v['url']
            items.append({'quality': quality, 'url': link, 'headers': headers})
        return items
def resolve(self, item, captcha_cb=None, select_cb=None):
    item = item.copy()
    url = self._url(item['url'])
    data = util.request(url)
    data = util.substr(data, '<div id=\"stred', '<div id=\'patka>')
    for script in re.finditer('<script.+?src=\"([^\"]+)', data, re.IGNORECASE | re.DOTALL):
        try:
            data += util.request(script.group(1)).replace('\\\"', '\"')
        except:
            pass
    util.init_urllib()  # need to reinitialize urllib, because anyfiles could have left some cookies
    result = self.findstreams(data, [
        '<embed( )*flashvars=\"file=(?P<url>[^\"]+)',
        '<embed( )src=\"(?P<url>[^\"]+)',
        '<object(.+?)data=\"(?P<url>[^\"]+)',
        '<object.*?data=(?P<url>.+?)</object>',
        '<iframe(.+?)src=[\"\' ](?P<url>.+?)[\'\" ]',
    ])
    if len(result) == 1:
        return result[0]
    elif len(result) > 1 and select_cb:
        return select_cb(result)
# (The snippet starts mid-function: the dispatch block below is presumably the
#  body of the addon's main(p) entry point, which is invoked at module level
#  further down; the function header itself was not captured.)
def main(p):
    if "dvd" in p.keys():
        dvd(p)
    if "fav" in p.keys():
        favourites(p)
    if "filmoteka" in p.keys():
        filmoteka(p)
    if "artists" in p.keys():
        artists(p)
    if "search-plugin" in p.keys():
        search_plugin(p["search-plugin"], p["url"], p["action"])
    if "item" in p.keys():
        item(p)
    if "person" in p.keys():
        person(p)
    if "preload-refresh" in p.keys():
        return preload_refresh()
    if "play" in p.keys():
        play(p["play"])
    search.main(__addon__, "search_history_movies", p, _search_movie_cb, "s", "movie")
    search.main(__addon__, "search_history_persons", p, _search_person_cb, "s", "person")
    __addon__.setSetting("last-url", sys.argv[2])


p = util.params()
util.init_urllib()
if __addon__.getSetting("clear-cache") == "true":
    util.info("Cleaning all cache entries...")
    __addon__.setSetting("clear-cache", "false")
    __cache__.delete("http%")
main(p)
def resolve(url): m = _regex(url) if m: util.init_urllib() data = util.request(url) if data.find('Toto video neexistuje') > 0: util.error('Video bylo smazano ze serveru') return player = 'http://www.streamuj.tv/new-flash-player/mplugin4.swf' headers = { 'User-Agent': util.UA, 'Referer': 'http://www.streamuj.tv/mediaplayer/player.swf', 'Cookie': ','.join("%s=%s" % (c.name, c.value) for c in util._cookie_jar) } burl = b64decode('aHR0cDovL2Z1LWNlY2gucmhjbG91ZC5jb20vcGF1dGg=') key = util.request( 'http://www.streamuj.tv/_key.php?auth=3C27f5wk6qB3g7nZ5SDYf7P7k1572rFH1QxV0QQ' ) index = 0 result = [] qualities = re.search(r'rn\:[^\"]*\"([^\"]*)', data, re.IGNORECASE | re.DOTALL) langs = re.search(r'langs\:[^\"]*\"([^\"]+)', data, re.IGNORECASE | re.DOTALL) languages = [ '' ] # pretend there is at least language so we read 1st stream info if langs: languages = langs.group(1).split(',') for language in languages: streams = re.search( r'res{index}\:[^\"]*\"([^\"]+)'.format(index=index), data, re.IGNORECASE | re.DOTALL) subs = re.search( r'sub{index}\:[^\"]*\"([^\"]+)'.format(index=index), data, re.IGNORECASE | re.DOTALL) if subs: subs = re.search(r'[^>]+>([^,$]+)', subs.group(1), re.IGNORECASE | re.DOTALL) else: subs = None if streams and qualities: streams = streams.group(1).split(',') rn = qualities.group(1).split(',') qindex = 0 for stream in streams: res = json.loads( util.post_json(burl, { 'link': stream, 'player': player, 'key': key })) stream = res['link'] q = rn[qindex] if q == 'HD': q = '720p' else: q = 'SD' item = { 'url': stream, 'quality': q, 'headers': headers, 'lang': language } if subs: link = subs.group(1) response = json.loads( util.post_json(burl, { 'link': link, 'player': player, 'key': key })) if 'link' in response: item['lang'] += ' + subs' item['subs'] = response[u'link'] else: util.error( "Could not fetch subtitles from '{}'".format( link)) util.error("Server response: {}".format(response)) result.append(item) qindex += 1 index += 1 return result
def __init__(self, username=None, password=None, filter=None):
    ContentProvider.__init__(self, 'teevee.sk', 'http://www.teevee.sk',
                             username, password, filter)
    util.init_urllib(self.cache)
def __init__(self, username=None, password=None, filter=None, tmp_dir='/tmp', quality='0'):
    ContentProvider.__init__(self, 'videoarchiv.markiza.sk', 'http://videoarchiv.markiza.sk',
                             username, password, filter, tmp_dir)
    util.init_urllib()
    self.quality = quality
    self.useCache = True
def resolve(self, item, captcha_cb=None, select_cb=None):
    item = item.copy()
    util.init_urllib()
    url = self._url(item['url'])
    page = ''
    try:
        opener = OpenerDirector()
        opener.add_handler(HTTPHandler())
        opener.add_handler(UnknownHandler())
        install_opener(opener)
        request = Request(url)
        request.add_header('User-Agent', util.UA)
        response = urlopen(request)
        page = response.read()
        response.close()
    except HTTPError as e:
        traceback.print_exc()
        return
    data = util.substr(page, '<form method=post target=\"iframe_dwn\"', '</form>')
    action = re.search('action=(?P<url>[^>]+)', data, re.IGNORECASE | re.DOTALL)
    img = re.search('<img src=\"(?P<url>[^\"]+)', data, re.IGNORECASE | re.DOTALL)
    if img and action:
        sessid = []
        for cookie in re.finditer('(PHPSESSID=[^\;]+)', response.headers.get('Set-Cookie'),
                                  re.IGNORECASE | re.DOTALL):
            sessid.append(cookie.group(1))
        # we have to download the captcha image ourselves
        image = util.request(self._url(img.group('url')),
                             headers={'Referer': url, 'Cookie': sessid[-1]})
        img_file = os.path.join(self.tmp_dir, 'captcha.png')
        util.save_data_to_file(image, img_file)
        code = None
        if captcha_cb:
            code = captcha_cb({'id': '0', 'img': img_file})
        if not code:
            self.info('No captcha received, exit')
            return
        request = urllib.urlencode({'code': code})
        req = Request(self._url(action.group('url')), request)
        req.add_header('User-Agent', util.UA)
        req.add_header('Referer', url)
        req.add_header('Cookie', sessid[-1])
        try:
            resp = urlopen(req)
            if resp.code == 302:
                file_url = resp.headers.get('location')
            else:
                file_url = resp.geturl()
            if file_url.find(action.group('url')) > 0:
                msg = resp.read()
                resp.close()
                js_msg = re.search('alert\(\'(?P<msg>[^\']+)', msg, re.IGNORECASE | re.DOTALL)
                if js_msg:
                    raise ResolveException(js_msg.group('msg'))
                self.error(msg)
                raise ResolveException('Nelze ziskat soubor, zkuste to znovu')  # "Unable to get the file, try again"
            resp.close()
            if file_url.find('data') >= 0 or file_url.find('download_free') > 0:
                item['url'] = file_url
                return item
            self.error('wrong captcha, retrying')
            return self.resolve(item, captcha_cb, select_cb)
        except HTTPError:
            traceback.print_exc()
            return
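# resolve() above delegates captcha solving to captcha_cb, passing a dict with
# the captcha 'id' and the path of the downloaded image ('img'), and expecting
# the solved code back (a falsy return aborts the resolve). A minimal
# console-based callback, assuming Python 2 and an interactive terminal; the
# function name is hypothetical and not part of the original code:
def console_captcha_cb(captcha):
    # the image has already been saved by resolve(); just tell the user where it is
    print('Captcha image saved to: %s' % captcha['img'])
    return raw_input('Enter the captcha code: ').strip()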
def resolve(url):
    m = _regex(url)
    if not m == None:
        util.init_urllib()
        data = util.request('http://videobb.com/player_control/settings.php?v=%s&em=TRUE&fv=v1.1.67' % m.group('id'))
        json = data.replace('false', 'False').replace('true', 'True').replace('null', 'None')
        aData = eval('(' + json + ')')
        max_res = 99999
        r = re.finditer('"l".*?:.*?"(.+?)".+?"u".*?:.*?"(.+?)"', json)
        chosen_res = 0
        stream_url = False
        stream_url_part1 = False
        if r:
            for match in r:
                print(match.groups())
                res, url = match.groups()
                res = int(res.strip('p'))
                if res > chosen_res and res <= max_res:
                    stream_url_part1 = url.decode('base-64')
                    chosen_res = res
        else:
            return
        if not stream_url_part1:
            return
        # Decode the link from the json data settings
        spn_ik = unhexlify(__decrypt(aData["settings"]["login_status"]["spen"],
                                     aData["settings"]["login_status"]["salt"],
                                     950569)).split(';')
        spn = spn_ik[0].split('&')
        ik = spn_ik[1]
        for item in ik.split('&'):
            temp = item.split('=')
            if temp[0] == 'ik':
                key = __get_key(temp[1])
        sLink = ""
        for item in spn:
            item = item.split('=')
            if (int(item[1]) == 1):
                sLink = sLink + item[0] + '=' + __decrypt(
                    aData["settings"]["info"]["sece2"],
                    aData["settings"]["config"]["rkts"], key) + '&'  # decrypt32byte
            elif (int(item[1]) == 2):
                sLink = sLink + item[0] + '=' + __decrypt(
                    aData["settings"]["banner"]["g_ads"]["url"],
                    aData["settings"]["config"]["rkts"], key) + '&'
            elif (int(item[1]) == 3):
                sLink = sLink + item[0] + '=' + __decrypt(
                    aData["settings"]["banner"]["g_ads"]["type"],
                    aData["settings"]["config"]["rkts"], key,
                    26, 25431, 56989, 93, 32589, 784152) + '&'
            elif (int(item[1]) == 4):
                sLink = sLink + item[0] + '=' + __decrypt(
                    aData["settings"]["banner"]["g_ads"]["time"],
                    aData["settings"]["config"]["rkts"], key,
                    82, 84669, 48779, 32, 65598, 115498) + '&'
            elif (int(item[1]) == 5):
                sLink = sLink + item[0] + '=' + __decrypt(
                    aData["settings"]["login_status"]["euno"],
                    aData["settings"]["login_status"]["pepper"], key,
                    10, 12254, 95369, 39, 21544, 545555) + '&'
            elif (int(item[1]) == 6):
                sLink = sLink + item[0] + '=' + __decrypt(
                    aData["settings"]["login_status"]["sugar"],
                    aData["settings"]["banner"]["lightbox2"]["time"], key,
                    22, 66595, 17447, 52, 66852, 400595) + '&'
        sLink = sLink + "start=0"
        stream_url = stream_url_part1 + '&' + sLink
        return [{'url': stream_url}]
def resolve(url): m = _regex(url) if m: util.init_urllib() data = util.request(url) if data.find('Toto video neexistuje') > 0: util.error('Video bylo smazano ze serveru') return player = 'http://www.streamuj.tv/new-flash-player/mplugin4.swf' headers = { 'User-Agent': util.UA, 'Referer': 'http://www.streamuj.tv/mediaplayer/player.swf', 'Cookie': ','.join("%s=%s" % (c.name, c.value) for c in util._cookie_jar) } index = 0 result = [] qualities = re.search(r'rn\:[^\"]*\"([^\"]*)', data, re.IGNORECASE | re.DOTALL) langs = re.search(r'langs\:[^\"]*\"([^\"]+)', data, re.IGNORECASE | re.DOTALL) languages = [ '' ] # pretend there is at least language so we read 1st stream info if langs: languages = langs.group(1).split(',') for language in languages: streams = re.search( r'res{index}\:[^\"]*\"([^\"]+)'.format(index=index), data, re.IGNORECASE | re.DOTALL) subs = re.search( r'sub{index}\:[^\"]*\"([^\"]+)'.format(index=index), data, re.IGNORECASE | re.DOTALL) if subs: subs = re.search(r'[^>]+>([^,$]+)', subs.group(1), re.IGNORECASE | re.DOTALL) else: subs = None if streams and qualities: streams = streams.group(1).split(',') rn = qualities.group(1).split(',') qindex = 0 for stream in streams: q = rn[qindex] if q == 'HD': q = '720p' else: q = 'SD' item = { 'url': stream, 'quality': q, 'headers': headers, 'lang': language } if subs: link = subs.group(1) item['lang'] += ' + subs' item['subs'] = link result.append(item) qindex += 1 index += 1 return result