Example #1
 def _resolve_vod(self, item):
     resolved = []
     data = util.request(self._url(item['url']))
     video_id = re.search("LiveboxPlayer.archiv\(.+?videoId:\s*'([^']+)'", data, re.DOTALL).group(1)
     #print "video_id", video_id
     player_data = util.request("http://embed.livebox.cz/ta3_v2/vod-source.js", {'Referer':self._url(item['url'])})
     #print "player_data", player_data
     url_format = re.search(r'my.embedurl = \[\{"src" : "([^"]+)"', player_data).group(1)
     #print "url_format", url_format
     manifest_url = "https:" + url_format.format(video_id)
     #print "manifest_url", manifest_url
     manifest = util.request(manifest_url)
     print "manifest", manifest
     for m in re.finditer('#EXT-X-STREAM-INF:PROGRAM-ID=\d+,BANDWIDTH=(?P<bandwidth>\d+).*?(,RESOLUTION=(?P<resolution>\d+x\d+))?\s(?P<chunklist>[^\s]+)', manifest, re.DOTALL):
         item = self.video_item()
         item['surl'] = item['title']
         item['quality'] = m.group('bandwidth')
         item['url'] = manifest_url[:manifest_url.rfind('/')+1] + m.group('chunklist')
         resolved.append(item)
     resolved = sorted(resolved, key=lambda x:int(x['quality']), reverse=True)
     if len(resolved) == 3:
         qualities = ['720p', '480p', '360p']
         for idx, item in enumerate(resolved):
             item['quality'] = qualities[idx]
     else:
         for idx, item in enumerate(resolved):
             item['quality'] += 'b/s'
     return resolved
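
The loop in Example #1 pulls the variant streams out of the HLS master playlist with a single regex. As an illustration only, the same parsing can be done line by line; the helper name parse_master_playlist is not part of the plugin, and it assumes manifest holds the playlist text and manifest_url its address:

import re
from urlparse import urljoin  # Python 3: from urllib.parse import urljoin

def parse_master_playlist(manifest, manifest_url):
    """Return variant streams from an HLS master playlist, best bandwidth first."""
    variants = []
    lines = manifest.splitlines()
    for idx, line in enumerate(lines):
        if not line.startswith('#EXT-X-STREAM-INF'):
            continue
        bandwidth = re.search(r'BANDWIDTH=(\d+)', line)
        resolution = re.search(r'RESOLUTION=(\d+x\d+)', line)
        # the chunklist URI is the first non-tag line following #EXT-X-STREAM-INF
        uri = next((l for l in lines[idx + 1:] if l and not l.startswith('#')), None)
        if uri is None:
            continue
        variants.append({
            'url': urljoin(manifest_url, uri),
            'bandwidth': int(bandwidth.group(1)) if bandwidth else 0,
            'resolution': resolution.group(1) if resolution else None,
        })
    return sorted(variants, key=lambda v: v['bandwidth'], reverse=True)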
Example #2
 def resolve(self, item, captcha_cb=None, select_cb=None):
     result = []
     item = item.copy()
     url = item['url']
     if url.endswith('live.html'):
         channel = re.search(r'http://(\w+)\.joj\.sk', url).group(1)
         for original, replacement in {'www': 'joj', 'plus': 'jojplus'}.items():
             if channel == original:
                 channel = replacement
                 break
         for quality, resolution in {'lq': '180p', 'mq': '360p', 'hq': '540p'}.items():
             item = self.video_item()
             item['quality'] = resolution
             item['url'] = 'http://http-stream.joj.sk/joj/' + channel + '/index-' + quality + '.m3u8'
             result.append(item)
     else:
         data = util.request(url)
         playerdata = re.search(r'<div\ class=\"jn-player\"(.+?)>', data).group(1)
         pageid = re.search(r'data-pageid=[\'\"]([^\'\"]+)', playerdata).group(1)
         basepath = re.search(r'data-basepath=[\'\"]([^\'\"]+)', playerdata).group(1)
         videoid = re.search(r'data-id=[\'\"]([^\'\"]+)', playerdata).group(1)
         playlisturl = basepath + 'services/Video.php?clip=' + videoid + '&pageId=' + pageid
         playlist = fromstring(util.request(playlisturl))
         balanceurl = basepath + 'balance.xml?nc=%d' % random.randint(1000, 9999)
         balance = fromstring(util.request(balanceurl))
         for video in playlist.find('files').findall('file'):
             item = self.video_item()
             item['img'] = playlist.attrib.get('large_image')
             item['length'] = playlist.attrib.get('duration')
             item['quality'] = video.attrib.get('quality')
             item['url'] = self.rtmp_url(video.attrib.get('path'), playlist.attrib.get('url'),
                                         video.attrib.get('type'), balance)
             result.append(item)
         result.reverse()
     return select_cb(result)
def resolve(url):
    data = util.extract_jwplayer_setup(util.request(url))
    if data and 'sources' in data:
        result = []
        for source in data['sources']:
            items = []
            if source['file'].endswith('.smil'):
                tree = ElementTree.fromstring(util.request(source['file']))
                base_path = tree.find('./head/meta').get('base')
                for video in tree.findall('./body/switch/video'):
                    items.append({
                        'url': '%s playpath=%s pageUrl=%s swfUrl=%s swfVfy=true' %
                               (base_path, video.get('src'), url,
                                'http://static.flashx.tv/player6/jwplayer.flash.swf'),
                        'quality': video.get('height') + 'p'
                    })
            else:
                items.append({'url': source['file']})
            if data.get('tracks'):
                for item in items:
                    for track in data['tracks']:
                        new_item = deepcopy(item)
                        new_item['subs'] = track['file']
                        new_item['lang'] = ' %s subtitles' % track['label']
                        result.append(new_item)
            else:
                result += items
        return result
    return None
Example #4
 def list(self, url):
     if url.find('zebricky/') == 0:
         return self.list_top10(util.request(self.base_url+url))
     if url.find("#related#") == 0:
         return self.list_related(util.request(url[9:]))
     else:
         return self.list_content(util.request(self._url(url)), self._url(url))
Example #5
 def resolve(self,item,captcha_cb=None,select_cb=None):
     item = item.copy()
     url = self._url(item['url'])
     data = util.request(self._url(item['url']))	
     data = util.substr(data,'<div class=\"video','</div')
     sosac = re.search('\"(http\://[\w]+\.sosac\.ph[^\"]+)',data,re.DOTALL)
     if sosac:
         data = util.request(sosac.group(1))
     resolved = resolver.findstreams(data,[
         '<embed( )*flashvars=\"file=(?P<url>[^\"]+)',
         '<embed( )src=\"(?P<url>[^\"]+)',
         '<object(.+?)data=\"(?P<url>[^\"]+)',
         '<iframe(.+?)src=[\"\' ](?P<url>.+?)[\'\" ]',
         ])
     result = []
     if not resolved:
         self.error('Nothing resolved')
     for i in resolved:
         item = self.video_item()
         item['title'] = i['name']
         item['url'] = i['url']
         item['quality'] = i['quality']
         item['surl'] = i['surl']
         item['subs'] = i['subs']
         result.append(item)	
     if len(result)==1:
         return result[0]
     elif len(result) > 1 and select_cb:
         return select_cb(result)
Example #6
 def resolve(self, item, captcha_cb=None, select_cb=None):
     item = item.copy()
     url = self._url(item["url"])
     data = util.request(self._url(item["url"]))
     data = util.substr(data, '<div class="video', "</div")
     sosac = re.search('"(http\://[\w]+\.sosac\.ph[^"]+)', data, re.DOTALL)
     if sosac:
         data = util.request(sosac.group(1))
     resolved = resolver.findstreams(
         data,
         [
             '<embed( )*flashvars="file=(?P<url>[^"]+)',
             '<embed( )src="(?P<url>[^"]+)',
             '<object(.+?)data="(?P<url>[^"]+)',
             "<iframe(.+?)src=[\"' ](?P<url>.+?)['\" ]",
             "<object.*?data=(?P<url>.+?)</object>",
         ],
     )
     result = []
     if not resolved:
         self.error("Nothing resolved")
     for i in resolved:
         item = self.video_item()
         item["title"] = i["name"]
         item["url"] = i["url"]
         item["quality"] = i["quality"]
         item["surl"] = i["surl"]
         item["subs"] = i["subs"]
         item["headers"] = i["headers"]
         result.append(item)
     if len(result) == 1:
         return result[0]
     elif len(result) > 1 and select_cb:
         return select_cb(result)
Example #7
 def resolve(self,item,captcha_cb=None,wait_cb=None):
     item = item.copy()
     url = self._url(item['url'])
     item['surl'] = url
     data = util.request(url)
     link = re.search('<a class="stahnoutSoubor.+?href=\"([^\"]+)',data)
     if link:
         url = self._url(link.group(1))
         data = util.request(url)
         m = re.search('<img src=\"(?P<img>[^\"]+)\" alt=\"Captcha\"',data)
         cap_id = re.search('<input type=\"hidden\" name=\"_uid_captcha.+?value=\"(?P<cid>[^\"]+)',data)
         if m and cap_id:
             cid = cap_id.group('cid')
             img_data = m.group('img')[m.group('img').find('base64,')+7:]
             if not os.path.exists(self.tmp_dir):
                 os.makedirs(self.tmp_dir)
             tmp_image = os.path.join(self.tmp_dir,'captcha.png')
             util.save_data_to_file(base64.b64decode(img_data),tmp_image)
             code = captcha_cb({'id':cid,'img': tmp_image})
             if not code:
                 return
             data = util.post(url+'?do=stahnoutFreeForm-submit',{'_uid_captcha':cid,'captcha':code,'stahnoutSoubor':'Stáhnout'})
             countdown = re.search('shortly\.getSeconds\(\) \+ (\d+)',data)
             last_url = re.search('<a class=\"stahnoutSoubor2.+?href=\"([^\"]+)',data)
             if countdown and last_url:
                 wait = int(countdown.group(1))
                 url = self._url(last_url.group(1))
                 wait_cb(wait)
                 req = urllib2.Request(url)
                 req.add_header('User-Agent',util.UA)    
                 resp = urllib2.urlopen(req)
                 item['url'] = resp.geturl()
                 return item
Example #8
 def resolve(self,item,captcha_cb=None):
     item = item.copy()
     url = item['url']
     if url.startswith('http://www.ulozto.sk'):
         url = 'http://www.ulozto.cz' + url[20:]
     if url.startswith('#'):
         ret = json.loads(util.request(url[1:]))
         if not ret['result'] == 'null':
             url = b64decode(ret['result'])
             url = self._url(url)
     if url.startswith('#'):
         util.error('[uloz.to] - url was not correctly decoded')
         return
     self.init_urllib()
     self.info('Resolving %s'% url)
     logged_in = self.login()
     if logged_in:
         page = util.request(url)
     else:
         try:
             request = urllib2.Request(url)
             response = urllib2.urlopen(request)
             page = response.read()
             response.close()
         except urllib2.HTTPError, e:
             traceback.print_exc()
             return
Example #9
    def resolve(self, item, captcha_cb=None, select_cb=None):
        item = item.copy()
        url = self._url(item["url"])
        data = util.request(url)
        video_id = re.search(VIDEO_ID_RE, data, re.IGNORECASE | re.DOTALL).group(1)
        headers = {"Referer": url}
        keydata = util.request("http://embed.stv.livebox.sk/v1/tv-arch.js", headers)
        rtmp_url_regex = "'(rtmp:\/\/[^']+)'\+videoID\+'([^']+)'"
        m3u8_url_regex = "'(http:\/\/[^']+)'\+videoID\+'([^']+)'"
        rtmp = re.search(rtmp_url_regex, keydata, re.DOTALL)
        m3u8 = re.search(m3u8_url_regex, keydata, re.DOTALL)
        m3u8_url = m3u8.group(1) + video_id + m3u8.group(2)

        # rtmp[t][e|s]://hostname[:port][/app[/playpath]]
        # tcUrl=url URL of the target stream. Defaults to rtmp[t][e|s]://host[:port]/app.

        # rtmp url fix, based on the mponline2 project
        rtmp_url = rtmp.group(1) + video_id + rtmp.group(2)
        stream_part = "mp4:" + video_id
        playpath = rtmp_url[rtmp_url.find(stream_part) :]
        tcUrl = rtmp_url[: rtmp_url.find(stream_part) - 1] + rtmp_url[rtmp_url.find(stream_part) + len(stream_part) :]
        app = tcUrl[tcUrl.find("/", tcUrl.find("/") + 2) + 1 :]

        # rtmp_url = rtmp_url+ ' playpath=' + playpath + ' tcUrl=' + tcUrl + ' app=' + app
        rtmp_url = rtmp_url + " tcUrl=" + tcUrl + " app=" + app
        item["url"] = rtmp_url
        return item
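
The string slicing above derives the playpath, tcUrl and app from the assembled RTMP URL. A rough sketch of the same split for the common case (the helper name and the 'mp4:' marker default are illustrative, not part of the original plugin):

def split_rtmp_url(rtmp_url, marker='mp4:'):
    """Split a full RTMP URL into (tcUrl, app, playpath) around the playpath marker."""
    pos = rtmp_url.find(marker)
    if pos < 0:
        return rtmp_url, '', ''
    playpath = rtmp_url[pos:]           # e.g. 'mp4:<videoID>'
    tcurl = rtmp_url[:pos].rstrip('/')  # 'rtmp://host[:port]/app'
    # app is whatever follows 'scheme://host[:port]/'
    app = tcurl[tcurl.find('/', tcurl.find('//') + 2) + 1:]
    return tcurl, app, playpath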
Example #10
 def list_episodes(self, url, page=0):
     result = []
     if url.find('ajax.json') != -1:
         headers = {'X-Requested-With':'XMLHttpRequest',
                    'Referer':util.substr(url, url, url.split('/')[-1])
                    }
         httpdata = util.request(url, headers)
         httpdata = util.json.loads(httpdata)['content']
     else:
         httpdata = util.request(url)
         httpdata = util.substr(httpdata, EPISODE_START, EPISODE_END)
        
     entries = 0
     skip_entries = MAX_PAGE_ENTRIES * page
 
     for m in re.finditer(EPISODE_ITER_RE, httpdata, re.DOTALL | re.IGNORECASE):
         entries += 1
         if entries < skip_entries:
             continue
         item = self.video_item()
         item['title'] = "%s. %s (%s)" % (m.group('episode'), m.group('title'), m.group('date'))
         item['url'] = m.group('url')
         self._filter(result, item)
         if entries >= (skip_entries + MAX_PAGE_ENTRIES):
             page += 1
             item = self.dir_item()
             item['type'] = 'next'
             item['url'] = "#episodes##%d#" % (page) + url
             result.append(item)
             break
     return result
Example #11
    def resolve(self, item, captcha_cb=None, select_cb=None):
        result = []
        item = item.copy()
        url = item['url']
        if url.endswith('live.html'):
            for quality in ['360','540','720']:
                item = self.video_item()
                item['quality'] = quality + 'p'
                item['url'] = self.rtmp_url(fix_path(re.search('http://(\w+).joj.sk', url).group(1)) + '-' + quality, url)
                result.append(item)
        else:
            data = util.request(url)
            playerdata = re.search(r'<div\ class=\"jn-player\"(.+?)>',data).group(1)
            pageid = re.search(r'data-pageid=[\'\"]([^\'\"]+)',playerdata).group(1) 
            basepath = re.search(r'data-basepath=[\'\"]([^\'\"]+)',playerdata).group(1)
            videoid = re.search(r'data-id=[\'\"]([^\'\"]+)',playerdata).group(1)
            playlisturl = basepath + 'services/Video.php?clip=' + videoid + '&pageId=' + pageid
            playlist = fromstring(util.request(playlisturl))
            balanceurl = basepath + 'balance.xml?nc=%d' % random.randint(1000, 9999)
            balance = fromstring(util.request(balanceurl))
            for video in playlist.find('files').findall('file'):
                item = self.video_item()
                item['img'] = playlist.attrib.get('large_image')
                item['length'] = playlist.attrib.get('duration')
                item['quality'] = video.attrib.get('quality')
                item['url'] = self.rtmp_url(video.attrib.get('path'), playlist.attrib.get('url'), video.attrib.get('type'), balance)
                result.append(item)
        result.reverse()
        return select_cb(result)
Example #12
 def __init__(self, username=None, password=None, filter=None, reverse_eps=False):
     ContentProvider.__init__(self, name='sosac.ph', base_url=MOVIES_BASE_URL, username=username,
                              password=password, filter=filter)
     util.init_urllib(self.cache)
     cookies = self.cache.get(util.CACHE_COOKIES)
     if not cookies or len(cookies) == 0:
         util.request(self.base_url)
     self.reverse_eps = reverse_eps
Example #13
 def list(self, url):
     if url.find('subcat') == 0:
         category_id = url.split("#")[1]
         return self.list_subcategories(util.request(self.base_url), category_id)
     elif url.find('calendar') == 0:
         year, month = url.split("#")[1].split("|")
         return self.calendar(int(year), int(month))
     return self.list_content(util.request(self._url(url)))
def resolve(url):
    refererurl = re.search(r'<iframe src="([^"]+)".*', util.request(url), re.I | re.S).group(1)
    try:
        data=[x for x in util.request(refererurl).splitlines() if 'file:' in x and '.mp4' in x][0]
    except:
        return None
    streamurl = re.search(r'.*file:"([^"]+?)".*', data).group(1)
    headers={'Referer': refererurl}
    return [{'url': streamurl, 'headers': headers}]
def resolve(url):
    if supports(url):
        data = util.request(url)
        m = re.search('flashvars.file=\"([^\"]+)',data,re.IGNORECASE | re.DOTALL)
        n = re.search('flashvars.filekey=\"([^\"]+)',data,re.IGNORECASE | re.DOTALL)
        if not m == None and not n == None:
            data = util.request('http://www.novamov.com/api/player.api.php?key=%s&file=%s&user=undefined&pass=undefined&codes=1' % (n.group(1),m.group(1)))
            stream = re.search('url=([^\&]+)',data).group(1)
            return [{'url':stream}]
 def __init__(self, username=None, password=None, filter=None, quickparser=False):
     ContentProvider.__init__(self, 'sledujufilmy.cz', self.urls['Filmy'],
                              username, password, filter)
     # Work around April Fools' Day page
     util.init_urllib(self.cache)
     self.quickparser=quickparser
     cookies = self.cache.get('cookies')
     if not cookies or len(cookies) == 0:
         util.request(self.base_url)
Example #17
 def list(self,url):
     if url.find('#film#') == 0:
         return self.film(util.request(self._url(url[6:])))
     if url.find('#rand#') == 0:
         return self.film(util.request(self._url(url[6:])+'index.php?id=2236'))
     if url.find('#last#') == 0:
         return self.film(util.request(self._url(url[6:])))
     else:
         raise Exception("Invalid url, I do not know how to list it :"+url)
Example #18
 def list(self,url):
     if url.find('#film#') == 0:
         return self.film(util.request(self._url(url[6:])))
     if url.find('#cat#') == 0:
         return self.cat(util.request(self._url(url[5:])))
     if url.find('#last#') == 0:
         return self.film(util.request(self._url(url[6:])))
     else:
         raise Exception("Invalid url, I do not know how to list it :"+url)
Example #19
 def list(self, url):
     if url.find('#subcat#') == 0:
         url = url[8:]
         return self.list_subcategories(util.request(self._url(url)), url)
     elif url.find("#date#") == 0:
         year = int(url.split("#")[2])
         month = int(url.split("#")[3])
         return self.date(year, month)
     return self.list_content(util.request(self._url(url)))
Example #20
 def resolve(self,item,captcha_cb=None,select_cb=None):
     item = item.copy()
     url = self._url(item['url']).replace('×', '%c3%97')
     data = util.substr(util.request(url), '<div id=\"content\"', '#content')
     visionone_resolved, onevision_resolved, scz_resolved = [],[],[]
     
     onevision = re.search('(?P<url>http://onevision\.ucoz\.ua/[^<]+)', data, re.IGNORECASE)
     if onevision:
         onevision_data = util.substr(util.request(onevision.group('url')),'<td class=\"eText\"','<td class=\"rightColumn\"')
         onevision_resolved=resolver.findstreams(onevision_data, ['<embed( )src=\"(?P<url>[^\"]+)',
                                               '<object(.+?)data=\"(?P<url>[^\"]+)',
                                               '<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]',
                                               '<object.*?data=(?P<url>.+?)</object>'])
     
     visionone = re.search('(?P<url>http://visionone\.ucoz\.ru/[^<]+)', data, re.IGNORECASE)
     if visionone:
         visionone_data = util.substr(util.request(visionone.group('url')),'<td class=\"eText\"','<td class=\"rightColumn\"')
         visionone_resolved = resolver.findstreams(visionone_data, ['<embed( )src=\"(?P<url>[^\"]+)',
                                               '<object(.+?)data=\"(?P<url>[^\"]+)',
                                               '<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]',
                                               '<object.*?data=(?P<url>.+?)</object>'])
     scz = re.search('(?P<url>http://scz\.uvadi\.cz/\?p=[\d]+)', data, re.IGNORECASE)
     if scz:
         scz_data = util.substr(util.request(scz.group('url')),'<div id=\"content\"', '#content')
         scz_resolved = resolver.findstreams(scz_data, ['<embed( )src=\"(?P<url>[^\"]+)',
                                               '<object(.+?)data=\"(?P<url>[^\"]+)',
                                               '<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]',
                                               '<object.*?data=(?P<url>.+?)</object>'])
         
     serialy_resolved = resolver.findstreams(data, ['<embed( )src=\"(?P<url>[^\"]+)',
                                            '<object(.+?)data=\"(?P<url>[^\"]+)',
                                            '<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]',
                                            '<object.*?data=(?P<url>.+?)</object>',
                                            '<p><code><strong>(?P<url>http.+?)</strong></code></p>',
                                            '<p><code><strong><big>(?P<url>.+?)</big></strong></code></p>'])
     
     resolved = []
     resolved += serialy_resolved or []
     resolved += visionone_resolved or []
     resolved += onevision_resolved or []
     resolved += scz_resolved or []
     
     result = []
     for i in resolved:
         item = self.video_item()
         item['title'] = i['name']
         item['url'] = i['url']
         item['quality'] = i['quality']
         item['surl'] = i['surl']
         item['headers'] = i['headers']
         result.append(item) 
     if len(result) == 1:
         return result[0]
     elif len(result) > 1 and select_cb:
         return select_cb(result)
Example #21
 def to_downloads(self,url):
     if not self.login():
         util.error('[hellspy] login failed, unable to add to downloads')
     util.info('adding to downloads')
     try:
         util.request(self._url(url+'&do=downloadControl-favourite'))
     except urllib2.HTTPError:
         traceback.print_exc()
         util.error('[hellspy] failed to add to downloads')
         return
     util.info('added, DONE')
Example #22
 def resolve(self,item,captcha_cb=None,select_cb=None):
     item = item.copy()
     url = self._url(item['url']).replace('×', '%c3%97')
     data = util.substr(util.request(url), '<div id=\"content\"', '#content')
     
     for script in re.finditer('<script.+?src=\"([^\"]+)',data,re.IGNORECASE|re.DOTALL):
         try:
             data += util.request(script.group(1)).replace('\\\"','\"')
         except:
             pass
     util.init_urllib() # need to reinitialize urllib, because AnyFiles could have left some cookies
     visionone_resolved, onevision_resolved, scz_resolved = [],[],[]
     
     onevision = re.search('(?P<url>http://onevision\.ucoz\.ua/[^<]+)', data, re.IGNORECASE)
     if onevision:
         onevision_data = util.substr(util.request(onevision.group('url')),'<td class=\"eText\"','<td class=\"rightColumn\"')
         onevision_resolved=self.findstreams(onevision_data, ['<embed( )src=\"(?P<url>[^\"]+)',
                                               '<object(.+?)data=\"(?P<url>[^\"]+)',
                                               '<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]',
                                               '<object.*?data=(?P<url>.+?)</object>'])
     
     visionone = re.search('(?P<url>http://visionone\.ucoz\.ru/[^<]+)', data, re.IGNORECASE)
     if visionone:
         visionone_data = util.substr(util.request(visionone.group('url')),'<td class=\"eText\"','<td class=\"rightColumn\"')
         visionone_resolved = self.findstreams(visionone_data, ['<embed( )src=\"(?P<url>[^\"]+)',
                                               '<object(.+?)data=\"(?P<url>[^\"]+)',
                                               '<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]',
                                               '<object.*?data=(?P<url>.+?)</object>'])
     scz = re.search('(?P<url>http://scz\.uvadi\.cz/\?p=[\d]+)', data, re.IGNORECASE)
     if scz:
         scz_data = util.substr(util.request(scz.group('url')),'<div id=\"content\"', '#content')
         scz_resolved = self.findstreams(scz_data, ['<embed( )src=\"(?P<url>[^\"]+)',
                                               '<object(.+?)data=\"(?P<url>[^\"]+)',
                                               '<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]',
                                               '<object.*?data=(?P<url>.+?)</object>'])
         
     serialy_resolved = self.findstreams(data, ['<embed( )src=\"(?P<url>[^\"]+)',
                                            '<object(.+?)data=\"(?P<url>[^\"]+)',
                                            '<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]',
                                            '<object.*?data=(?P<url>.+?)</object>',
                                            '<p><code><strong>(?P<url>http.+?)</strong></code></p>',
                                            '<p><code><strong><big>(?P<url>.+?)</big></strong></code></p>'])
     
     resolved = []
     resolved += serialy_resolved or []
     resolved += visionone_resolved or []
     resolved += onevision_resolved or []
     resolved += scz_resolved or []
     
     if len(resolved) == 1:
         return resolved[0]
     elif len(resolved) > 1 and select_cb:
         return select_cb(resolved)
def resolve(url):
    if not _regex(url) == None:
        data = util.request(url.replace('&#038;','&'))
        data = util.substr(data,'flashvars','params')
        domain = re.search('flashvars\.domain=\"([^\"]+)',data,re.IGNORECASE | re.DOTALL).group(1)
        file = re.search('flashvars\.file=\"([^\"]+)',data,re.IGNORECASE | re.DOTALL).group(1)
        key = re.search('flashvars\.filekey=\"([^\"]+)',data,re.IGNORECASE | re.DOTALL).group(1)
        data = util.request('%s/api/player.api.php?key=%s&file=%s&user=undefined&codes=undefined&pass=undefined'% (domain,key,file))
        m = re.search('url=(?P<url>[^\&]+)',data,re.IGNORECASE | re.DOTALL)
        if not m == None:
            return [{'url':m.group('url')}]
def resolve(url):
    m = _regex(url)
    f=None
    if not m is None:
        try:
            data = util.request('http://www.zkouknito.cz/player/scripts/videoinfo_externi.php?id=%s' % m.group('id'))
            f = re.search('<file>([^<]+)', data, re.IGNORECASE | re.DOTALL)
        except Exception:
            data = util.request(url)
            f = re.search("\'file\':.*?'([^']+)", data, re.IGNORECASE | re.DOTALL)
        if f:
            return [{'url':f.group(1)}] 
Example #25
 def list(self, url):
     if url.find('#az#') == 0:
         return self.az()
     elif url.find('#new#') == 0:
         return self.list_new(util.request(self.archive_url))
     elif url.find('#top#') == 0:
         return self.list_top(util.request(self.archive_url))
     elif url.find('#listaz#') == 0:
         url = url[8:]
         return self.list_az(util.request(self.archive_url + url))
     else:
         return self.list_episodes(util.request(self._url(url)))
def resolve(url):
    m = _regex(url)
    if m:
        data = util.request(url)
        sid = re.search("sid=(?P<sid>[^\&]+)", data)
        if sid:
            data = util.request(
                "http://www.vuuzla.com/app/deliver/playlist/%s?sid=%s" % (m.group("id"), sid.group("sid"))
            )
            link = re.search('<video.+?url="(?P<url>[^"]+)', data)
            if link:
                return [{"url": link.group("url")}]
Example #27
    def _checkurl(self,url):
        """
        We have to check the .dds file for an error
        """

        try:
            util.request(url+'.dds')
            return True
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        except:
            return False
Example #28
    def list(self,url):
        if url.find('#show#') == 0:
            self.od='<div class=\'obsah\''
            self.do='</div>'
            return self.show(util.request(self._url(url[6:])))
        if url.find('#new#') == 0:
            self.od='<center>Nejnovější epizody'
            self.do='<script language="JavaScript">'
            return self.show(util.request(self._url(url[:5])))
        if url.find('#cat#') == 0:
            return self.episodes(util.request(self._url(url[5:])))
        else:
            raise Exception("Invalid url, I do not know how to list it :"+url)
Example #29
 def list(self,url):
     if url.find('category/new-episode') == 0:
         return self.new_episodes(util.request(self._url(url)))
     result = []
     page = util.request(self._url(url))
     data = util.substr(page,'<div id=\"archive-posts\"','</div>')
     m = re.search('<a(.+?)href=\"(?P<url>[^\"]+)', data, re.IGNORECASE | re.DOTALL)
     if m:
         data = util.request(m.group('url'))
         for m in re.finditer('<a href=\"(?P<url>[^\"]+)(.+?)(<strong>|<b>)(?P<name>[^<]+)', util.substr(data,'<div class=\"entry-content','</div>'), re.IGNORECASE | re.DOTALL):
             item = self.video_item()
             item['title'] = util.decode_html(m.group('name'))
             item['url'] = m.group('url')
             self._filter(result,item)
     return result
Example #30
 def library_movies_all_xml(self):
     page = util.request('http://tv.prehraj.me/filmyxml.php')
     pagedata = util.substr(page, '<select name=\"rok\">', '</select>')
     pageitems = re.finditer('<option value=\"(?P<url>[^\"]+)\">(?P<name>[^<]+)</option>', 
                             pagedata, re.IGNORECASE | re.DOTALL)
     pagetotal = float(len(list(pageitems)))
     pageitems = re.finditer('<option value=\"(?P<url>[^\"]+)\">(?P<name>[^<]+)</option>', 
                             pagedata, re.IGNORECASE | re.DOTALL)
     util.info("PocetRoku: %d" % pagetotal)
     pagenum = 0
     for m in pageitems:
         pagenum += 1
         if self.parent.dialog.iscanceled():
             return
         pageperc = float(pagenum / pagetotal) * 100
         util.info("Rokpercento: %d" % int(pageperc))
         data = util.request('http://tv.prehraj.me/filmyxml.php?rok=' +
                             m.group('url') + '&sirka=670&vyska=377&affid=0#')
         tree = ET.fromstring(data)
         total = float(len(list(tree.findall('film'))))
         util.info("TOTAL: %d" % total)
         num = 0
         for film in tree.findall('film'):
             num += 1
             perc = float(num / total) * 100
             util.info("percento: %d" % int(perc))
             if self.parent.dialog.iscanceled():
                 return
             item = self.video_item()
             try:
                 if ISO_639_1_CZECH in self.ISO_639_1_CZECH:
                     title = film.findtext('nazevcs').encode('utf-8')
                 else:
                     title = film.findtext('nazeven').encode('utf-8')
                 self.parent.dialog.update(int(perc), str(pagenum) + '/' + str(int(pagetotal)) +
                                           ' [' + m.group('url') + '] ->  ' + title)
                 item['title'] = '%s (%s)' % (title, film.findtext('rokvydani'))
                 item['name'] = item['title']
                 item['url'] = 'http://movies.prehraj.me/' + self.ISO_639_1_CZECH + \
                     'player/' + self.parent.make_name(title + '-' + film.findtext('rokvydani'))
                 item['menu'] = {"[B][COLOR red]Add to library[/COLOR][/B]": {
                     'url': item['url'], 'action': 'add-to-library', 'name': item['title']}}
                 item['update'] = True
                 item['notify'] = False
                 self.parent.add_item(item)
             except Exception, e:
                 util.error("ERR TITLE: " + item['title'] + " | " + str(e))
                 pass
Example #31
def resolve(url):
    m = _regex(url)
    if m:
        util.init_urllib()
        data = util.request(url)
        if data.find('Toto video neexistuje') > 0:
            util.error('Video bylo smazano ze serveru')
            return
        player = 'http://www.streamuj.tv/new-flash-player/mplugin4.swf'
        headers = {
            'User-Agent':
            util.UA,
            'Referer':
            'http://www.streamuj.tv/mediaplayer/player.swf',
            'Cookie':
            ','.join("%s=%s" % (c.name, c.value) for c in util._cookie_jar)
        }
        burl = 'http://' + API_SERVER + "/pauth"
        key = util.request(
            'http://www.streamuj.tv/_key.php?auth=3C27f5wk6qB3g7nZ5SDYf7P7k1572rFH1QxV0QQ'
        )
        index = 0
        result = []
        qualities = re.search(r'rn\:[^\"]*\"([^\"]*)', data,
                              re.IGNORECASE | re.DOTALL)
        langs = re.search(r'langs\:[^\"]*\"([^\"]+)', data,
                          re.IGNORECASE | re.DOTALL)
        languages = [
            ''
        ]  # pretend there is at least one language so we read the 1st stream info
        if langs:
            languages = langs.group(1).split(',')
        for language in languages:
            streams = re.search(
                r'res{index}\:[^\"]*\"([^\"]+)'.format(index=index), data,
                re.IGNORECASE | re.DOTALL)
            subs = re.search(
                r'sub{index}\:[^\"]*\"([^\"]+)'.format(index=index), data,
                re.IGNORECASE | re.DOTALL)
            if subs:
                subs = re.search(r'[^>]+>([^,$]+)', subs.group(1),
                                 re.IGNORECASE | re.DOTALL)
            else:
                subs = None
            if streams and qualities:
                streams = streams.group(1).split(',')
                rn = qualities.group(1).split(',')
                qindex = 0
                for stream in streams:
                    res = json.loads(
                        util.post_json(burl, {
                            'link': stream,
                            'player': player,
                            'key': key
                        }))
                    stream = res['link']
                    q = rn[qindex]
                    if q == 'HD':
                        q = '720p'
                    else:
                        q = 'SD'
                    item = {
                        'url': stream,
                        'quality': q,
                        'headers': headers,
                        'lang': language
                    }
                    if subs:
                        link = subs.group(1)
                        response = json.loads(
                            util.post_json(burl, {
                                'link': link,
                                'player': player,
                                'key': key
                            }))
                        if 'link' in response:
                            item['lang'] += ' + subs'
                            item['subs'] = response[u'link']
                        else:
                            util.error(
                                "Could not fetch subtitles from '{}'".format(
                                    link))
                            util.error("Server response: {}".format(response))
                    result.append(item)
                    qindex += 1
            index += 1
        return result
Example #32
def getTrakt(url, post=None, output='content', method=None):
    try:
        use_ssl = sctop.getSettingAsBool('UseSSL')
        url = urlparse.urljoin(
            'http%s://api.trakt.tv' % ('s' if use_ssl else ''), url)

        headers = {'trakt-api-key': sctop.trCL, 'trakt-api-version': '2'}

        if getTraktCredentialsInfo() == False:
            util.debug("[SC] gt 1 data: %s %s" % (str(url), str(post)))
            if post is not None:
                result = util.post_json(url, post, headers)
            else:
                result = util.request(url, headers)
            util.debug("[SC] gt 1 result: %s" % str(result))
            return result

        headers['Authorization'] = 'Bearer %s' % sctop.getSetting(
            'trakt.token')
        #util.debug('[SC] token %s' % sctop.getSetting('trakt.token'))

        if post is not None:
            result, code = sctop.post_json(url, post, headers, "extend")
            info = None
        else:
            result, code, info = sctop.request(url,
                                               headers,
                                               "info",
                                               method=method)
        #util.debug("[SC] trakt gt result: %s %s" % (str(result), str(code)))
        if not (code == 401 or code == 405):
            if output == "content":
                return result
            else:
                return (result, code, info)

        oauth = 'http%s://api.trakt.tv/oauth/token' % ('s' if use_ssl else '')
        opost = {
            'client_id': sctop.trCL,
            'client_secret': sctop.trSC,
            'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob',
            'grant_type': 'refresh_token',
            'refresh_token': sctop.getSetting('trakt.refresh')
        }

        result, code = sctop.post_json(oauth, opost, headers, "extend")
        if code == 401:
            authTrakt()
            result, code = sctop.post_json(oauth, opost, headers, "extend")
        result = json.loads(result)

        token, refresh = result['access_token'], result['refresh_token']

        sctop.setSetting(setting='trakt.token', value=token)
        sctop.setSetting(setting='trakt.refresh', value=refresh)

        headers['Authorization'] = 'Bearer %s' % token

        util.debug('[SC] token: %s' % token)

        result = sctop.post_json(url, post, headers)
        return result
    except Exception as e:
        util.error(e)
        pass
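
A typical call, given the helper above and already-configured Trakt credentials; /users/me is a standard Trakt endpoint, and with the default output='content' the helper returns the raw response body (a sketch, not part of the original module):

import json

profile = getTrakt('/users/me')
if profile:
    # the Trakt user object carries a 'username' field
    print(json.loads(profile)['username'])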
Example #33
 def parse_html(url):
     return BeautifulSoup(util.request(url))
Example #34
def resolve(url):
    if not _regex(url) == None:
        data = util.substr(util.request(url), '<embed type=\"video/divx', '>')
        link = re.search('src=\"([^\"]+)', data, re.IGNORECASE | re.DOTALL)
        if link:
            return [{'url': link.group(1)}]
Example #35
    def list_show(self, url, list_series=False, list_episodes=False):
        result = []
        self.info("list_show %s" % (url))
        data = util.request(url)
        if list_series:
            series_data = util.substr(
                data,
                r'<select name="season" data-ajax data-ajax-result-elements="headerPart,episodeListing">',
                '</select>')
            for serie_match in re.finditer(
                    r'option\s(?:selected\s)?value="(?P<url>[^"]+)">(?P<title>[^<]+)<',
                    series_data):
                item = self.dir_item()
                item['title'] = serie_match.group('title')
                item['url'] = self._fix_url(serie_match.group('url'))
                result.append(item)
        if list_episodes:
            episodes_data = util.substr(
                data, r'<section class="s s-container s-archive-serials">',
                "</section>")
            for article_match in re.finditer(
                    r'<article class=".+?media-on">(.+?)</article>',
                    episodes_data, re.DOTALL):
                article_dict = self._list_article(article_match.group(1))
                if article_dict is not None:
                    item = self.video_item()
                    item.update(article_dict)
                    item['title'] += ' ' + item.get('subtitle', '')
                    result.append(item)

            title_to_key = {
                'Dátum': 'date',
                'Názov epizódy': 'title',
                'Sledovanosť': 'seen',
                'Séria': 'season',
                'Epizóda': 'episode'
            }
            headers_match = re.search(
                '<div class="i head e-video-categories">(.+?)</div>',
                episodes_data, re.DOTALL)
            if headers_match is not None:
                headers = []
                for span_match in re.finditer('<span[^>]*>([^<]+)</span>',
                                              headers_match.group(1)):
                    key = title_to_key.get(span_match.group(1))
                    if key is None:
                        print "undefined key", span_match.group(1)
                        headers.append("")
                    else:
                        headers.append(key)
                archive_list_pattern = r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)[^>]+>\s+'
                for key in headers:
                    if key in ("", "title"):
                        archive_list_pattern += r'^.+?$\s+'
                    else:
                        archive_list_pattern += r'<span>(?P<%s>[^<]*)</span>\s+' % key
                for archive_list_match in re.finditer(archive_list_pattern,
                                                      episodes_data,
                                                      re.MULTILINE):
                    item = self.video_item()
                    groupdict = archive_list_match.groupdict()
                    if 'season' in groupdict and 'episode' in groupdict:
                        # joj sometimes doesn't provide season/episode numbers
                        # for the latest episodes, so mark them as 0.
                        try:
                            season = int(archive_list_match.group('season'))
                        except Exception:
                            season = 0
                        try:
                            episode = int(archive_list_match.group('episode'))
                        except Exception:
                            episode = 0
                        item['title'] = "(S%02d E%02d) - %s" % (
                            season, episode, archive_list_match.group('title'))
                    else:
                        item['title'] = "(%s) - %s" % (
                            archive_list_match.group('date'),
                            archive_list_match.group('title'))
                    item['url'] = self._fix_url(
                        archive_list_match.group('url'))
                    result.append(item)

            pagination_data = util.substr(data, '<nav>', '</nav>')
            next_match = re.search(
                r'a href="(?P<url>[^"]+)" aria-label="Ďalej"', pagination_data,
                re.DOTALL)
            if next_match:
                item = self.dir_item()
                item['type'] = 'next'
                item['url'] = self._fix_url(next_match.group(1))
                result.append(item)
        return result
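
To make the pattern assembly above concrete: if the header row came back in the order Dátum, Názov epizódy, Sledovanosť (an assumed ordering, purely for illustration), the loop would build roughly this regex:

archive_list_pattern = (
    r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)[^>]+>\s+'
    r'<span>(?P<date>[^<]*)</span>\s+'   # 'date' column
    r'^.+?$\s+'                          # 'title' column, already captured above
    r'<span>(?P<seen>[^<]*)</span>\s+'   # 'seen' column
)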
Example #36
 def parse(self, url):
     return BeautifulSoup(util.request(url),
                          'html5lib',
                          from_encoding='utf-8')
Example #37
 def download(remote, local):
     util.save_data_to_file(util.request(remote), local)
Example #38
    def list_show(self, url, list_series=False, list_episodes=False):
        result = []
        self.info("list_show %s" % (url))
        data = util.request(url)
        if list_series:
            series_data = util.substr(
                data, r'<select onchange="return selectSeason(this.value);">',
                '</select>')
            for serie_match in re.finditer(
                    r'<option value="(?P<season_id>\d+)?"\s(selected="selected")?>\s+(?P<title>[^<]+)\n',
                    series_data):
                item = self.dir_item()
                season_id = serie_match.group('season_id')
                if not season_id:
                    season_id = ""
                item['title'] = serie_match.group('title')
                item['url'] = "%s?seasonId=%s" % (url.split('#')[0], season_id)
                result.append(item)
        if list_episodes:
            if url.find('-page=') > 0 and url.find('-listing') > 0:
                episodes_data = data
            else:
                episodes_data = util.substr(data, r'<section>', '</section>')

            for article_match in re.finditer(
                    r'<article class="b-article title-xs article-lp">(.+?)</article>',
                    episodes_data, re.DOTALL):
                article_dict = self._list_article(article_match.group(1))
                if article_dict is not None:
                    item = self.video_item()
                    item.update(article_dict)
                    item['title'] += ' ' + item.get('subtitle', '')
                    result.append(item)

            title_to_key = {
                'Dátum': 'date',
                'Názov epizódy': 'title',
                'Sledovanosť': 'seen',
                'Séria': 'season',
                'Epizóda': 'episode'
            }
            headers_match = re.search(
                '<div class="i head e-video-categories">(.+?)</div>',
                episodes_data, re.DOTALL)
            if headers_match is not None:
                headers = []
                for span_match in re.finditer('<span[^>]*>([^<]+)</span>',
                                              headers_match.group(1)):
                    key = title_to_key.get(span_match.group(1))
                    if key is None:
                        print "undefined key", span_match.group(1)
                        headers.append("")
                    else:
                        headers.append(key)
                archive_list_pattern = r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)[^>]+>\s+'
                for key in headers:
                    if key in ("", "title"):
                        archive_list_pattern += r'^.+?$\s+'
                    else:
                        archive_list_pattern += r'<span>(?P<%s>[^<]*)</span>\s+' % key
                for archive_list_match in re.finditer(archive_list_pattern,
                                                      episodes_data,
                                                      re.MULTILINE):
                    item = self.video_item()
                    groupdict = archive_list_match.groupdict()
                    if 'season' in groupdict and 'episode' in groupdict:
                        # joj sometimes doesn't provide season/episode numbers
                        # for the latest episodes, so mark them as 0.
                        try:
                            season = int(archive_list_match.group('season'))
                        except Exception:
                            season = 0
                        try:
                            episode = int(archive_list_match.group('episode'))
                        except Exception:
                            episode = 0
                        item['title'] = "(S%02d E%02d) - %s" % (
                            season, episode, archive_list_match.group('title'))
                    else:
                        item['title'] = "(%s) - %s" % (
                            archive_list_match.group('date'),
                            archive_list_match.group('title'))
                    item['url'] = self._fix_url(
                        archive_list_match.group('url'))
                    result.append(item)
            if url.find('-page=') > 0 and url.find('-listing') > 0:
                pagination_data = data
            else:
                pagination_data = util.substr(data, r'<section>', '</section>')
            next_match = re.search(
                r'a.*data-href="(?P<url>[^"]+)".*title="Načítaj viac"',
                pagination_data, re.DOTALL)
            if next_match:
                item = self.dir_item()
                item['type'] = 'next'
                item['url'] = self._fix_url_next(url, next_match.group(1))
                result.append(item)
        return result
Example #39
    def list(self, url):
        if url.find('#fm#') == 0:
            return self.list_folder(url[5:])
        url = self._url(url)
        page = util.request(url,
                            headers={
                                'X-Requested-With': 'XMLHttpRequest',
                                'Referer': url,
                                'Cookie': 'uloz-to-id=1561277170;'
                            }).decode('string-escape')
        script = util.substr(page, 'var kn', '</script>')
        keymap = None
        key = None
        k = re.search(r'({.+?})', script)
        if k:
            keymap = util.json.loads(k.group(1))
        j = re.search(r'ad.push\(\[kn, kn\["([^"]+)', script)
        if j:
            key = j.group(1)
        if not (j and k):
            self.error('error parsing page - unable to locate keys')
            return []
        burl = b64decode(
            'I2h0dHA6Ly9kZWNyLWNlY2gucmhjbG91ZC5jb20vZGVjcnlwdC8/a2V5PSVzJnZhbHVlPSVz'
        )
        murl = b64decode(
            'aHR0cDovL2RlY3ItY2VjaC5yaGNsb3VkLmNvbS9kZWNyeXB0Lw==')
        result = []
        req = {'seed': keymap[key], 'values': keymap}
        decr = json.loads(util.post_json(murl, req))
        for li in re.finditer('<div data-icon=\"(?P<key>[^\"]+)', page,
                              re.IGNORECASE | re.DOTALL):
            body = urllib.unquote(b64decode(decr[li.group('key')]))
            div_name = util.substr(body, '<div class="name"', '</div>')
            title_url_match = re.search(
                r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)', div_name)

            if not title_url_match:
                continue
            item = self.video_item()
            item['title'] = title_url_match.group('title')
            item['url'] = title_url_match.group('url')

            div_media = util.substr(body, 'div class="media"',
                                    '<div class="tools">')
            img_match = re.search(r'img src="([^"]+)', div_media)
            if img_match:
                item['img'] = "http:" + img_match.group(1)
            time_match = re.search(r'<span>Čas</span>(.+)', div_media)
            if time_match:
                item['length'] = time_match.group(1).strip()
            size_match = re.search(r'<span>Velikost</span>([^<]+)', div_media)
            if size_match:
                item['size'] = size_match.group(1).strip()
            self._filter(result, item)
        # page navigation
        data = util.substr(page, '<div class=\"paginator', '</div')
        mnext = re.search('<a href=\"(?P<url>[^\"]+)\" class="next', data)
        if mnext:
            item = self.dir_item()
            item['type'] = 'next'
            item['url'] = util.decode_html(mnext.group('url'))
            result.append(item)
        return result
Example #40
 def cache_request_12(self, url):
     return util.request(url)
Example #41
    def resolve(self, item, captcha_cb=None):
        item = item.copy()
        url = item['url']
        if url.startswith('http://www.ulozto.sk'):
            url = self.base_url + url[20:]
        url = self.decr_url(url)
        url = self._url(url)
        if url.startswith('#'):
            util.error('[uloz.to] - url was not correctly decoded')
            return
        self.init_urllib()
        self.login()
        self.info('Resolving %s' % url)
        if not item.has_key('vip'):
            item['vip'] = False
        vip = item['vip']
        if vip:
            page = util.request(url)
        else:
            try:
                request = Request(url)
                response = urlopen(request)
                page = response.read()
                response.close()
            except HTTPError as e:
                traceback.print_exc()
                return
        if page.find('Stránka nenalezena!') > 0:
            self.error('page with movie was not found on server')
            return

        if vip:
            data = util.substr(page, '<h3>Neomezené stahování</h3>', '</div')
            m = re.search('<a(.+?)href=\"(?P<url>[^\"#]+)\"', data,
                          re.IGNORECASE | re.DOTALL)
            if m:
                try:
                    self.rh.throw = True
                    resp = urlopen(Request(self._url(m.group('url'))))
                except RedirectionException:
                    # this is what we need, our redirect handler raises this
                    pass
                except HTTPError:
                    # this is not OK, something went wrong
                    traceback.print_exc()
                    self.error(
                        'Cannot resolve stream url, server did not redirect us'
                    )
                    self.info('Failed URL: ' + self._url(m.group('url')))
                    return
                stream = self.rh.location
                item['url'] = self._fix_stream_url(stream)
                item['surl'] = url
                return item

        else:
            m = re.search(
                '<form action="(?P<action>[^"]+)[^>]+class="jsFreeDownloadForm"',
                page)
            if m:
                self.rh.throw = True
                stream_url = self._get_file_url_anonymous(
                    page, self._url(m.group('action')), response.headers,
                    captcha_cb)
                if stream_url:
                    item['url'] = stream_url
                    # Free uloz.to allows seeking but does not allow multiple connections.
                    # Kodi opens extra connections when seeking is possible, so playback breaks.
                    # Setting this Kodi-specific header disables seeking -> only one
                    # connection -> playback works, though we lose the ability to seek.

                    # more info - http://forum.kodi.tv/showthread.php?tid=236411
                    item['headers'] = {'seekable': '0'}
                    item['surl'] = url
                    return item
 def list(self, url):
     if url.find("#related#") == 0:
         return self.list_related(util.request(url[9:]))
     else:
         return self.list_content(util.request(self._url(url)),
                                  self._url(url))
Example #43
 def get_data_cached(self, url):
     return util.request(url)
Example #44
 def diff(self) -> unidiff.PatchSet:
     response = util.request(
         'https://patch-diff.githubusercontent.com/raw/%s/pull/%s.diff' %
         (self._repo, self._pr_number))
     return unidiff.PatchSet(response.content.decode('utf-8'))
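
Here util.request apparently returns a requests-style response object rather than a plain string. One possible way to consume the resulting PatchSet (pr stands for a hypothetical instance of the class that defines diff() above; path, added and removed are attributes of unidiff.PatchedFile):

patch = pr.diff()
for patched_file in patch:
    # per-file totals of added/removed lines from the pull-request diff
    print('%s: +%d -%d' % (patched_file.path, patched_file.added, patched_file.removed))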
Example #45
 def fetch(req, *args):
     return util.request(req), args
Example #46
    def resolve(self, item, captcha_cb=None, select_cb=None):
        result = []
        item = item.copy()
        url = item['url']
        if url.endswith('live.html'):
            channel = urlparse.urlparse(url).netloc.split('.')[0]
            if channel == 'plus':
                channel = 'jojplus'
            channel_quality_map = {
                'joj': ('360', '540', '720'),
                'jojplus': ('360', '540'),
                'wau': ('360', '540')
            }
            for quality in channel_quality_map[channel]:
                item = self.video_item()
                item['quality'] = quality + 'p'
                item['url'] = 'https://nn.geo.joj.sk/live/hls/' + channel + '-' + quality + '.m3u8'
                result.append(item)
        else:
            data = util.request(url)
            data = util.substr(
                data, '<section class="s-section py-0 s-video-detail">',
                '</section>')
            iframe_url = re.search('<iframe src="([^"]+)"', data).group(1)
            #print 'iframe_url = ', iframe_url
            player_str = urllib2.urlopen(iframe_url).read()
            #print player_str

            labels_str = re.search(r'var labels = {(.+?)};', player_str,
                                   re.DOTALL).group(1)
            #print 'labels:', labels_str
            renditions = re.search(r'renditions: \[(.+?)\]',
                                   labels_str).group(1).replace(
                                       "'", "").replace('"', '').split(',')
            #print 'renditions: ', renditions

            settings_str = re.search(r'var settings = {(.+?)};', player_str,
                                     re.DOTALL).group(1)
            #print 'settings:', settings_str
            poster_url = re.search(r'poster: \"(.+?)\"', settings_str).group(1)
            #print 'poster_url:', poster_url

            bitrates_str = re.search(r'var src = {(.+?)};', player_str,
                                     re.DOTALL).group(1)
            #print 'bitrates:', bitrates_str
            bitrates_url = re.search(r'"mp4": \[(.+?)\]', bitrates_str,
                                     re.DOTALL).group(1)
            bitrates_url = bitrates_url.replace("'",
                                                "").replace('\n', '').replace(
                                                    ' ', '').split(',')
            for idx, url in enumerate(bitrates_url):
                item = self.video_item()
                item['img'] = poster_url
                item['quality'] = renditions[idx]
                item['url'] = url
                result.append(item)
            result.reverse()
        if select_cb:
            return select_cb(result)
        return result
Example #47
def authTrakt():
    util.debug("[SC] trakt authTrakt 1")
    try:
        if getTraktCredentialsInfo() == True:
            util.debug("[SC] trakt at 2")
            if sctop.yesnoDialog(
                    sctop.getString(30932).encode('utf-8'),
                    sctop.getString(30933).encode('utf-8'), '', 'Trakt'):
                util.debug("[SC] trakt at 3")
                sctop.setSetting('trakt.user', value='')
                sctop.setSetting('trakt.token', value='')
                sctop.setSetting('trakt.refresh', value='')
            raise Exception("[SC] ERR dialog")

        util.debug("[SC] trakt at 4")
        result = getTrakt('/oauth/device/code', {'client_id': sctop.trCL})
        util.debug("[SC] trakt at 5: %s" % str(result))
        result = json.loads(result)
        util.debug("[SC] trakt at 6: %s" % str(result))
        verification_url = (sctop.getString(30930) %
                            result['verification_url']).encode('utf-8')
        user_code = (sctop.getString(30931) %
                     result['user_code']).encode('utf-8')
        expires_in = int(result['expires_in'])
        device_code = result['device_code']
        interval = result['interval']

        progressDialog = sctop.progressDialog
        progressDialog.create('Trakt', verification_url, user_code)

        for i in range(0, expires_in):
            try:
                if progressDialog.iscanceled(): break
                sctop.sleep(500)
                if not float(i) % interval == 0: raise Exception()
                r = getTrakt(
                    '/oauth/device/token', {
                        'client_id': sctop.trCL,
                        'client_secret': sctop.trSC,
                        'code': device_code
                    })
                r = json.loads(r)
                if 'access_token' in r: break
            except:
                pass

        try:
            progressDialog.close()
        except:
            pass

        token, refresh = r['access_token'], r['refresh_token']
        util.debug("[SC] token: %s refresh: %s" % (str(token), str(refresh)))

        headers = {
            'trakt-api-key': sctop.trCL,
            'trakt-api-version': '2',
            'Authorization': 'Bearer %s' % token
        }

        result = util.request('http://api-v2launch.trakt.tv/users/me', headers)
        result = json.loads(result)

        user = result['username']

        sctop.setSetting('trakt.user', value=user)
        sctop.setSetting('trakt.token', value=token)
        sctop.setSetting('trakt.refresh', value=refresh)
        util.debug("[SC] auth: %s %s %s" %
                   (str(user), str(token), str(refresh)))
        raise Exception("[SC] ERR koniec")
    except:
        util.debug("ERROR: %s" % str(traceback.format_exc()))
        sctop.openSettings('0.0')
Exemplo n.º 48
0
 def resolve(self, item, captcha_cb=None, select_cb=None):
     result = []
     item = item.copy()
     if item['url'].startswith('live.'):
         channel_id = item['url'].split('.')[1]
         data = util.request(
             "http://www.rtvs.sk/json/live5f.json?c=%s&b=mozilla&p=linux&v=47&f=1&d=1"
             % (channel_id))
         videodata = util.json.loads(data)['clip']
         if is_kodi_leia():
             #return playlist with adaptive flag
             item = self.video_item()
             item['title'] = videodata.get('title', '')
             item['url'] = videodata['sources'][0]['src']
             item['quality'] = 'adaptive'
             item['img'] = videodata.get('image', '')
             result.append(item)
         else:
             #process m3u8 playlist
             for stream in get_streams_from_manifest_url(
                     videodata['sources'][0]['src']):
                 item = self.video_item()
                 item['title'] = videodata.get('title', '')
                 item['url'] = stream['url']
                 item['quality'] = stream['quality']
                 item['img'] = videodata.get('image', '')
                 result.append(item)
     else:
         video_id = item['url'].split('/')[-1]
         self.info("<resolve> videoid: %s" % video_id)
         videodata = util.json.loads(
             util.request("http://www.rtvs.sk/json/archive.json?id=" +
                          video_id))
         for v in videodata['playlist']:
             url = "%s/%s" % (v['baseUrl'], v['url'].replace(
                 '.f4m', '.m3u8'))
             #http://cdn.srv.rtvs.sk:1935/vod/_definst_//smil:fZGAj3tv0QN4WtoHawjZnKy35t7dUaoB.smil/manifest.m3u8
             if '/smil:' in url:
                 if is_kodi_leia():
                     #return playlist with adaptive flag
                     item = self.video_item()
                     item['title'] = v['details']['name']
                     item['surl'] = item['title']
                     item['quality'] = 'adaptive'
                     item['url'] = url
                     result.append(item)
                 else:
                     #process m3u8 playlist
                     for stream in get_streams_from_manifest_url(url):
                         item = self.video_item()
                         item['title'] = v['details']['name']
                         item['surl'] = item['title']
                         item['url'] = stream['url']
                         item['quality'] = stream['quality']
                         result.append(item)
             else:
                 item = self.video_item()
                 item['title'] = v['details']['name']
                 item['surl'] = item['title']
                 item['quality'] = '???'
                 item['url'] = url
                 result.append(item)
     self.info("<resolve> playlist: %d items" % len(result))
     for i, it in enumerate(result):
         self.info("<resolve> item(%d): title= '%s', url= '%s'" %
                   (i, it['title'], it['url']))
     if len(result) > 0 and select_cb:
         return select_cb(result)
     return result
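get_streams_from_manifest_url and is_kodi_leia are helpers that are not part of this snippet. A rough sketch of the former, assuming util.request returns the HLS master playlist as text (the regex mirrors the EXT-X-STREAM-INF parsing used elsewhere in these examples):

import re

def get_streams_from_manifest_url(manifest_url):
    # sketch: turn an HLS master playlist into {'url', 'quality'} entries;
    # relative chunklist paths are resolved against the manifest location
    manifest = util.request(manifest_url)
    base = manifest_url[:manifest_url.rfind('/') + 1]
    streams = []
    for m in re.finditer(
            r'#EXT-X-STREAM-INF:[^\n]*?RESOLUTION=\d+x(?P<height>\d+)[^\n]*\n(?P<chunklist>\S+)',
            manifest):
        url = m.group('chunklist')
        if not url.startswith('http'):
            url = base + url
        streams.append({'url': url, 'quality': m.group('height') + 'p'})
    return streams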
Exemplo n.º 49
0
    def resolve(self, item, captcha_cb=None, select_cb=None):
        result = []
        resolved = []
        item = item.copy()
        url = self._url(item['url'])
        data = util.substr(util.request(url), 'async type', '</script>')
        print 'data start ----'
        print data
        print 'data end ----'
        playlist = re.search(
            '''new mfJWPlayer.+?(?P<jsondata>playlist:.+?)events:''', data,
            re.MULTILINE | re.DOTALL)
        print 'playlist start ----'
        print playlist
        print 'playlist end ----'
        jsondata = re.sub(
            ' +', ' ', '{%s' %
            playlist.group('jsondata').replace('file:', '"file":').replace(
                'label:', '"label":').replace('kind:', '"kind":').replace(
                    'default:', '"default":').replace(
                        'true', '"true"').replace('],', ']')) + '}'
        print 'jsondata start ----'
        print jsondata
        print 'jsondata end ----'
        jsondata = demjson.decode(jsondata)

        for playlist_item in jsondata['playlist']:
            playlist_item['file'] = playlist_item['file'].replace(
                'time_continue=1&', '')

            from Plugins.Extensions.archivCZSK.engine import client
            video_formats = client.getVideoFormats(playlist_item['file'])
            video_url = [video_formats[-1]]
            print video_url
            subs = playlist_item['tracks']
            if video_url and subs:
                for i in video_url:
                    i['subs'] = self.base_url[:-1] + subs[0]['file']
            resolved += video_url[:]

        # build result items outside the playlist loop so entries are not
        # appended repeatedly for every playlist_item
        if not resolved:
            raise ResolveException('Video nenalezeno')

        for i in resolved:
            item = self.video_item()
            try:
                item['title'] = i['title']
            except KeyError:
                pass
            item['url'] = i['url']
            item['quality'] = i['format_note']
            item['subs'] = i['subs']
            item['headers'] = {}
            try:
                item['fmt'] = i['fmt']
            except KeyError:
                pass
            result.append(item)

        if len(result) > 0 and select_cb:
            return select_cb(result)

        return result
Exemplo n.º 50
0
 def search(self, keyword):
     return self.list_searchresults(
         util.request(self._url('/srch/' + urllib.parse.quote(keyword))))
Exemplo n.º 51
0
 def _get_meta(self, name, link):
     # load meta from disk or download it (slow for each tale, that's why we cache it)
     # not necessary anymore, it's quite quick now
     data = util.request(link)
     return self._get_image(data), self._get_plot(data)
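_get_image and _get_plot are not shown in the snippet; the versions below are hypothetical ones that scrape Open Graph meta tags from the downloaded detail page (the real site markup may differ, and re is assumed to be imported as in the other examples):

 def _get_image(self, data):
     # assumed: poster taken from the og:image meta tag, if present
     m = re.search(r'<meta property="og:image" content="([^"]+)"', data)
     return m.group(1) if m else ''

 def _get_plot(self, data):
     # assumed: plot taken from the og:description meta tag
     m = re.search(r'<meta property="og:description" content="([^"]+)"', data)
     return m.group(1) if m else ''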
Exemplo n.º 52
0
    def _get_file_url_anonymous(self,page,post_url,headers,captcha_cb):

        data = util.request(self._url('reloadXapca.php'))
        capdata = json.loads(data)
        captcha = capdata['image']
        if not captcha.startswith('http'):
            captcha = 'http:' + captcha
        sound = capdata['sound']
        if not sound.startswith('http'):
            sound = 'http:' + sound
        # ask callback to provide captcha code
        self.info('Asking for captcha img %s' % captcha)
        code = captcha_cb({'id':captcha,'img': captcha,'snd':sound})
        if not code:
            self.info('Captcha not provided, done')
            return

        ts = re.search('<input type=\"hidden\" name=\"ts\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        cid = re.search('<input type=\"hidden\" name=\"cid\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        sign = re.search('<input type=\"hidden\" name=\"sign\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        sign_a = re.search('<input type=\"hidden\" name=\"sign_a\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        has = capdata['hash']
        salt = capdata['salt']
        timestamp = capdata['timestamp']
        token = re.search('<input type=\"hidden\" name=\"_token_\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        if not (sign and sign_a and ts and cid and has and token):
            util.error('[uloz.to] - unable to parse required params from page, plugin needs fix')
            return
        request = {
            'captcha_type':'xapca',
            'hash':has,
            'salt':salt,
            'timestamp':timestamp,
            'ts':ts.group(1),
            'cid':'',
            'sign':sign.group(1),
            'sign_a':sign_a.group(1),
            'captcha_value':code,
            '_do':'download-freeDownloadTab-freeDownloadForm-submit',
            '_token_':token.group(1),
            'adi':'f'
        }
        req = urllib2.Request(post_url,urllib.urlencode(request))
        req.add_header('User-Agent',util.UA)
        req.add_header('Referer',post_url)
        req.add_header('Accept','application/json')
        req.add_header('X-Requested-With','XMLHttpRequest')
        sessid=[]
        for cookie in re.finditer('(ULOSESSID=[^\;]+)',headers.get('Set-Cookie'),re.IGNORECASE | re.DOTALL):
            sessid.append(cookie.group(1))
        req.add_header('Cookie','nomobile=1; uloztoid='+cid.group(1)+'; uloztoid2='+cid.group(1)+'; '+sessid[-1])
        util.info(req.headers)
        util.info(request)
        try:
            resp = urllib2.urlopen(req)
            page = resp.read()
            headers = resp.headers
        except urllib2.HTTPError:
            # this is not OK, something went wrong
            traceback.print_exc()
            util.error('[uloz.to] cannot resolve stream url, server did not redirect us')
            util.info('[uloz.to] POST url:'+post_url)
            return
        try:
            result = json.loads(page)
        except:
            raise ResolveException('Unexpected error, addon needs fix')
        if 'status' not in result:
            raise ResolveException('Unexpected error, addon needs fix')
        if result['status'] == 'ok':
            return self._fix_stream_url(result['url'])
        elif result['status'] == 'error':
            # the only known state is wrong captcha for now
            util.error('Captcha validation failed, please try playing/downloading again')
            util.error(result)
            raise ResolveException('Captcha failed, try again')
    def resolve(self, item, captcha_cb=None, select_cb=None):
        result = []
        resolved = []
        item = item.copy()
        url = self._url(item['url'])
        self.info('== resolve titulkomet ===>' + url)
        original_yt = False

        data = util.substr(util.request(url),
                           'jQuery( document ).ready(function()', '</script>')

        urls = re.findall('file:[ ]+\"(?P<url>[^\"].+?)\"', data,
                          re.IGNORECASE | re.DOTALL | re.MULTILINE)
        self.info(urls)
        if original_yt:
            url2 = urls[0]
            # e = 'watch?v='
            e = 'youtu.be/'
            edx = url2.find(e)
            video_id = url2[edx + len(e):]

    # video_url = resolver.findstreams([urls[0].replace('https://youtu.be/', 'https://www.youtube.com/watch?v=')])
        vid = YDStreamExtractor.getVideoInfo(
            url,
            quality=3)  #quality is 0=SD, 1=720p, 2=1080p, 3=Highest Available
        video_url = [vid.streams()[0]]
        subs = urls[1]
        # self.info(video_url)

        self.info(subs)
        if video_url and subs:
            for i in video_url:
                i['subs'] = subs
        resolved += video_url[:]

        if not resolved:
            raise ResolveException('Video nenalezeno')

        for i in resolved:
            item = self.video_item()
            try:
                item['title'] = i['title']
            except KeyError:
                pass
            item['url'] = i['xbmc_url']  # i['url']
            if original_yt:
                item['url'] = ('plugin://plugin.video.youtube/'
                               '?action=play_video&videoid=' + video_id)

            #item['quality'] = i['quality']
            #item['surl'] = i['surl']
            item['quality'] = i['ytdl_format']['height']
            item['surl'] = i['ytdl_format']['webpage_url']
            item['subs'] = i['subs']
            item['headers'] = {}  #i['headers']
            self.info(item)
            try:
                item['fmt'] = i['fmt']
            except KeyError:
                pass
            result.append(item)

        return result