Example #1
 def decodeOpenload(link):
     #get cookies
     mediaId = Decoder.extract("/f/", "/", link)
     embedUrl = 'https://openload.io/embed/' + mediaId
     html = Downloader.getContentFromUrl(embedUrl, "", "", "", False, False)
     logger.info("html is: " + html)
     logger.debug("using cookie 1: " + Downloader.cookie)
     logger.debug("Media id for openload is: " + mediaId)
     extra = "&login=f750b26513f64034&key=oaA-MbZo"  #this avoid captcha petition
     link2 = "https://api.openload.io/1/file/dlticket?file=" + mediaId + extra
     data = Downloader.getContentFromUrl(link2, "", Downloader.cookie,
                                         embedUrl, True, False)
     logger.debug("jsonData: " + data)
     js_result = json.loads(data)
     logger.info("sleeping... " + str(js_result['result']['wait_time']))
     time.sleep(int(js_result['result']['wait_time']))
     link3 = 'https://api.openload.io/1/file/dl?file=%s&ticket=%s' % (
         mediaId, js_result['result']['ticket'])
     logger.debug("using cookie 2: " + Downloader.cookie)
     result = Downloader.getContentFromUrl(link3, "", Downloader.cookie,
                                           embedUrl, True, False)
     logger.debug("jsonData 2: " + result)
     js_result2 = json.loads(result)
     file = js_result2['result']['url'] + '?mime=true'
     logger.info("Built final link: " + file)
     return file
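Note: every snippet on this page calls the project helper Downloader.getContentFromUrl positionally (URL, POST data, cookie, referer, and in some calls two boolean flags). The sketch below is only a guess at what such a helper might do, inferred from these call sites; the function name, parameter names, and behaviour are assumptions, not the tvbox project's actual API.

 import urllib2

 # Hypothetical stand-in for Downloader.getContentFromUrl, inferred only from
 # the call sites on this page. The two trailing boolean flags seen above are
 # omitted because their meaning cannot be deduced from these snippets alone.
 def getContentFromUrlSketch(url, data="", cookie="", referer=""):
     req = urllib2.Request(url, data if data else None)  # POST when data is non-empty, otherwise GET
     req.add_header("User-Agent", "Mozilla/5.0 (X11; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0")
     if referer:
         req.add_header("Referer", referer)
     if cookie:
         req.add_header("Cookie", cookie)
     return urllib2.urlopen(req).read()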
Example #2
 def decodeOpenloadUsingOfficialApi(link): # the official API is unreliable: it currently returns HTTP 509 for every login
     #get cookies
     mediaId = Decoder.extract("/f/","/",link)
     embedUrl = 'https://openload.io/embed/'+mediaId
     html = Downloader.getContentFromUrl(embedUrl,"","","",False,False)
     logger.info("html is: "+html)
     logger.debug("using cookie 1: "+Downloader.cookie)
     logger.debug("Media id for openload is: "+mediaId)
     key = "oaA-MbZo"
     login = "******"
     extra = "&login="******"&key="+key #this avoid captcha petition
     link2 = "https://api.openload.io/1/file/dlticket?file="+mediaId+extra
     data = Downloader.getContentFromUrl(link2,"",Downloader.cookie,embedUrl,True,False)
     logger.debug("jsonData: "+data)
     js_result = json.loads(data)
     logger.info("sleeping... "+str(js_result['result']['wait_time']))
     time.sleep(int(js_result['result']['wait_time']))
     link3 = 'https://api.openload.io/1/file/dl?file=%s&ticket=%s' % (mediaId, js_result['result']['ticket'])
     logger.debug("using cookie 2: "+Downloader.cookie)
     result = Downloader.getContentFromUrl(link3,"",Downloader.cookie,embedUrl,True,False)
     logger.debug("jsonData 2: "+result)
     js_result2 = json.loads(result)
     file = js_result2['result']['url'] + '?mime=true'
     logger.info("Built final link: "+file)
     return file
Example #3
 def getChannels(page, cookie='', referer=''):
     x = []
     html = ""
     if str(page) == '0':
         x = Fildonet.getMainSections()
     elif str(page) == '100artist':
         page = Fildonet.MAIN_URL
         html = Downloader.getContentFromUrl(page, "", cookie, "")
         x = Fildonet.extractElementsArtist(html)
     elif str(page) == 'topalbums':
         page = Fildonet.MAIN_URL
         html = Downloader.getContentFromUrl(page, "", cookie, "")
         x = Fildonet.extractElementsAlbum(html)
     elif str(page) == 'lastestplaylists':
         pass
     elif str(page).find('search') != -1:
         keyboard = xbmc.Keyboard("")
         keyboard.doModal()
         text = ""
         if (keyboard.isConfirmed()):
             text = keyboard.getText()
             x = Fildonet.search(text)
     else:
         page = base64.standard_b64decode(page)
         logger.info("ELSE --- page is: " + page)
         html = Downloader.getContentFromUrl(page, "", cookie, "")
         if page.find("albumId=") != -1:
             jsonData = json.loads(html)
             x = Fildonet.buildFromJSON(jsonData)
         else:
             x = Fildonet.extractElementsPlayer(html)
     return x
Example #4
    def extractSawlive(scriptSrc, cookie, iframeUrl):
        encryptedHtml = Downloader.getContentFromUrl(scriptSrc, "", cookie, iframeUrl)
        # print encryptedHtml
        decryptedUrl = Decoder.decodeSawliveUrl(encryptedHtml)
        html3 = Downloader.getContentFromUrl(decryptedUrl, "", cookie, scriptSrc)
        # ok, now extract flash script content

        flashContent = Decoder.extract("var so = new SWFObject('", "</script>", html3)
        file = Decoder.extract("'file', '", "');", flashContent)
        rtmpUrl = ""
        if flashContent.find("'streamer', '") > 0.1:
            rtmpUrl = Decoder.extract("'streamer', '", "');", flashContent)
        swfUrl = "http://static3.sawlive.tv/player.swf"  # default
        # update swf url
        swfUrl = flashContent[: flashContent.find("'")]
        logger.info("updated swf player to: " + swfUrl)
        if rtmpUrl == "" and file.find("http://") > -1:
            finalRtmpUrl = file  # it's a redirect with an .m3u8, so it's used
        else:
            finalRtmpUrl = (
                rtmpUrl
                + " playpath="
                + file
                + " swfUrl="
                + swfUrl
                + " live=1 conn=S:OK pageUrl="
                + decryptedUrl
                + " timeout=12"
            )
        return finalRtmpUrl
Example #5
 def decodeStreamliveto(html,page=''):
     iframeUrl = "http://www.streamlive.to/view/"+Decoder.extract('http://www.streamlive.to/embed/','&width=',html)
     html2 = Downloader.getContentFromUrl(iframeUrl,urllib.urlencode({"captcha":"yes"}),"",iframeUrl)
     if html2.find("Question:")>-1:#captcha
         #logger.debug(html2)
         captcha = Decoder.rExtract(': ','<br /><br />',html2)
         if captcha.find("(")>-1:
             logger.debug("resolving captcha with math..."+captcha)
             try:
                 captcha = Decoder.resolveSimpleMath(captcha)
             except:
                 logger.error("Could not resolve captcha: "+captcha)
                 pass
         logger.debug("captcha="+captcha)
         captchaPost = urllib.urlencode({'captcha': captcha})
         logger.debug(captchaPost)
         time.sleep(3)
         html2 = Downloader.getContentFromUrl(iframeUrl,captchaPost,Downloader.cookie,iframeUrl)
     link = "http://harddevelop.com/2015/11/tv-box.html|Referer=http://gordosyfrikis.com/" # ;)
     if html2.find("http://www.streamlive.to/ads/ilive_player.swf")>-1: #builds the link
         swfUrl = "http://www.streamlive.to/ads/streamlive.swf"
         tokenUrl = Decoder.extractWithRegex("http://www.streamlive.to/server.php?id=",'"',html2)
         tokenUrl = tokenUrl[:(len(tokenUrl)-1)]
         token = Downloader.getContentFromUrl(tokenUrl,"",Downloader.cookie,page)
         token = Decoder.extract('{"token":"','"}',token)
         file = Decoder.extract('file: "','",',html2).replace('.flv','')
         streamer = Decoder.extract('streamer: "','",',html2).replace("\\","")
         link = streamer+"./"+file+" playpath="+file+" live=1 token="+token+" swfUrl="+swfUrl+" pageUrl=http://www.streamlive.to/view"+(iframeUrl[iframeUrl.rfind("/"):])
         logger.debug("built a link to be used: "+link)
     return link
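The string returned here (and by the sawlive and iguide decoders below) follows librtmp's space-separated option syntax, which Kodi hands to its RTMP player: a base rtmp:// URL followed by key=value pairs such as playpath, swfUrl, live, token and pageUrl. A minimal sketch of how such a link could be assembled is shown below; the helper name and argument list are illustrative only, not part of the project.

 # Illustrative sketch: builds a link in the space-separated "key=value" option
 # style produced by the decoders on this page (librtmp/Kodi RTMP syntax).
 def buildRtmpLink(streamer, playpath, swfUrl, pageUrl, token=None, live=True):
     parts = [streamer, "playpath=" + playpath, "swfUrl=" + swfUrl]
     if live:
         parts.append("live=1")
     if token:
         parts.append("token=" + token)
     parts.append("pageUrl=" + pageUrl)
     return " ".join(parts)

 # Example: buildRtmpLink("rtmp://host/app", "stream", "http://host/player.swf",
 #                        "http://host/view/stream", token="abc")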
Example #6
 def getChannels(page,cookie='',referer=''):
     x = []
     html = ""
     if str(page) == '0':
         x = Fildonet.getMainSections()
     elif str(page) == '100artist':
         page=Fildonet.MAIN_URL
         html = Downloader.getContentFromUrl(page,"",cookie,"")
         x = Fildonet.extractElementsArtist(html)
     elif str(page) == 'topalbums':
         page=Fildonet.MAIN_URL
         html = Downloader.getContentFromUrl(page,"",cookie,"")
         x = Fildonet.extractElementsAlbum(html)
     elif str(page) == 'lastestplaylists':
         pass
     elif str(page).find('search')!=-1:
         keyboard = xbmc.Keyboard("")
         keyboard.doModal()
         text = ""
         if (keyboard.isConfirmed()):
             text = keyboard.getText()
             x = Fildonet.search(text)
     else:
         page = base64.standard_b64decode(page)
         logger.info("ELSE --- page is: "+page)
         html = Downloader.getContentFromUrl(page,"",cookie,"")
         if page.find("albumId=")!=-1:
             jsonData = json.loads(html)
             x = Fildonet.buildFromJSON(jsonData)
         else:
             x = Fildonet.extractElementsPlayer(html)
     return x
Example #7
 def decodeVidggTo(link):
     referer = "http://www.vidgg.to/player/cloudplayer.swf"
     html = Downloader.getContentFromUrl(link)
     file = Decoder.extract("flashvars.file=\"",'";',html)
     key = Decoder.extract("flashvars.filekey=\"",'";',html)
     url2 = "http://www.vidgg.to/api/player.api.php?pass=undefined&key="+key+"&user=undefined&numOfErrors=0&cid3=undefined&cid=1&file="+file+"&cid2=undefined"
     bruteResponse = Downloader.getContentFromUrl(url2)
     finalLink = Decoder.extract("url=","&title",bruteResponse)
     logger.debug("Final link is: "+finalLink)
     return finalLink
Example #8
 def decodeVidggTo(link):
     referer = "http://www.vidgg.to/player/cloudplayer.swf"
     html = Downloader.getContentFromUrl(link)
     file = Decoder.extract("flashvars.file=\"", '";', html)
     key = Decoder.extract("flashvars.filekey=\"", '";', html)
     url2 = "http://www.vidgg.to/api/player.api.php?pass=undefined&key=" + key + "&user=undefined&numOfErrors=0&cid3=undefined&cid=1&file=" + file + "&cid2=undefined"
     bruteResponse = Downloader.getContentFromUrl(url2)
     finalLink = Decoder.extract("url=", "&title", bruteResponse)
     logger.debug("Final link is: " + finalLink)
     return finalLink
Example #9
def openSpliveLink(url, page, provider):
    if url.find(".m3u8") == -1 and url.find("rtmp://") == -1:
        channel = Spliveappcom.decodeUrl(url, provider)
        link = channel[0]["link"]
        if link.find(", referer:") > -1:
            link = link[0 : link.find(", referer:")]
        url = link
    else:
        logger.debug("nothing decoded for splive encrypted channels, continue...")

    logger.debug("splive BRUTE logic for url: " + url)

    try:
        if "ponlatv.com" in url or "playerhd1.pw" in url:
            logger.debug("trying to decode cineestrenos script from url: " + url)
            url = Cineestrenostv.extractScriptLevel3(url, referer=Cineestrenostv.MAIN_URL)
            logger.debug("decoded link was: " + url)

        else:
            url = Cineestrenostv.getChannels(url)[0]["link"]
            html = Downloader.getContentFromUrl(url)
            element = Cineestrenostv.extractIframeChannel(html, url)
            if element is not None and "link" in element:
                url = element["link"]
                logger.debug("cineestrenos url was decoded to: " + url)
            else:
                logger.debug("nothing was done to decode cineestrenostv url!")
    except:
        logger.debug("nothing to be decoded with url: " + url)
        pass

    link = url

    logger.info("found link: " + link + ", launching...")
    open(link, page)
Example #10
def drawBbcCoUkNew(url):
    htmlContent = Downloader.getContentFromUrl(url=url)
    title = Decoder.extract('<p class="story-body__introduction">', "</p><div", htmlContent)
    if 'property="articleBody"' in htmlContent:
        body = Decoder.extract(
            'property="articleBody"',
            "                                                                                                </div>",
            htmlContent,
        )
        body = body.replace('<span class="off-screen">Image copyright</span>', "")
        body = body.replace('<span class="story-image-copyright">AFP</span>', "")
        body = body.replace('<span class="story-image-copyright">Reuters</span>', "")
        body = body.replace('<span class="off-screen">Image caption</span>', "")
        body = body.replace('<span class="off-screen">Media caption</span>', "")
        while '<span class="media-caption__text">' in body:
            line = Decoder.extractWithRegex('<span class="media-caption__text">', "</span>", body)
            body = body.replace(line, "")
    elif 'class="text-wrapper"' in htmlContent:
        # special content
        body = Decoder.extract('class="text-wrapper"', "</p>\n", htmlContent)
        dates = Decoder.extractWithRegex('<div class="date', "</div>", body)
        lastUpdate = Decoder.extractWithRegex('<p class="date ', "</p>", body)
        body = body.replace(dates, "")
        body = body.replace(lastUpdate, "")
    elif '<figcaption class="sp-media-asset' in htmlContent:
        body = Decoder.extract('<figcaption class="sp-media-asset', "</p><div ", htmlContent)
        if ">" in body:
            body = body[body.find(">") + 1 :]
    body = Decoder.removeHTML(body).replace(".", ".\n").replace(">", "")
    logger.debug("body is: " + body)
    drawNew(textContent=(body))
Example #11
 def decodeIguide(iframeUrl3,iframeUrl2=''):
     logger.debug("iguide url is: "+iframeUrl3)
     html4 = Downloader.getContentFromUrl(iframeUrl3,"autoplay=true",Downloader.cookie,iframeUrl2)
     logger.debug("part 2 of iguide")
     #from here the logic is similar to streamlive.to (probably the same backend server); build the link
     swfUrl = Decoder.rExtractWithRegex("http://",".swf",html4)
     logger.debug("using swfUrl: "+swfUrl)
     tokenUrl = Decoder.extractWithRegex("http://www.iguide.to/serverfile.php?id=",'"',html4)
     tokenUrl = tokenUrl[:(len(tokenUrl)-1)]
     token = Downloader.getContentFromUrl(tokenUrl,"",Downloader.cookie)
     token = Decoder.extract('{"token":"','"}',token)
     file = Decoder.extract("'file': '","',",html4).replace('.flv','')
     streamer = Decoder.extract("'streamer': '","',",html4).replace("\\","")
     link = streamer+" playpath="+file+" live=1 token="+token+" swfUrl="+swfUrl+" pageUrl="+iframeUrl3
     logger.debug("built a link to be used: "+link)
     return link
Example #12
    def extractDinostreamPart(url,referer=''):
        element = {}
        logger.debug("url: "+url+", referer: "+referer)
        html4 = Downloader.getContentFromUrl(url,"","",referer)
        finalIframeUrl = Decoder.extractWithRegex('http://','%3D"',html4)
        finalIframeUrl = finalIframeUrl[0:len(finalIframeUrl)-1]
        logger.debug("proccessing level 4, cookie: "+Downloader.cookie)
        finalHtml = Downloader.getContentFromUrl(finalIframeUrl,"",Downloader.cookie,referer)
        logger.debug("proccessing level 5, cookie: "+Downloader.cookie)
        playerUrl = Decoder.decodeBussinessApp(finalHtml,finalIframeUrl)
        #print "player url is: "+playerUrl
        element["title"] = "Watch streaming"
        element["permalink"] = True
        element["link"] = playerUrl

        return element
Example #13
 def getChannels(page,cookie='',referer=''):
     x = []
     html = ""
     if str(page) == '0':
         x = Redmp3cc.getMainSections()
     elif str(page) == 'songs.html':
         page=Redmp3cc.MAIN_URL+"/"
         html = Downloader.getContentFromUrl(page,"",cookie,"")
         x = Redmp3cc.extractElementsPlayer(html)
     elif str(page).find('search.html')!=-1:
         if str(page).find('search.html/')==-1:
             keyboard = xbmc.Keyboard("")
             keyboard.doModal()
             text = ""
             if (keyboard.isConfirmed()):
                 text = keyboard.getText()
                 x = Redmp3cc.search(text)
         else:
             text = Decoder.rExtract('search.html/','/',page)
             page = int(page[page.rfind('/')+1:])
             x = Redmp3cc.search(text,page)
     elif str(page).find(".html")!=-1:
         if str(page) == 'albums.html':
             page = Redmp3cc.MAIN_URL
             html = Downloader.getContentFromUrl(page,"",cookie,"")
             x = Redmp3cc.extractElementsAlbum(html)
         else:
             html = Downloader.getContentFromUrl(page,"",cookie,"")
             x = Redmp3cc.extractElementsPlayer(html)
     else:
         logger.info("page is: "+page)
         response = Redmp3cc.getContentFromUrl(page,"",cookie,Redmp3cc.MAIN_URL,True)
         #logger.info("will be used a mp3 url: "+Decoder.extract('<a href="','">here',response))
         host = response[response.find("://")+len("://"):]
         if host.find("/")>-1:
             host = host[0:host.find("/")]
         cookie = Redmp3cc.cookie
         referer = page
         logger.info("cookie is: "+cookie+", referer is: "+referer)
         headers = downloadtools.buildMusicDownloadHeaders(host,cookie,referer)
         filename= Decoder.extract('filename=','&',response)
         #ROOT_DIR = xbmcaddon.Addon(id='org.harddevelop.kodi.juke').getAddonInfo('path')
         ROOT_DIR = xbmc.translatePath('special://temp/')
         logger.info("using special root folder: "+ROOT_DIR)
         downloadtools.downloadfile(response,ROOT_DIR+"/"+filename,headers,False,True)
         x.append(Redmp3cc.buildDownloadedFile(xbmc.makeLegalFilename(ROOT_DIR+"/"+filename)))
     return x
Example #14
 def decodeVidag(link):
     html = Downloader.getContentFromUrl(link,"","","",False,True)
     encodedMp4File = ""
     try:
         encodedMp4File = Decoder.extract("<script type='text/javascript'>eval(function(p,a,c,k,e,d)","</script>",html)
     except:
         pass
     mp4File = jsunpack.unpack(encodedMp4File) #un-p,a,c,k,e,d the packed script
     mp4File = Decoder.extract(',{file:"','",',mp4File)
     return mp4File
Example #15
def getListsUrls(url,icon=XBMCUtils.getAddonFilePath('icon.png'),provider='',finalTarget=1):
    #logger.debug("using url: "+url)
    html = Downloader.getContentFromUrl(url)
    if url.endswith(".xml") or ('<items>' in html or '<item>' in html): #main channels, it's a list to browse
        drawXml(html,icon=icon,finalTarget=finalTarget,provider=provider)
    elif url.endswith(".xspf"):
        drawXspf(html,icon)
    else: #it's the final list channel, split
        drawBruteChannels(html,icon)
Example #16
 def decodeOpenload(link): #decode javascript link like Firefox
     mediaId = Decoder.extract("/f/","/",link)
     logger.debug("mediaId is: "+mediaId)
     link = link.replace('/f/', '/embed/')
     html = Downloader.getContentFromUrl(link,"data=data","","",False,True) #make post, with get there is an infinite loop
     #extract script
     script = re.search(r"<video(?:.|\s)*?<script\s[^>]*?>((?:.|\s)*?)</script", html, re.DOTALL | re.IGNORECASE).group(1)
     url = Decoder.decodeAAScript(script)
     logger.debug("decoded url is: "+url)
     return url
Example #17
 def decodeVidag(link):
     html = Downloader.getContentFromUrl(link, "", "", "", False, True)
     encodedMp4File = ""
     try:
         encodedMp4File = Decoder.extract(
             "<script type='text/javascript'>eval(function(p,a,c,k,e,d)",
             "</script>", html)
     except:
         pass
     mp4File = jsunpack.unpack(encodedMp4File)  #un-p,a,c,k,e,d the packed script
     mp4File = Decoder.extract(',{file:"', '",', mp4File)
     return mp4File
Example #18
 def extractSawlive(scriptSrc,cookie,iframeUrl):
     encryptedHtml = Downloader.getContentFromUrl(scriptSrc,"",cookie,iframeUrl)
     #print encryptedHtml
     decryptedUrl = Decoder.decodeSawliveUrl(encryptedHtml)
     html3 = Downloader.getContentFromUrl(decryptedUrl,"",cookie,scriptSrc)
     logger.debug("decrypted sawlive url content obtained!")
     #ok, now extract flash script content
     flashContent = Decoder.extract("var so = new SWFObject('","</script>",html3)
     file = Decoder.extract("'file', ",");",flashContent)
     logger.debug("proccessing brute file: "+file)
     #now proccess file, it can be a figure so needs to be appended if contains +
     if file.find("+")>1:
         newFile = ""
         for target in file.split("+"):
             seekedString = "var "+target+" = '"
             if html3.find(seekedString)>-1:
                 value = Decoder.extract(seekedString,"'",html3)
                 newFile += value
             else:
                 newFile += target
             logger.debug("now file is: "+newFile)
         file = newFile
         logger.debug("updated file to: "+file)
     else:
         file = file.replace("'","") #clean
     rtmpUrl = ""
     if flashContent.find("'streamer', '")>.1:
         rtmpUrl = Decoder.extract("'streamer', '","');",flashContent)
     else:
         rtmpVar = Decoder.extract("'streamer', ",");",flashContent)
         seekedString = "var "+rtmpVar+" = '"
         rtmpUrl = Decoder.extract(seekedString,"';",html3)
     swfUrl = "http://static3.sawlive.tv/player.swf" #default
     #update swf url
     swfUrl = flashContent[:flashContent.find("'")]
     logger.info("updated swf player to: "+swfUrl)
     if rtmpUrl=='' and file.find("http://")>-1:
         finalRtmpUrl = file #it's a redirect with an .m3u8, so it's used
     else:
         finalRtmpUrl = rtmpUrl+" playpath="+file+" swfUrl="+swfUrl+" live=1 conn=S:OK pageUrl="+decryptedUrl+" timeout=12"
     return finalRtmpUrl
Example #19
 def decodeStreamable(link):
     html = Downloader.getContentFromUrl(link)
     flashContent = Decoder.extract('<object', '</object', html)
     movie = ""
     flashVars = ""
     for content in flashContent.split('<param'):
         value = Decoder.extract('value="', '"', content)
         name = Decoder.extract('name="', '"', content)
         if name == "movie" or name == "player":
             movie = value
         elif name == "FlashVars":
             flashVars = value
     swfUrl = "http://www.streamable.ch" + movie
     flashVars = flashVars[flashVars.find("="):]
     decodedFlashvars = base64.standard_b64decode(flashVars)
     logger.info("decoded url is: " + decodedFlashvars)
     response = Downloader.getContentFromUrl(decodedFlashvars)
     token = Decoder.extract("\"token1\":\"", "\"", response)
     finalLink = base64.standard_b64decode(token)
     logger.debug("final link is: " + finalLink)
     return finalLink
Example #20
 def decodeStreamable(link):
     html = Downloader.getContentFromUrl(link)
     flashContent = Decoder.extract("<object", "</object", html)
     movie = ""
     flashVars = ""
     for content in flashContent.split("<param"):
         value = Decoder.extract('value="', '"', content)
         name = Decoder.extract('name="', '"', content)
         if name == "movie" or name == "player":
             movie = value
         elif name == "FlashVars":
             flashVars = value
     swfUrl = "http://www.streamable.ch" + movie
     flashVars = flashVars[flashVars.find("=") :]
     decodedFlashvars = base64.standard_b64decode(flashVars)
     logger.info("decoded url is: " + decodedFlashvars)
     response = Downloader.getContentFromUrl(decodedFlashvars)
     token = Decoder.extract('"token1":"', '"', response)
     finalLink = base64.standard_b64decode(token)
     logger.debug("final link is: " + finalLink)
     return finalLink
Example #21
    def decrypt(encrypted):
        decrypted = encrypted
        try:
            logger.debug("Encrypted content is: "+encrypted)
            if not ONLINE:
                decrypted = PBEWithMD5AndDES.decrypt(encrypted, Spliveappcom.PASSWORD)
            elif len(encrypted)>0 and encrypted.find("http://")==-1:
                decrypted = Downloader.getContentFromUrl(Spliveappcom.DECODER_URL+'?data='+encrypted+"&key="+Spliveappcom.PASSWORD+"&iterations=1000")
            logger.debug("Decrypted content is: "+decrypted)
        except:
            logger.error("Could not be unencrypted: "+encrypted)
            pass

        return decrypted
Example #22
 def decodeOpenload(link):
     # get cookies
     mediaId = Decoder.extract("/f/", "/", link)
     embedUrl = "https://openload.io/embed/" + mediaId
     html = Downloader.getContentFromUrl(embedUrl, "", "", "", False, False)
     logger.info("html is: " + html)
     logger.debug("using cookie 1: " + Downloader.cookie)
     logger.debug("Media id for openload is: " + mediaId)
     extra = "&login=f750b26513f64034&key=oaA-MbZo"  # this avoid captcha petition
     link2 = "https://api.openload.io/1/file/dlticket?file=" + mediaId + extra
     data = Downloader.getContentFromUrl(link2, "", Downloader.cookie, embedUrl, True, False)
     logger.debug("jsonData: " + data)
     js_result = json.loads(data)
     logger.info("sleeping... " + str(js_result["result"]["wait_time"]))
     time.sleep(int(js_result["result"]["wait_time"]))
     link3 = "https://api.openload.io/1/file/dl?file=%s&ticket=%s" % (mediaId, js_result["result"]["ticket"])
     logger.debug("using cookie 2: " + Downloader.cookie)
     result = Downloader.getContentFromUrl(link3, "", Downloader.cookie, embedUrl, True, False)
     logger.debug("jsonData 2: " + result)
     js_result2 = json.loads(result)
     file = js_result2["result"]["url"] + "?mime=true"
     logger.info("Built final link: " + file)
     return file
Example #23
    def extractSawlive(scriptSrc, cookie, iframeUrl):
        encryptedHtml = Downloader.getContentFromUrl(scriptSrc, "", cookie,
                                                     iframeUrl)
        #print encryptedHtml
        decryptedUrl = Decoder.decodeSawliveUrl(encryptedHtml)
        html3 = Downloader.getContentFromUrl(decryptedUrl, "", cookie,
                                             scriptSrc)
        #ok, now extract flash script content

        flashContent = Decoder.extract("var so = new SWFObject('", "</script>",
                                       html3)
        file = Decoder.extract("'file', '", "');", flashContent)
        rtmpUrl = ""
        if flashContent.find("'streamer', '") > .1:
            rtmpUrl = Decoder.extract("'streamer', '", "');", flashContent)
        swfUrl = "http://static3.sawlive.tv/player.swf"  #default
        #update swf url
        swfUrl = flashContent[:flashContent.find("'")]
        logger.info("updated swf player to: " + swfUrl)
        if rtmpUrl == '' and file.find("http://") > -1:
            finalRtmpUrl = file  #it's a redirect with an .m3u8, so it's used
        else:
            finalRtmpUrl = rtmpUrl + " playpath=" + file + " swfUrl=" + swfUrl + " live=1 conn=S:OK pageUrl=" + decryptedUrl + " timeout=12"
        return finalRtmpUrl
Example #24
 def decodeKeepVid(link):
     html = Downloader.getContentFromUrl("http://keepvid.com/?url="+urllib.quote_plus(link))
     tableHtml = Decoder.extract('<ul><li>',"</ul>",html)
     logger.debug("extracting from html: "+tableHtml)
     links = []
     selectedLink = ""
     for liHtml in tableHtml.split('</li>'):
         link = Decoder.extract('a href="','"',liHtml)
         title = Decoder.extract('alt="', '"', liHtml)
         if "1080p" in title and '(Video Only)' not in title:
             selectedLink = link
         elif len(selectedLink)==0 and "720p" in title and '(Video Only)' not in title:
             selectedLink = link
         else:
             logger.debug("No link selected with title: "+title)
         logger.debug("url at this moment is (youtube external): " + link)
         links.append(link)
     if len(selectedLink)==0 and len(links)>0:
         selectedLink = links[0]
     return selectedLink
Example #25
 def extractElementsSearch(html):
     jsonContent = json.loads(html)
     x = []
     for jsonValues in jsonContent:
         element = {}
         link = ""  # avoid carrying over the previous item's link for unknown categories
         element["title"] = jsonValues["label"]+" - "+jsonValues["category"]
         if jsonValues["category"] == 'Artists':
             link = base64.standard_b64encode(Fildonet.ARTIST+str(jsonValues["label"]))
         elif jsonValues["category"] == 'Albums':
             link = base64.standard_b64encode(Fildonet.ALBUM+str(jsonValues["id"]))
         elif jsonValues["category"] == 'Songs':
             id = jsonValues["id"]
             html2 = Downloader.getContentFromUrl(Fildonet.SONG+str(id))
             songsJSONS = json.loads(html2)
             for songsJSON in songsJSONS:
                 link = songsJSON["mp3Url"]
                 element["thumbnail"] = songsJSON["picUrl"]
         element["link"] = link
         x.append(element)
     return x
Example #26
 def extractTargetVideo(page):
     logger.debug("extracting from page: "+page)
     html = Streamgaroo.getContentFromUrl(url=page,referer=Streamgaroo.MAIN_URL)
     logger.debug("html is: "+html)
     apiKey = Decoder.extract('data-sh="','"',html)
     bruteJSON = Streamgaroo.getContentFromUrl(Streamgaroo.CHANNEL_API, "h="+apiKey, Streamgaroo.cookie, Streamgaroo.MAIN_URL)
     jsonList = json.loads(bruteJSON)
     url2 = jsonList["link"]
     logger.debug("using url: "+url2)
     html2 = Streamgaroo.getContentFromUrl(url2, "", Streamgaroo.cookie, page)
     logger.debug("html2 is: "+html2)
     if 'playJS("' in html2:
         finalUrl = Decoder.extract('playJS("','"',html2)
         logger.debug("found final url: "+finalUrl)
         finalUrl = finalUrl.replace("http://www.streamgaroo.com/fetch/r/","") #clean proxies
         if 'playlist.m3u8' in finalUrl and '==' in finalUrl:
             finalUrl = finalUrl.replace('playlist.m3u8?','chunks.m3u8?')
         finalUrl = finalUrl + "|" + urllib.unquote(Downloader.getHeaders())
     elif "playStream('iframe','" in html2:
         iframeUrl = finalUrl = Decoder.extract("playStream('iframe','","'",html2)
         logger.debug("found iframe link: " + iframeUrl)
         try:
             iframeHtml = Downloader.getContentFromUrl(url=iframeUrl, data=" ", referer=page)
         except:
             logger.debug("trying second way, easy!!")
             import urllib2
             req = urllib2.Request(iframeUrl)
             req.add_header('Referer', page)
             req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0')
             resp = urllib2.urlopen(req)
             iframeHtml = resp.read()
             logger.debug("done!")
             pass
         logger.debug("html iframe is: "+iframeHtml)
         if 'adca.st/broadcast/player' in iframeHtml:
             finalUrl = Decoder.decodeBroadcastst(iframeUrl,page)
         elif 'vaughnlive.tv/embed/video/' in iframeUrl:
             finalUrl = Decoder.decodeVaughnlivetv(iframeUrl,page)
     logger.debug("done!")
     return finalUrl
Example #27
    def extractTargetVideo(link):
        logger.debug("trying to decode with youtube link decrypter: " + link)
        code = link[link.find("v=") + 2:]
        logger.debug("trying with code: " + code)
        try:
            link = Decoder.downloadY(code)
        except:
            # trying second way, external page

            html = Downloader.getContentFromUrl(link, referer=Youtube.MAIN_URL)
            oldLink = link
            if 'ytplayer.config = {' in html:
                logger.debug("trying new way for .m3u8 links...")
                link = Decoder.extract(',"hlsvp":"', '"', html).replace('\\', '')
                link = urllib.unquote(link)
                logger.debug("new youtube extracted link from json is: " + link)
                # link += "|" + Downloader.getHeaders(oldLink)
            if "http" not in link:
                logger.debug("trying old second way: external resource...")
                link = Youtube.decodeKeepVid(oldLink)
            pass
        if ".m3u8" in link:
            bruteM3u8 = Youtube.getContentFromUrl(link);
            if 'https://' in bruteM3u8:
                m3u8 = bruteM3u8[bruteM3u8.rfind('https://'):]
                link = urllib.unquote_plus(m3u8).strip()
                logger.debug("using the last one inside: "+m3u8)
            else:
                logger.debug("no last one link selected :'(")
        else:
            logger.debug("nothing is transformed for youtube links.")

        logger.debug("final youtube decoded url is: " + link)
        if ";" in link:
            link = link.replace("=", "%3D").replace(";", "%3B")
        else:
            link = link.replace("%3D","=")
        return link
Example #28
 def extractElementsSearch(html):
     jsonContent = json.loads(html)
     x = []
     for jsonValues in jsonContent:
         element = {}
         link = ""  # avoid carrying over the previous item's link for unknown categories
         element["title"] = jsonValues["label"] + " - " + jsonValues["category"]
         if jsonValues["category"] == 'Artists':
             link = base64.standard_b64encode(Fildonet.ARTIST +
                                              str(jsonValues["label"]))
         elif jsonValues["category"] == 'Albums':
             link = base64.standard_b64encode(Fildonet.ALBUM +
                                              str(jsonValues["id"]))
         elif jsonValues["category"] == 'Songs':
             id = jsonValues["id"]
             html2 = Downloader.getContentFromUrl(Fildonet.SONG + str(id))
             songsJSONS = json.loads(html2)
             for songsJSON in songsJSONS:
                 link = songsJSON["mp3Url"]
                 element["thumbnail"] = songsJSON["picUrl"]
         element["link"] = link
         x.append(element)
     return x
Example #29
 def decodeCastalbatv(url,page=''):
     channelId = url[url.find('cid=')+len('cid='):]
     if channelId.find("&")>-1:
         channelId = channelId[:channelId.find("&")]
     #iframeUrl = "http://castalba.tv/channel/"+channelId
     iframeUrl = url;
     logger.debug("using referer: "+page)
     html = Downloader.getContentFromUrl(iframeUrl,'',"",page)
     file = "";
     if html.find(".m3u8")>-1:
         file = Decoder.rExtract("'file': '",'.m3u8',html)
         logger.debug("detected castalba file: "+file)
         if len(file)>0 and page!='':
             file+="|Referer="+page
         else:
             file+="|Referer="+file
     else:
         file = Decoder.extract("var file = '","'",html)
         flash= Decoder.extract("'flashplayer': \"","\"",html)
         rtmpUrl = "rtmp://"+Decoder.extract("return '/","';",html)
         playpath = file+"?"+Decoder.extract("unescape('?","'),",html)
         file = rtmpUrl+" playpath="+playpath+" swfUrl="+flash+" live=1 pageUrl=http://castalba.tv/"
     logger.debug("final link from castalba is: "+file)
     return file
Example #30
 def search(text,page=0,cookie=''):
     page = "http://redmp3.cc/mp3-"+urllib.unquote_plus(text)+"/"+str(page)
     html = Downloader.getContentFromUrl(page,"",cookie,"")
     x = Redmp3cc.extractElementsPlayer(html)
     return x
Example #31
File: mobdro.py, Project: harddevelop/tvbox
 def pretoken():
     if (Mobdro.TOKEN is None or len(Mobdro.TOKEN)==0) and (Mobdro.REMOTE_TOKEN is not None and len(Mobdro.REMOTE_TOKEN)>0):
         remoteToken = Downloader.getContentFromUrl(Mobdro.REMOTE_TOKEN).encode("utf-8").strip()
         logger.debug("mobdro token is: "+remoteToken)
         Mobdro.TOKEN = remoteToken
         logger.debug("constant is: "+Mobdro.TOKEN)
Example #32
def open(url,page):
	if url.find("rtmp://")==-1 and url.find("|Referer=")==-1 and ( url.find("http://privatestream.tv/")>-1 or url.find("http://www.dinostream.pw/")>-1 or url.find("http://www.embeducaster.com/")>-1 or url.find("http://tv.verdirectotv.org/channel.php")>-1 or url.find("http://mamahd.com/")>-1):
		logger.info("brute url [referer] is: "+url)
		referer = ''
		if(url.find("referer: ")>-1):
			referer = url[url.find("referer: ")+len("referer: "):]
		url = url[0:url.find(",")]
		if url.find("http://privatestream.tv/")>-1:
			html = Downloader.getContentFromUrl(url,"","",referer)
			url = Decoder.decodePrivatestream(html,referer)
		elif url.find("http://www.dinostream.pw/")>-1:
			url = Decoder.extractDinostreamPart(url,referer)["link"]
		elif url.find("http://www.embeducaster.com/")>-1:
			#url = url.replace("/membedplayer/","/embedplayer/")
			url = Cineestrenostv.getContentFromUrl(url,"","",referer)
		elif url.find("http://tv.verdirectotv.org/channel.php")>-1:
			html4 = Cineestrenostv.getContentFromUrl(url,"",Cineestrenostv.cookie,referer)
			finalIframeUrl = Decoder.extractWithRegex('http://','%3D"',html4)
			if finalIframeUrl.find('"')>-1 or finalIframeUrl.find("'")>-1:
				finalIframeUrl = finalIframeUrl[0:len(finalIframeUrl)-1]
			finalHtml = Cineestrenostv.getContentFromUrl(finalIframeUrl,"",Cineestrenostv.cookie,referer)
			url = Decoder.decodeBussinessApp(finalHtml,finalIframeUrl)
		elif url.find("http://mamahd.com/")>-1:
			url = Mamahdcom.getChannels(url)[0]["link"]
		elif url.find("http://showsport-tv.com/")>-1:
			url = ShowsportTvCom.getChannels(url)[0]["link"]
	elif url.find("rtmp://")==-1:
		try:
			if url.find(", referer: ")>-1:
				page = url[url.find(", referer: ")+len(", referer: "):]
				url = url[:url.find(", referer: ")]
				logger.debug("changing page to referer: "+page)
			logger.debug("trying decoder part for url: "+url)
			url = Decoder.decodeLink(url,page)
		except:
			logger.info("decoder url launched an exception, probably could not be decoded")
			pass
	#redirect special links to their dedicated add-ons
	if url.find("sop://")>-1 or url.find("acestream://")>-1 or url.find(".acelive")>-1: #required plexus or something similar installed, this dependency is external from this addon so needs to be installed
		logger.info("trying to send link to plexus: "+url)
		mode = "1"
		if url.find("sop://")>-1:
			mode = "2"
		url = "plugin://program.plexus/?mode="+mode+"&url="+url+"&name=RemoteLink"
	elif url.find(".torrent")>-1 or url.find("magnet:")>-1:
		logger.info("trying to send link to quasar: "+url)
		url = urllib.quote_plus(url)
		url = "plugin://plugin.video.quasar/play?uri="+url
	elif url.find("youtube.com/")>-1:
		id = ""
		if url.find("v=")>-1:
			id = url[url.find("v=")+len("v="):]
		elif url.find("/embed/")>-1:
			id = url[url.find("/embed/")+len("/embed/"):]
		url = "plugin://plugin.video.youtube/play/?video_id="+id+""
	elif url.find("vimeo.com/")>-1:
		url = "plugin://plugin.video.vimeo/play/?video_id="+urllib.quote_plus(url)
	else:
		logger.info("nothing done!")
	logger.debug("launching playable url: "+url)
	play(url,page)
Example #33
 def search(text,page=0,cookie=''):
     page = "http://fildo.net/autocomplete163.php?term="+urllib.quote_plus(text)
     html = Downloader.getContentFromUrl(page,"",cookie,"")
     x = Fildonet.extractElementsSearch(html)
     return x
Example #34
 def search(text, page=0, cookie=''):
     page = "http://fildo.net/autocomplete163.php?term=" + urllib.quote_plus(
         text)
     html = Downloader.getContentFromUrl(page, "", cookie, "")
     x = Fildonet.extractElementsSearch(html)
     return x
Example #35
    def getFinalHtmlFromLink(link, waitTime=10, inhu=False):
        data = Downloader.getContentFromUrl(link, "", "lang=english")
        html = ""
        if data.find("<script type='text/javascript'>eval(function(p,a,c,k,e"
                     ) == -1:

            finalCookie = "lang=english"
            '''
            cookies = ";"
            cookies = response.info()['Set-Cookie']

            for cookie in cookies.split(";"):
                if cookie.find("path=") == -1 and cookie.find("expires=") == -1 and cookie.find("Max-Age=") and cookie.find("domain="):
                    if len(finalCookie)>0:
                        finalCookie += "; "
                    finalCookie+= cookie

            logger.info('Extracted cookie: '+finalCookie)
            '''
            #build form
            if data.find('type="hidden" name="op" value="') > -1:
                op = Decoder.extract('type="hidden" name="op" value="', '"',
                                     data)
                id = Decoder.extract('type="hidden" name="id" value="', '"',
                                     data)
                fname = Decoder.extract('type="hidden" name="fname" value="',
                                        '"', data)
                usr_login = Decoder.extract(
                    'type="hidden" name="usr_login" value="', '"', data)
                referer = Decoder.extract(
                    'type="hidden" name="referer" value="', '"', data)
                hash = Decoder.extract('type="hidden" name="hash" value="',
                                       '"', data)
                if inhu == False:
                    imhuman = Decoder.extract(
                        'type="submit" name="imhuman" value="', '"',
                        data).replace("+", " ")
                    form = {
                        'op': op,
                        'id': id,
                        'usr_login': usr_login,
                        'fname': fname,
                        'referer': referer,
                        'hash': hash,
                        'imhuman': imhuman
                    }
                else:
                    btn_download = ""
                    inhu = Decoder.extract('type="hidden" name="inhu" value="',
                                           '"', data)
                    gfk = Decoder.extract("name: 'gfk', value: '", "'", data)
                    vhash = Decoder.extract("name: '_vhash', value: '", "'",
                                            data)

                    form = {
                        'op': op,
                        'id': id,
                        'usr_login': usr_login,
                        'fname': fname,
                        'referer': referer,
                        'hash': hash,
                        'inhu': inhu,
                        '_vhash': vhash,
                        'gfk': gfk,
                        'imhuman': btn_download
                    }

                if op != '':
                    time.sleep(waitTime)
                    html = Decoder.getContent(link, form, link, finalCookie,
                                              False).read()
        else:
            html = data

        return html
Example #36
    def getFinalHtmlFromLink(link, waitTime=10, inhu=False):
        data = Downloader.getContentFromUrl(link, "", "lang=english")
        html = ""
        if data.find("<script type='text/javascript'>eval(function(p,a,c,k,e") == -1:

            finalCookie = "lang=english"
            """
            cookies = ";"
            cookies = response.info()['Set-Cookie']

            for cookie in cookies.split(";"):
                if cookie.find("path=") == -1 and cookie.find("expires=") == -1 and cookie.find("Max-Age=") and cookie.find("domain="):
                    if len(finalCookie)>0:
                        finalCookie += "; "
                    finalCookie+= cookie

            logger.info('Extracted cookie: '+finalCookie)
            """
            # build form
            if data.find('type="hidden" name="op" value="') > -1:
                op = Decoder.extract('type="hidden" name="op" value="', '"', data)
                id = Decoder.extract('type="hidden" name="id" value="', '"', data)
                fname = Decoder.extract('type="hidden" name="fname" value="', '"', data)
                usr_login = Decoder.extract('type="hidden" name="usr_login" value="', '"', data)
                referer = Decoder.extract('type="hidden" name="referer" value="', '"', data)
                hash = Decoder.extract('type="hidden" name="hash" value="', '"', data)
                if inhu == False:
                    imhuman = Decoder.extract('type="submit" name="imhuman" value="', '"', data).replace("+", " ")
                    form = {
                        "op": op,
                        "id": id,
                        "usr_login": usr_login,
                        "fname": fname,
                        "referer": referer,
                        "hash": hash,
                        "imhuman": imhuman,
                    }
                else:
                    btn_download = ""
                    inhu = Decoder.extract('type="hidden" name="inhu" value="', '"', data)
                    gfk = Decoder.extract("name: 'gfk', value: '", "'", data)
                    vhash = Decoder.extract("name: '_vhash', value: '", "'", data)

                    form = {
                        "op": op,
                        "id": id,
                        "usr_login": usr_login,
                        "fname": fname,
                        "referer": referer,
                        "hash": hash,
                        "inhu": inhu,
                        "_vhash": vhash,
                        "gfk": gfk,
                        "imhuman": btn_download,
                    }

                if op != "":
                    time.sleep(waitTime)
                    html = Decoder.getContent(link, form, link, finalCookie, False).read()
        else:
            html = data

        return html