Code example #1
File: upafile.py Project: conejoninja/xbmc-seriesly
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[upafile.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    data = scrapertools.cache_page(page_url)
    #<script type='text/javascript'>eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('11 0=10 z(\'2://4.3/6/6.y\',\'6\',\'x\',\'w\',\'9\');0.5(\'v\',\'u\');0.5(\'t\',\'s\');0.5(\'r\',\'q\');0.1(\'p\',\'\');0.1(\'o\',\'2://a.4.3:n/d/m/8.l\');0.1(\'k\',\'2://a.4.3/i/j/h.g\');0.1(\'7\',\'8\');0.1(\'7\',\'2\');0.1(\'2.f\',\'e\');0.c(\'b\');',36,38,'s1|addVariable|http|com|upafile|addParam|player|provider|video||s82|flvplayer|write||start|startparam|jpg|idyoybh552bf||00024|image|mp4|k65ufdsgg7pvam5r5o22urriqvsqzkkf4cu3biws2xwxsvgmrfmjyfbz|182|file|duration|opaque|wmode|always|allowscriptaccess|true|allowfullscreen|400|500|swf|SWFObject|new|var'.split('|')))
    patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d\).*?)</script>"
    matches = re.compile(patron,re.DOTALL).findall(data)
    cifrado=""
    for match in matches:
        logger.info("match="+match)
        if "mp4" in match or "flv" in match or "video" in match:
            cifrado = match
            break
    
    # Extrae la URL del vídeo
    logger.info("cifrado="+cifrado)
    descifrado = unpackerjs.unpackjs(cifrado)
    descifrado = descifrado.replace("\\","")
    logger.info("descifrado="+descifrado)

    #s1.addVariable('file','http://s82.upafile.com:182/d/k65ufdsgg7pvam5r5o22urriqvsqzkkf4cu3biws2xwxsvgmrfkxwzx4/video.mp4')
    media_url = scrapertools.get_match(descifrado,"addVariable\('file','([^']+)'")
    
    if len(matches)>0:
        video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [upafile]",media_url])

    for video_url in video_urls:
        logger.info("[upafile.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
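
Every get_video_url() on this page follows the same contract: it takes the hosting page URL and returns a list of [label, media_url] pairs (empty on failure). As a rough, hypothetical sketch of how a caller might consume that result (the function name and selection logic below are illustrative, not taken from any of the listed projects):

def pick_first_stream(page_url):
    # Assumes get_video_url and logger from the example above are in scope.
    video_urls = get_video_url(page_url)
    if not video_urls:
        logger.info("no playable URL found for %s" % page_url)
        return None
    label, media_url = video_urls[0]
    logger.info("selected %s - %s" % (label, media_url))
    return media_url
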
Code example #2
File: gamovideo.py Project: Reat0ide/pelis_backup
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("pelisalacarta.gamovideo get_video_url(page_url='%s')" % page_url)

    # Lo pide una vez
    headers = [['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']]
    data = scrapertools.cache_page( page_url , headers=headers )
    #logger.info("data="+data)
    
    try:
        '''
        <input type="hidden" name="op" value="download1">
        <input type="hidden" name="usr_login" value="">
        <input type="hidden" name="id" value="auoxxtvyquoy">
        <input type="hidden" name="fname" value="Star.Trek.Into.Darkness.2013.HD.m720p.LAT.avi">
        <input type="hidden" name="referer" value="">
        <input type="hidden" name="hash" value="1624-83-46-1377796069-b5e6b8f9759d080a3667adad637f00ac">
        <input type="submit" name="imhuman" value="Continue to Video" id="btn_download">
        '''
        op = scrapertools.get_match(data,'<input type="hidden" name="op" value="(down[^"]+)"')
        usr_login = ""
        id = scrapertools.get_match(data,'<input type="hidden" name="id" value="([^"]+)"')
        fname = scrapertools.get_match(data,'<input type="hidden" name="fname" value="([^"]+)"')
        referer = scrapertools.get_match(data,'<input type="hidden" name="referer"\s+value="([^"]*)"')
        hashvalue = scrapertools.get_match(data,'<input type="hidden" name="hash" value="([^"]*)"')
        submitbutton = scrapertools.get_match(data,'<input type="submit" name="imhuman" value="([^"]+)"').replace(" ","+")

        import time
        time.sleep(5)

        # Lo pide una segunda vez, como si hubieras hecho click en el banner
        #op=download1&usr_login=&id=auoxxtvyquoy&fname=Star.Trek.Into.Darkness.2013.HD.m720p.LAT.avi&referer=&hash=1624-83-46-1377796019-c2b422f91da55d12737567a14ea3dffe&imhuman=Continue+to+Video
        #op=search&usr_login=&id=auoxxtvyquoy&fname=Star.Trek.Into.Darkness.2013.HD.m720p.LAT.avi&referer=&hash=1624-83-46-1377796398-8020e5629f50ff2d7b7de99b55bdb177&imhuman=Continue+to+Video
        post = "op="+op+"&usr_login="******"&id="+id+"&fname="+fname+"&referer="+referer+"&hash="+hashvalue+"&imhuman="+submitbutton
        headers.append(["Referer",page_url])
        data = scrapertools.cache_page( page_url , post=post, headers=headers )
        #logger.info("data="+data)
    except:
        import traceback
        traceback.print_exc()
    
    # Extrae la URL
    logger.info("data="+data)
    data = scrapertools.find_single_match(data,"<script type='text/javascript'>(.*?)</script>")
    logger.info("data="+data)
    data = unpackerjs.unpackjs(data)
    logger.info("data="+data)


    pfile = scrapertools.get_match(data,'file\s*\:\s*"([^"]+)"')
    pstreamer = scrapertools.get_match(data,'streamer\s*\:\s*"([^"]+)"')

    media_url = pstreamer+" playpath="+pfile

    video_urls = []
    video_urls.append( [ "RTMP [gamovideo]",media_url])

    for video_url in video_urls:
        logger.info("[gamovideo.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Code example #3
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("videostoring get_video_url(page_url='%s')" % page_url)
    if not "embed" in page_url:
        page_url = page_url.replace(
            "http://www.videostoring.com/",
            "http://www.videostoring.com/embed-") + ".html"

    data = scrapertools.cache_page(page_url)
    data = scrapertools.find_single_match(
        data, "<script type='text/javascript'>(.*?)</script>")
    data = unpackerjs.unpackjs(data)

    url = scrapertools.get_match(data, '<param name="src"value="([^"]+)"/>')
    video_urls = []
    video_urls.append([
        scrapertools.get_filename_from_url(url)[-4:] + " [videostoring]", url
    ])

    for video_url in video_urls:
        logger.info("[videostoring.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code example #4
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("pelisalacarta.gamovideo get_video_url(page_url='%s')" %
                page_url)
    if not "embed" in page_url:
        page_url = page_url.replace(
            "http://gamovideo.com/",
            "http://gamovideo.com/embed-") + "-640x360.html"

    data = scrapertools.cache_page(page_url)
    data = scrapertools.find_single_match(
        data, "<script type='text/javascript'>(.*?)</script>")
    data = unpackerjs.unpackjs(data)

    host = scrapertools.get_match(data, 'image:"(http://[^/]+/)')
    flv_url = scrapertools.get_match(data, ',\{file:"([^"]+)"')
    rtmp_url = scrapertools.get_match(data, '\[\{file:"([^"]+)"')
    flv = host + flv_url.split("=")[1] + "/v.flv"

    video_urls = []
    video_urls.append(
        [scrapertools.get_filename_from_url(flv)[-4:] + " [gamovideo]", flv])
    #video_urls.append(["RTMP [gamovideo]",rtmp_url])

    for video_url in video_urls:
        logger.info("[gamovideo.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code example #5
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[vidxden.py] url="+page_url)
    if ".html" not in page_url:
        logger.info("[vidxden.py] URL incompleta")
        data = scrapertools.cache_page(page_url)
        patron = '<input name="fname" type="hidden" value="([^"]+)">'
        matches = re.compile(patron,re.DOTALL).findall(data)
        page_url = page_url+"/"+matches[0]+".html"

        
    # Lo pide una vez
    scrapertools.cache_page( page_url , headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']] )
    
    # Lo pide una segunda vez, como si hubieras hecho click en el banner
    patron = 'http\:\/\/www\.vidxden\.com/([^\/]+)/(.*?)\.html'
    matches = re.compile(patron,re.DOTALL).findall(page_url)
    logger.info("[vidxden.py] fragmentos de la URL")
    scrapertools.printMatches(matches)
    
    codigo = ""
    nombre = ""
    if len(matches)>0:
        codigo = matches[0][0]
        nombre = matches[0][1]

    post = "op=download1&usr_login=&id="+codigo+"&fname="+nombre+"&referer=&method_free=Free+Stream"
    data = scrapertools.cache_page( page_url , post=post, headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'],['Referer',page_url]] )
    
    # Extrae el trozo cifrado
    patron = '<div id="embedcontmvshre"[^>]+>(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    data = ""
    if len(matches)>0:
        data = matches[0]
        logger.info("[vidxden.py] bloque packed="+data)
    else:
        logger.info("[vidxden.py] no encuentra bloque packed="+data)

        return ""
    
    # Lo descifra
    descifrado = unpackerjs.unpackjs(data)
    
    # Extrae la URL del vídeo
    logger.info("descifrado="+descifrado)
    # Extrae la URL
    patron = '<param name="src"value="([^"]+)"/>'
    matches = re.compile(patron,re.DOTALL).findall(descifrado)
    scrapertools.printMatches(matches)
    
    video_urls = []
    
    if len(matches)>0:
        video_urls.append( ["."+matches[0].rsplit('.',1)[1]+" [vidxden]",matches[0]])

    for video_url in video_urls:
        logger.info("[vidxden.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Code example #6
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[downupload.py] get_video_url(page_url='%s')" % page_url)

    page_url = page_url.replace("amp;","")
    data = scrapertools.cache_page(page_url)
    
    video_urls = []

    # s1.addVariable('file','http://78.140.181.136:182/d/kka3sx52abiuphevyzfirfaqtihgyq5xlvblnetok2mj4llocdeturoy/video.mp4');
    # http://downupload.com:182/d/k2a3kxf2abiuphevyzfirgajremkk3if57xcpelwboz4hbzjnfsvbit6/video.mp4
    patron  = "(http://[\S]+\.mp4)" 
    matches = re.compile(patron,re.DOTALL).findall(data)
    
    if len(matches)>0:
        scrapertools.printMatches(matches)
        for match in matches:
            videourl = match
            videourl = videourl.replace('%5C','')
            videourl = urllib.unquote(videourl)
            video_urls.append( [ ".mp4 [Downupload]" , videourl ] )
            
    else:
        # Si es un enlace de Descarga se busca el archivo
        patron  = '<div id="player_code">.*?value[\W]name[\W]param[\W]com[\W]http[\W]false[\W](.*?)[\W]divx[\W]previewImage[\W].*?[\W]custommode[\W](.*?)[\W](.*?)[\W](.*?)[\W]src'
        matches = re.compile(patron,re.DOTALL).findall(data)
        scrapertools.printMatches(matches)
        for match in matches:
            videourl = "http://"+match[0]+".com:"+match[3]+"/d/"+match[2]+"/video."+match[1]
            videourl = videourl.replace('|','.')
            videourl = urllib.unquote(videourl)
            video_urls.append( [ "."+match[1]+" [Downupload]" , videourl ] )
            
        # Localiza enlaces con IP
        if len(matches)==0:
            patron  = '<div id="player_code">.*?value[\W]name[\W]param[\W]http[\W]false[\W](.*?)[\W](.*?)[\W](.*?)[\W](.*?)[\W]divx[\W]previewImage[\W].*?[\W]custommode[\W](.*?)[\W](.*?)[\W](.*?)[\W]src'
            matches = re.compile(patron,re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            for match in matches:
                videourl = "http://"+match[3]+"."+match[2]+"."+match[1]+"."+match[0]+":"+match[6]+"/d/"+match[5]+"/video."+match[4]
                videourl = videourl.replace('|','')
                videourl = urllib.unquote(videourl)
                video_urls.append( [ "."+match[4]+" [Downupload]" , videourl ] )
            # Otro metodo de busqueda
            if len(matches)==0:
                url = unpackerjs.unpackjs(data)
                logger.info("[unpackerjs.py] "+url)
                patron = 'src"value="([^"]+)"'
                matches = re.compile(patron,re.DOTALL).findall(url)
                for match in matches:                  
                    videourl = match
                    videourl = videourl.replace('|','')
                    videourl = urllib.unquote(videourl)
                    video_urls.append( [ "."+videourl.rsplit('.',1)[1]+" [Downupload]" , videourl ] )                  
                    
                

    for video_url in video_urls:
        logger.info("[downupload.py] %s - %s" % (video_url[0],video_url[1]))
        
    return video_urls
Code example #7
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[upafile.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    data = scrapertools.cache_page(page_url)
    #<script type='text/javascript'>eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('11 0=10 z(\'2://4.3/6/6.y\',\'6\',\'x\',\'w\',\'9\');0.5(\'v\',\'u\');0.5(\'t\',\'s\');0.5(\'r\',\'q\');0.1(\'p\',\'\');0.1(\'o\',\'2://a.4.3:n/d/m/8.l\');0.1(\'k\',\'2://a.4.3/i/j/h.g\');0.1(\'7\',\'8\');0.1(\'7\',\'2\');0.1(\'2.f\',\'e\');0.c(\'b\');',36,38,'s1|addVariable|http|com|upafile|addParam|player|provider|video||s82|flvplayer|write||start|startparam|jpg|idyoybh552bf||00024|image|mp4|k65ufdsgg7pvam5r5o22urriqvsqzkkf4cu3biws2xwxsvgmrfmjyfbz|182|file|duration|opaque|wmode|always|allowscriptaccess|true|allowfullscreen|400|500|swf|SWFObject|new|var'.split('|')))
    patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d\).*?)</script>"
    matches = re.compile(patron,re.DOTALL).findall(data)
    cifrado=""
    for match in matches:
        logger.info("match="+match)
        if "mp4" in match or "flv" in match or "video" in match:
            cifrado = match
            break
    
    # Extrae la URL del vídeo
    logger.info("cifrado="+cifrado)
    descifrado = unpackerjs.unpackjs(cifrado)
    descifrado = descifrado.replace("\\","")
    logger.info("descifrado="+descifrado)

    #s1.addVariable('file','http://s82.upafile.com:182/d/k65ufdsgg7pvam5r5o22urriqvsqzkkf4cu3biws2xwxsvgmrfkxwzx4/video.mp4')
    media_url = scrapertools.get_match(descifrado,"addVariable\('file','([^']+)'")
    
    if len(matches)>0:
        video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [upafile]",media_url])

    for video_url in video_urls:
        logger.info("[upafile.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Code example #8
File: vidbux.py Project: conejoninja/xbmc-seriesly
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[vidbux.py] url="+page_url)
    if ".html" not in page_url:
        logger.info("[vidbux.py] URL incompleta")
        data = scrapertools.cache_page(page_url)
        patron = '<input name="fname" type="hidden" value="([^"]+)">'
        matches = re.compile(patron,re.DOTALL).findall(data)
        page_url = page_url+"/"+matches[0]+".html"

        
    # Lo pide una vez
    scrapertools.cache_page( page_url , headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']] )
    
    # Lo pide una segunda vez, como si hubieras hecho click en el banner
    patron = 'http\:\/\/www\.vidbux\.com/([^\/]+)/(.*?)\.html'
    matches = re.compile(patron,re.DOTALL).findall(page_url)
    logger.info("[vidbux.py] fragmentos de la URL")
    scrapertools.printMatches(matches)
    
    codigo = ""
    nombre = ""
    if len(matches)>0:
        codigo = matches[0][0]
        nombre = matches[0][1]

    post = "op=download1&usr_login=&id="+codigo+"&fname="+nombre+"&referer=&method_free=Free+Stream"
    data = scrapertools.cache_page( page_url , post=post, headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'],['Referer',page_url]] )
    
    # Extrae el trozo cifrado
    patron = '<div id="embedcontmvshre"[^>]+>(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    data = ""
    if len(matches)>0:
        data = matches[0]
        logger.info("[vidbux.py] bloque packed="+data)
    else:
        logger.info("[vidbux.py] no encuentra bloque packed="+data)

        return ""
    
    # Lo descifra
    descifrado = unpackerjs.unpackjs(data)
    
    # Extrae la URL del vídeo
    logger.info("descifrado="+descifrado)
    # Extrae la URL
    patron = '<param name="src"value="([^"]+)"/>'
    matches = re.compile(patron,re.DOTALL).findall(descifrado)
    scrapertools.printMatches(matches)
    
    video_urls = []
    
    if len(matches)>0:
        video_urls.append( ["[vidbux]",matches[0]])

    for video_url in video_urls:
        logger.info("[vidbux.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Code example #9
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[nosvideo.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    # Lee la URL
    data = scrapertools.cache_page(page_url)
    bloque = scrapertools.get_match(data, '<Form method="POST"(.*)</.orm>')
    #logger.info("bloque="+bloque)
    op = scrapertools.get_match(
        bloque, '<input type="hidden" name="op" value="([^"]+)"')
    id = scrapertools.get_match(
        bloque, '<input type="hidden" name="id" value="([^"]+)"')
    rand = scrapertools.get_match(
        bloque, '<input type="hidden" name="rand" value="([^"]*)"')
    referer = scrapertools.get_match(
        bloque, '<input type="hidden" name="referer" value="([^"]*)"')
    usr_login = scrapertools.get_match(
        bloque, '<input type="hidden" name="usr_login" value="([^"]*)"')
    fname = scrapertools.get_match(
        bloque, '<input type="hidden" name="fname" value="([^"]+)"')
    method_free = scrapertools.get_match(
        bloque, '<input type="[^"]+" name="method_free" value="([^"]*)"')
    method_premium = scrapertools.get_match(
        bloque, '<input type="[^"]+" name="method_premium" value="([^"]*)"')

    # Simula el botón
    #op=download1&id=iij5rw25kh4c&rand=&referer=&usr_login=&fname=TED-TS-Screener.Castellano.Ro_dri.avi&method_free=&method_premium=&down_script=1&method_free=Continue+to+Video
    post = "op=" + op + "&id=" + id + "&rand=" + rand + "&referer=" + referer + "&usr_login="******"&fname=" + fname + "&method_free=&method_premium=" + method_premium + "&down_script=1&method_free=" + method_free
    data = scrapertools.cache_page(page_url, post=post)
    #logger.info("data="+data)

    # Saca el bloque packed y lo descifra
    packed = scrapertools.get_match(
        data,
        "(<script type='text/javascript'>eval\(function\(p,a,c,k,e,d\).*?</script>)"
    )
    from core import unpackerjs
    unpacked = unpackerjs.unpackjs(packed)
    logger.info("unpacked=" + unpacked)

    # Extrae el descriptor
    playlist = scrapertools.get_match(unpacked, "playlist\=(.*?\.xml)")
    data = scrapertools.cache_page(playlist)
    location = scrapertools.get_match(data, "<file>([^<]+)</file>")

    video_urls.append([
        scrapertools.get_filename_from_url(location)[-4:] + " [nosvideo]",
        location
    ])

    for video_url in video_urls:
        logger.info("[nosvideo.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code example #10
File: vidxden.py Project: conejoninja/xbmc-seriesly
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):

    logger.info("[vidxden.py] url="+page_url)

    # Lo pide una vez
    headers = []
    headers.append(['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'])
    data = scrapertools.cache_page( page_url , headers=headers )
    fname = scrapertools.get_match(data,'<input name="fname" type="hidden" value="([^"]+)">')
    codigo = scrapertools.get_match(page_url,'vidxden.com/(\w+)')

    # Lo pide una segunda vez, como si hubieras hecho click en el banner
    #op=download1&usr_login=&id=qtrv0ufkz3e4&fname=El_cazador_de_sue_os-dvd.avi&referer=&method_free=Continue+to+Video
    headers.append(['Referer',page_url])
    post = "op=download1&usr_login=&id="+codigo+"&fname="+fname+"&referer=&method_free=Continue+to+Video"
    data = scrapertools.cache_page( page_url , post=post, headers=headers )
    logger.info("data="+data)

    # Extrae el trozo cifrado
    #<div id="embedcontmvshre" style="position: absolute; top: 0; left: 0; visibility: hidden;"><script type='text/javascript'>eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('1i.1h(\'<8 10="1g"1f="1e:1d-1c-1b-1a-19"q="p"o="n"18="3://b.7.4/a/17.16"><2 1="u"0="t"/><2 1="s"0="r"/><2 1="m"0="3://i/l/6.k"/><2 1="f"0="5"><2 1="g"0="5"/><2 1="e"0="c"/><2 1="j"0="h"/><2 1="z"0="3://y.x.4:w/d/v/6"/><9 10="15"14="13/7"z="3://y.x.4:w/d/v/6"u="t"s="r"q="p"o="n"m="3://i/l/6.k"j="h"g="5"f="5"e="c"12="3://b.7.4/a/11/"></9></8>\');',36,55,'value|name|param|http|com|false|qtrv0ufkz3e4|divx|object|embed|plugin|go|Play||previewMessage|allowContextMenu|bannerEnabled|true||autoPlay|jpg|00249|previewImage|318|height|640|width|transparent|wmode|Stage6|custommode|opujxvaorizu2mdg6fst2fjdzlrn4p437h3lsbz5fjkxs|364|divxden|s31|src|id|download|pluginspage|video|type|np_vid|cab|DivXBrowserPlugin|codebase|CC0F21721616|9C46|41fa|D0AB|67DABFBF|clsid|classid|ie_vid|write|document'.split('|')))</script></div>
    patron = "(<script type='text/javascript'>eval\(function.*?</script>)"
    matches = re.compile(patron,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    data = ""
    if len(matches)>0:
        data = matches[0]
        logger.info("[vidxden.py] bloque packed="+data)
    else:
        logger.info("[vidxden.py] no encuentra bloque packed="+data)

        return ""
    
    # Lo descifra
    descifrado = unpackerjs.unpackjs(data)
    
    # Extrae la URL del vídeo
    logger.info("descifrado="+descifrado)
    # Extrae la URL
    patron = '<param name="src"value="([^"]+)"/>'
    matches = re.compile(patron,re.DOTALL).findall(descifrado)
    scrapertools.printMatches(matches)
    if len(matches)==0:
        descifrado = descifrado.replace("\\","")
        patron = "file','([^']+)'"
        matches = re.compile(patron,re.DOTALL).findall(descifrado)
        scrapertools.printMatches(matches)
    
    video_urls = []

    if len(matches)>0:
        video_urls.append( ["[vidxden]",matches[0]+"|Referer="+urllib.quote(page_url)+"&User-Agent="+urllib.quote('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14')])

    for video_url in video_urls:
        logger.info("[vidxden.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
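
The vidxden example above appends the request headers to the resolved URL using the "url|Header=value&Header=value" convention, with each value passed through urllib.quote. Below is a small helper sketch for reading that convention back out, assuming exactly the format built above (this helper is not part of the original module):

import urllib

def split_url_and_headers(url_with_headers):
    # Split "http://host/video.avi|Referer=...&User-Agent=..." into (url, headers dict).
    if "|" not in url_with_headers:
        return url_with_headers, {}
    url, _, raw_headers = url_with_headers.partition("|")
    headers = {}
    for pair in raw_headers.split("&"):
        name, _, value = pair.partition("=")
        headers[name] = urllib.unquote(value)
    return url, headers
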
Code example #11
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[meuvideos.py] url="+page_url)
    if not "embed" in page_url:
      page_url = page_url.replace("http://meuvideos.com/","http://meuvideos.com/embed-") + ".html"

    data = scrapertools.cache_page(page_url)
    data = "eval" + scrapertools.find_single_match(data,"<script type='text/javascript'>eval(.*?)</script>") 
    data = unpackerjs.unpackjs(data)
    url = scrapertools.get_match(data, 'file:"([^"]+)"')
    video_urls = []
    video_urls.append( [ scrapertools.get_filename_from_url(url)[-4:]+" [meuvideos]",url])

    return video_urls
Code example #12
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[vidbull.py] url=" + page_url)

    data = scrapertools.cache_page(
        page_url,
        headers=[[
            'User-Agent',
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'
        ]])
    logger.info("data=" + data)

    # Extrae el trozo cifrado
    '''
    <script type='text/javascript'>eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('2j 2=2i 2h(\'6://8.5/b/b.2g\',\'b\',\'2f\',\'2e\',\'9\');2.g(\'2d\',\'k\');2.g(\'2c\',\'2b\');2.g(\'2a\',\'29\');2.4(\'28\',\'../b/27.26\');2.4(\'25\',\'24\');2.4(\'l\',\'6://n.8.5:23/d/21/20.1z\');2.4(\'1y\',\'6://n.8.5/i/1x/e.1w\');2.4(\'1v\',\'6\');2.4(\'1u.j\',\'1t\');2.4(\'1s\',\'1r\');2.4(\'1q\',\'f-3\');2.4(\'f.h\',\'6://8.5/e\');2.4(\'f.1p\',\'%1o%1n%1m%1l%1k%1j%1i.5%1h-e-1g.1f%22%1e%c%1d%c%1c%c%1b%1a%19%18%17%16%m%14%13%m\');2.4(\'a.l\',\'6://8.5/12/11.10\');2.4(\'a.z\',\'k\');2.4(\'a.y\',\'15\');2.4(\'a.x\',\'1\');2.4(\'a.w\',\'0.7\');2.4(\'a.j\',\'v-u\');2.4(\'a.h\',\'6://8.5\');2.4(\'t\',\'s r\');2.4(\'q\',\'6://8.5\');2.p(\'o\');',36,92,'||s1||addVariable|com|http||vidbull||logo|player|3D0||erk3r6bpfyxy|sharing|addParam|link||position|true|file|3E|fs11|flvplayer|write|aboutlink|dlf|VidBull|abouttext|right|top|out|over|timeout|hide|png|vidbull_playerlogo|images|2FIFRAME|3C||3D338|20HEIGHT|3D640|20WIDTH|3DNO|20SCROLLING|20MARGINHEIGHT|20MARGINWIDTH|20FRAMEBORDER|html|640x318|2Fembed|2Fvidbull|2F|3A|22http|3D|20SRC|3CIFRAME|code|plugins|uniform|stretching|left|dock|provider|jpg|00031|image|flv|video|45sbu63kljrwuxim7e6xp2koxj4sxpaxyivgkrzwu27ggj5rdrrayurf||182|2606|duration|zip|modieus1|skin|opaque|wmode|always|allowscriptaccess|allowfullscreen|318|640|swf|SWFObject|new|var'.split('|')))
    '''
    patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d\).*?)</script>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    cifrado = ""
    for match in matches:
        logger.info("match=" + match)
        if "mp4" in match or "flv" in match or "video" in match:
            cifrado = match
            break

    # Extrae la URL del vídeo
    logger.info("cifrado=" + cifrado)
    descifrado = unpackerjs.unpackjs(cifrado)
    descifrado = descifrado.replace("\\", "")
    logger.info("descifrado=" + descifrado)

    # Extrae la URL
    media_url = scrapertools.get_match(descifrado,
                                       "addVariable\('file','([^']+)'")

    video_urls = []

    if len(matches) > 0:
        video_urls.append([
            scrapertools.get_filename_from_url(media_url)[-4:] + " [vidbull]",
            media_url
        ])

    for video_url in video_urls:
        logger.info("[vidbull.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code example #13
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[ovfile.py] url=" + page_url)

    if page_url.startswith('http'):
        page_url = extract_id(page_url)
        if page_url == "": return []

    page_url = 'http://ovfile.com/embed-' + page_url + '-600x340.html'
    # Lo pide una vez
    data = scrapertools.cache_page(page_url)

    # Extrae el trozo cifrado
    patron = "src='http://ovfile.com/player/swfobject.js'></script>[^<]+"
    patron += "<script type='text/javascript'>(.*?)</script>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    data = ""
    if len(matches) > 0:
        data = matches[0]
        logger.info("[ovfile.py] bloque packed=" + data)
    else:
        logger.info("[ovfile.py] no encuentra bloque packed=" + data)

        return ""

    # Lo descifra
    descifrado = unpackerjs.unpackjs(data)
    descifrado = descifrado.replace("\\'", "'")
    # Extrae la URL del vídeo
    logger.info("descifrado=" + descifrado)
    # Extrae la URL
    patron = "'file','([^']+)'"
    matches = re.compile(patron, re.DOTALL).findall(descifrado)
    scrapertools.printMatches(matches)

    video_urls = []

    if len(matches) > 0:
        url = "%s?file=%s" % (matches[0], matches[0])
        video_urls.append(
            ["." + matches[0].rsplit('.', 1)[1] + " [ovfile]", url])

    for video_url in video_urls:
        logger.info("[ovfile.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code example #14
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[allbox4.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    # Descarga
    data = scrapertools.cache_page(page_url)

    try:
        paramstext = scrapertools.get_match(
            data, '<param name="flashvars" value="([^"]+)"')
        params = paramstext.split("&")

        file = ""
        token = ""
        start = ""
        for param in params:
            if param.startswith("file="):
                file = param[5:]
            if param.startswith("token="):
                token = param[6:]
            if param.startswith("start="):
                start = param[6:]

        media_url = file + "?" + start + "&" + token
    except:
        packed = scrapertools.get_match(
            data,
            "<div id=\"player_coded\">(<script type='text/javascript'>eval\(function\(p,a,c,k,e,d.*?</script>)</div>"
        )
        from core import unpackerjs
        unpacked = unpackerjs.unpackjs(packed)
        logger.info("unpacked=" + unpacked)
        media_url = scrapertools.get_match(
            unpacked, '<embed id="np_vid"type="video/divx"src="([^"]+)"')

    video_urls = []
    video_urls.append([
        scrapertools.get_filename_from_url(media_url)[-4:] + " [allbox4]",
        media_url
    ])

    for video_url in video_urls:
        logger.info("[allbox4.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code example #15
File: ovfile.py Project: conejoninja/xbmc-seriesly
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[ovfile.py] url="+page_url)

    if page_url.startswith('http'):
        page_url = extract_id(page_url)
        if page_url=="":return []

    page_url = 'http://ovfile.com/embed-'+page_url+'-600x340.html'
    # Lo pide una vez
    data = scrapertools.cache_page( page_url)
    

    # Extrae el trozo cifrado
    patron = "src='http://ovfile.com/player/swfobject.js'></script>[^<]+"
    patron +="<script type='text/javascript'>(.*?)</script>"
    matches = re.compile(patron,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    data = ""
    if len(matches)>0:
        data = matches[0]
        logger.info("[ovfile.py] bloque packed="+data)
    else:
        logger.info("[ovfile.py] no encuentra bloque packed="+data)

        return ""
    
    # Lo descifra
    descifrado = unpackerjs.unpackjs(data)
    descifrado = descifrado.replace("\\'","'")
    # Extrae la URL del vídeo
    logger.info("descifrado="+descifrado)
    # Extrae la URL
    patron = "'file','([^']+)'"
    matches = re.compile(patron,re.DOTALL).findall(descifrado)
    scrapertools.printMatches(matches)
    
    video_urls = []
    
    if len(matches)>0:
        url = "%s?file=%s" %(matches[0],matches[0])
        video_urls.append( ["."+matches[0].rsplit('.',1)[1]+" [ovfile]",url])

    for video_url in video_urls:
        logger.info("[ovfile.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Code example #16
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("videostoring get_video_url(page_url='%s')" % page_url)
    if not "embed" in page_url:
        page_url = page_url.replace("http://www.videostoring.com/", "http://www.videostoring.com/embed-") + ".html"

    data = scrapertools.cache_page(page_url)
    data = scrapertools.find_single_match(data, "<script type='text/javascript'>(.*?)</script>")
    data = unpackerjs.unpackjs(data)

    url = scrapertools.get_match(data, '<param name="src"value="([^"]+)"/>')
    video_urls = []
    video_urls.append([scrapertools.get_filename_from_url(url)[-4:] + " [videostoring]", url])

    for video_url in video_urls:
        logger.info("[videostoring.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code example #17
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[meuvideos.py] url=" + page_url)
    if not "embed" in page_url:
        page_url = page_url.replace("http://meuvideos.com/",
                                    "http://meuvideos.com/embed-") + ".html"

    data = scrapertools.cache_page(page_url)
    data = "eval" + scrapertools.find_single_match(
        data, "<script type='text/javascript'>eval(.*?)</script>")
    data = unpackerjs.unpackjs(data)
    url = scrapertools.get_match(data, 'file:"([^"]+)"')
    video_urls = []
    video_urls.append(
        [scrapertools.get_filename_from_url(url)[-4:] + " [meuvideos]", url])

    return video_urls
Code example #18
File: nosvideo.py Project: conejoninja/xbmc-seriesly
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[nosvideo.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
    
    # Lee la URL
    data = scrapertools.cache_page( page_url )
    bloque = scrapertools.get_match(data,'<Form method="POST"(.*)</.orm>')
    #logger.info("bloque="+bloque)
    op = scrapertools.get_match(bloque,'<input type="hidden" name="op" value="([^"]+)"')
    id = scrapertools.get_match(bloque,'<input type="hidden" name="id" value="([^"]+)"')
    rand = scrapertools.get_match(bloque,'<input type="hidden" name="rand" value="([^"]*)"')
    referer = scrapertools.get_match(bloque,'<input type="hidden" name="referer" value="([^"]*)"')
    usr_login = scrapertools.get_match(bloque,'<input type="hidden" name="usr_login" value="([^"]*)"')
    fname = scrapertools.get_match(bloque,'<input type="hidden" name="fname" value="([^"]+)"')
    method_free = scrapertools.get_match(bloque,'<input type="[^"]+" name="method_free" value="([^"]*)"')
    method_premium = scrapertools.get_match(bloque,'<input type="[^"]+" name="method_premium" value="([^"]*)"')

    # Simula el botón
    #op=download1&id=iij5rw25kh4c&rand=&referer=&usr_login=&fname=TED-TS-Screener.Castellano.Ro_dri.avi&method_free=&method_premium=&down_script=1&method_free=Continue+to+Video
    post = "op="+op+"&id="+id+"&rand="+rand+"&referer="+referer+"&usr_login="******"&fname="+fname+"&method_free=&method_premium="+method_premium+"&down_script=1&method_free="+method_free
    data = scrapertools.cache_page( page_url , post=post )
    #logger.info("data="+data)

    # Saca el bloque packed y lo descifra
    packed = scrapertools.get_match(data,"(<script type='text/javascript'>eval\(function\(p,a,c,k,e,d\).*?</script>)")
    from core import unpackerjs
    unpacked = unpackerjs.unpackjs(packed)
    logger.info("unpacked="+unpacked)
    
    # Extrae el descriptor
    playlist = scrapertools.get_match(unpacked,"playlist\=(.*?\.xml)")
    data = scrapertools.cache_page( playlist )
    location = scrapertools.get_match(data,"<file>([^<]+)</file>")
    
    video_urls.append( [ scrapertools.get_filename_from_url(location)[-4:] + " [nosvideo]",location ] )

    for video_url in video_urls:
        logger.info("[nosvideo.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Code example #19
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[allbox4.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    # Descarga
    data = scrapertools.cache_page( page_url )

    try:
        paramstext = scrapertools.get_match( data , '<param name="flashvars" value="([^"]+)"')
        params = paramstext.split("&")

        file=""
        token=""
        start=""
        for param in params:
            if param.startswith("file="):
               file=param[5:] 
            if param.startswith("token="):
               token=param[6:] 
            if param.startswith("start="):
               start=param[6:] 
        
        media_url = file+"?"+start+"&"+token
    except:
        packed = scrapertools.get_match(data,"<div id=\"player_coded\">(<script type='text/javascript'>eval\(function\(p,a,c,k,e,d.*?</script>)</div>")
        from core import unpackerjs
        unpacked = unpackerjs.unpackjs(packed)
        logger.info("unpacked="+unpacked)
        media_url = scrapertools.get_match(unpacked,'<embed id="np_vid"type="video/divx"src="([^"]+)"')

    video_urls = []
    video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:] + " [allbox4]",media_url ] )

    for video_url in video_urls:
        logger.info("[allbox4.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Code example #20
File: vidbull.py Project: conejoninja/xbmc-seriesly
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[vidbull.py] url="+page_url)
        
    data = scrapertools.cache_page( page_url , headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']] )
    logger.info("data="+data)

    # Extrae el trozo cifrado
    '''
    <script type='text/javascript'>eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('2j 2=2i 2h(\'6://8.5/b/b.2g\',\'b\',\'2f\',\'2e\',\'9\');2.g(\'2d\',\'k\');2.g(\'2c\',\'2b\');2.g(\'2a\',\'29\');2.4(\'28\',\'../b/27.26\');2.4(\'25\',\'24\');2.4(\'l\',\'6://n.8.5:23/d/21/20.1z\');2.4(\'1y\',\'6://n.8.5/i/1x/e.1w\');2.4(\'1v\',\'6\');2.4(\'1u.j\',\'1t\');2.4(\'1s\',\'1r\');2.4(\'1q\',\'f-3\');2.4(\'f.h\',\'6://8.5/e\');2.4(\'f.1p\',\'%1o%1n%1m%1l%1k%1j%1i.5%1h-e-1g.1f%22%1e%c%1d%c%1c%c%1b%1a%19%18%17%16%m%14%13%m\');2.4(\'a.l\',\'6://8.5/12/11.10\');2.4(\'a.z\',\'k\');2.4(\'a.y\',\'15\');2.4(\'a.x\',\'1\');2.4(\'a.w\',\'0.7\');2.4(\'a.j\',\'v-u\');2.4(\'a.h\',\'6://8.5\');2.4(\'t\',\'s r\');2.4(\'q\',\'6://8.5\');2.p(\'o\');',36,92,'||s1||addVariable|com|http||vidbull||logo|player|3D0||erk3r6bpfyxy|sharing|addParam|link||position|true|file|3E|fs11|flvplayer|write|aboutlink|dlf|VidBull|abouttext|right|top|out|over|timeout|hide|png|vidbull_playerlogo|images|2FIFRAME|3C||3D338|20HEIGHT|3D640|20WIDTH|3DNO|20SCROLLING|20MARGINHEIGHT|20MARGINWIDTH|20FRAMEBORDER|html|640x318|2Fembed|2Fvidbull|2F|3A|22http|3D|20SRC|3CIFRAME|code|plugins|uniform|stretching|left|dock|provider|jpg|00031|image|flv|video|45sbu63kljrwuxim7e6xp2koxj4sxpaxyivgkrzwu27ggj5rdrrayurf||182|2606|duration|zip|modieus1|skin|opaque|wmode|always|allowscriptaccess|allowfullscreen|318|640|swf|SWFObject|new|var'.split('|')))
    '''
    patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d\).*?)</script>"
    matches = re.compile(patron,re.DOTALL).findall(data)
    cifrado=""
    for match in matches:
        logger.info("match="+match)
        if "mp4" in match or "flv" in match or "video" in match:
            cifrado = match
            break
    
    # Extrae la URL del vídeo
    logger.info("cifrado="+cifrado)
    descifrado = unpackerjs.unpackjs(cifrado)
    descifrado = descifrado.replace("\\","")
    logger.info("descifrado="+descifrado)
    
    # Extrae la URL
    media_url = scrapertools.get_match(descifrado,"addVariable\('file','([^']+)'")
    
    video_urls = []
    
    if len(matches)>0:
        video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [vidbull]",media_url])

    for video_url in video_urls:
        logger.info("[vidbull.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Code example #21
File: gamovideo.py Project: trunca/pelisalacarta
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("pelisalacarta.gamovideo get_video_url(page_url='%s')" % page_url)
    if not "embed" in page_url:
      page_url = page_url.replace("http://gamovideo.com/","http://gamovideo.com/embed-") + "-640x360.html"

    data = scrapertools.cache_page(page_url)
    data = scrapertools.find_single_match(data,"<script type='text/javascript'>(.*?)</script>")
    data = unpackerjs.unpackjs(data)
    
    host = scrapertools.get_match(data, 'image:"(http://[^/]+/)')
    flv_url = scrapertools.get_match(data, ',\{file:"([^"]+)"')
    rtmp_url = scrapertools.get_match(data, '\[\{file:"([^"]+)"')
    flv = host+flv_url.split("=")[1]+"/v.flv"

    video_urls = []
    video_urls.append([scrapertools.get_filename_from_url(flv)[-4:]+" [gamovideo]",flv])
    #video_urls.append(["RTMP [gamovideo]",rtmp_url])      


    for video_url in video_urls:
        logger.info("[gamovideo.py] %s - %s" % (video_url[0],video_url[1]))
        

    return video_urls
Code example #22
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("pelisalacarta.gamovideo get_video_url(page_url='%s')" %
                page_url)

    # Lo pide una vez
    headers = [[
        'User-Agent',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'
    ]]
    data = scrapertools.cache_page(page_url, headers=headers)
    #logger.info("data="+data)

    try:
        '''
        <input type="hidden" name="op" value="download1">
        <input type="hidden" name="usr_login" value="">
        <input type="hidden" name="id" value="auoxxtvyquoy">
        <input type="hidden" name="fname" value="Star.Trek.Into.Darkness.2013.HD.m720p.LAT.avi">
        <input type="hidden" name="referer" value="">
        <input type="hidden" name="hash" value="1624-83-46-1377796069-b5e6b8f9759d080a3667adad637f00ac">
        <input type="submit" name="imhuman" value="Continue to Video" id="btn_download">
        '''
        op = scrapertools.get_match(
            data, '<input type="hidden" name="op" value="(down[^"]+)"')
        usr_login = ""
        id = scrapertools.get_match(
            data, '<input type="hidden" name="id" value="([^"]+)"')
        fname = scrapertools.get_match(
            data, '<input type="hidden" name="fname" value="([^"]+)"')
        referer = scrapertools.get_match(
            data, '<input type="hidden" name="referer"\s+value="([^"]*)"')
        hashvalue = scrapertools.get_match(
            data, '<input type="hidden" name="hash" value="([^"]*)"')
        submitbutton = scrapertools.get_match(
            data,
            '<input type="submit" name="imhuman" value="([^"]+)"').replace(
                " ", "+")

        import time
        time.sleep(5)

        # Lo pide una segunda vez, como si hubieras hecho click en el banner
        #op=download1&usr_login=&id=auoxxtvyquoy&fname=Star.Trek.Into.Darkness.2013.HD.m720p.LAT.avi&referer=&hash=1624-83-46-1377796019-c2b422f91da55d12737567a14ea3dffe&imhuman=Continue+to+Video
        #op=search&usr_login=&id=auoxxtvyquoy&fname=Star.Trek.Into.Darkness.2013.HD.m720p.LAT.avi&referer=&hash=1624-83-46-1377796398-8020e5629f50ff2d7b7de99b55bdb177&imhuman=Continue+to+Video
        post = "op=" + op + "&usr_login="******"&id=" + id + "&fname=" + fname + "&referer=" + referer + "&hash=" + hashvalue + "&imhuman=" + submitbutton
        headers.append(["Referer", page_url])
        data = scrapertools.cache_page(page_url, post=post, headers=headers)
        #logger.info("data="+data)
    except:
        import traceback
        traceback.print_exc()

    # Extrae la URL
    logger.info("data=" + data)
    data = scrapertools.find_single_match(
        data, "<script type='text/javascript'>(.*?)</script>")
    logger.info("data=" + data)
    data = unpackerjs.unpackjs(data)
    logger.info("data=" + data)

    pfile = scrapertools.get_match(data, 'file\s*\:\s*"([^"]+)"')
    pstreamer = scrapertools.get_match(data, 'streamer\s*\:\s*"([^"]+)"')

    media_url = pstreamer + " playpath=" + pfile

    video_urls = []
    video_urls.append(["RTMP [gamovideo]", media_url])

    for video_url in video_urls:
        logger.info("[gamovideo.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code example #23
File: metadivx.py Project: pablobart/tvalacarta-web
def geturl(urlvideo):
    logger.info("[metadivx.py] url="+urlvideo)
    # ---------------------------------------
    #  Inicializa la libreria de las cookies
    # ---------------------------------------
    ficherocookies = COOKIEFILE
    try:
        os.remove(ficherocookies)
    except:
        pass
    # the path and filename to save your cookies in

    cj = None
    ClientCookie = None
    cookielib = None

    # Let's see if cookielib is available
    try:
        import cookielib
    except ImportError:
        # If importing cookielib fails
        # let's try ClientCookie
        try:
            import ClientCookie
        except ImportError:
            # ClientCookie isn't available either
            urlopen = urllib2.urlopen
            Request = urllib2.Request
        else:
            # imported ClientCookie
            urlopen = ClientCookie.urlopen
            Request = ClientCookie.Request
            cj = ClientCookie.LWPCookieJar()

    else:
        # importing cookielib worked
        urlopen = urllib2.urlopen
        Request = urllib2.Request
        cj = cookielib.LWPCookieJar()
        # This is a subclass of FileCookieJar
        # that has useful load and save methods

    # ---------------------------------
    # Instala las cookies
    # ---------------------------------

    if cj is not None:
    # we successfully imported
    # one of the two cookie handling modules

        if os.path.isfile(ficherocookies):
            # if we have a cookie file already saved
            # then load the cookies into the Cookie Jar
            cj.load(ficherocookies)

        # Now we need to get our Cookie Jar
        # installed in the opener;
        # for fetching URLs
        if cookielib is not None:
            # if we use cookielib
            # then we get the HTTPCookieProcessor
            # and install the opener in urllib2
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
            urllib2.install_opener(opener)

        else:
            # if we use ClientCookie
            # then we get the HTTPCookieProcessor
            # and install the opener in ClientCookie
            opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj))
            ClientCookie.install_opener(opener)

    #print "-------------------------------------------------------"
    url=urlvideo
    #print url
    #print "-------------------------------------------------------"
    theurl = url
    # an example url that sets a cookie,
    # try different urls here and see the cookie collection you can make !

    txdata = None
    # if we were making a POST type request,
    # we could encode a dictionary of values here,
    # using urllib.urlencode(somedict)

    txheaders =  {'User-Agent':'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)'}
    # fake a user agent, some websites (like google) don't like automated exploration

    req = Request(theurl, txdata, txheaders)
    handle = urlopen(req)
    cj.save(ficherocookies)                     # save the cookies again    

    data=handle.read()
    handle.close()
    #print data

    # Lo pide una segunda vez, como si hubieras hecho click en el banner
    patron = 'http\:\/\/www\.metadivx\.com/([^\/]+)/(.*?)\.html'
    matches = re.compile(patron,re.DOTALL).findall(url)
    logger.info("[metadivx.py] fragmentos de la URL")
    scrapertools.printMatches(matches)
    
    codigo = ""
    nombre = ""
    if len(matches)>0:
        codigo = matches[0][0]
        nombre = matches[0][1]

    txdata = "op=download1&usr_login=&id="+codigo+"&fname="+nombre+"&referer=&method_free=Continue"
    logger.info(txdata)
    req = Request(theurl, txdata, txheaders)
    handle = urlopen(req)
    cj.save(ficherocookies)                     # save the cookies again    

    data=handle.read()
    handle.close()
    #print data
    
    patron = '<div id="embedcontmvshre[^>]+>(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    logger.info("[metadivx.py] bloque packed")
    if len(matches)>0:
        logger.info(matches[0])
    '''
    <center>
    <script type='text/javascript'>eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('1e.1d(\'<7 w="1c"1b="1a:19-18-17-16-15"h="g"f="e"14="4://a.6.3/9/13.12"><2 1="j"0="i"><2 1="v"0="u"><2 1="b"0="5"/><2 1="c"0="5"/><2 1="t"0="4://s.r.q.p:o/d/n/m.3.-l.k"/><8 w="11"v="u"10="z/6"t="4://s.r.q.p:o/d/n/m.3.-l.k"j="i"h="g"f="e"c="5"b="5"y="4://a.6.3/9/x/"></8></7>\');',36,51,'value|name|param|com|http|false|divx|object|embed|plugin|go|bannerEnabled|autoPlay||320px|height|630px|width|none|custommode|avi|El_Concierto__BrSc__Spanish_HOMIEZTEAM__2010_|Capitancinema|pfq3vaf2xypwtrv77uw334hb55ctx5tcd6dva|182|206|45|73|76|src|auto|bufferingMode|id|download|pluginspage|video|type|embedmvshre|cab|DivXBrowserPlugin|codebase|CC0F21721616|9C46|41fa|D0AB|67DABFBF|clsid|classid|embedcontmvshre|write|document'.split('|')))
    </script>
    </center>
    '''
    # El javascript empaquetado es
    #eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('1e.1d(\'<7 w="1c"1b="1a:19-18-17-16-15"h="g"f="e"14="4://a.6.3/9/13.12"><2 1="j"0="i"><2 1="v"0="u"><2 1="b"0="5"/><2 1="c"0="5"/><2 1="t"0="4://s.r.q.p:o/d/n/m.3.-l.k"/><8 w="11"v="u"10="z/6"t="4://s.r.q.p:o/d/n/m.3.-l.k"j="i"h="g"f="e"c="5"b="5"y="4://a.6.3/9/x/"></8></7>\');',36,51,'value|name|param|com|http|false|divx|object|embed|plugin|go|bannerEnabled|autoPlay||320px|height|630px|width|none|custommode|avi|El_Concierto__BrSc__Spanish_HOMIEZTEAM__2010_|Capitancinema|pfq3vaf2xypwtrv77uw334hb55ctx5qa5wdfa|182|206|45|73|76|src|auto|bufferingMode|id|download|pluginspage|video|type|embedmvshre|cab|DivXBrowserPlugin|codebase|CC0F21721616|9C46|41fa|D0AB|67DABFBF|clsid|classid|embedcontmvshre|write|document'.split('|')))
    '''
    eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('1e.1d(\'
    <7 w="1c"1b="1a:19-18-17-16-15"h="g"f="e"14="4://a.6.3/9/13.12">
    <2 1="j"0="i">
    <2 1="v"0="u">
    <2 1="b"0="5"/>
    <2 1="c"0="5"/>
    <2 1="t"0="4://s.r.q.p:o/d/n/m.3.-l.k"/>
    <8 w="11"v="u"10="z/6"t="4://s.r.q.p:o/d/n/m.3.-l.k"j="i"h="g"f="e"c="5"b="5"y="4://a.6.3/9/x/">
    <embed id="embedmvshre"bufferingMode="auto"type="video/divx"src="http://76.73.45.206:182/d/pfq3vaf2xypwtrv77uw334hb55ctx5qa5wdfa/Capitancinema.com.-El_Concierto__BrSc__Spanish_HOMIEZTEAM__2010_.avi"custommode="none"width="630px"height="320px"autoPlay="false"bannerEnabled="false"pluginspage="http://go.divx.com/plugin/download/">
    </8>
    </7>\');',36,51,
    0'value
    1|name
    2|param
    3|com
    4|http
    5|false
    6|divx
    7|object
    8|embed
    9|plugin
    a|go
    b|bannerEnabled
    c|autoPlay
    d|
    e|320px
    f|height
    g|630px
    h|width
    i|none
    j|custommode
    k|avi
    l|El_Concierto__BrSc__Spanish_HOMIEZTEAM__2010_
    m|Capitancinema
    n|pfq3vaf2xypwtrv77uw334hb55ctx5qa5wdfa
    o|182
    p|206
    q|45
    r|73
    s|76
    t|src
    u|auto
    v|bufferingMode
    w|id
    x|download
    y|pluginspage
    z|video
    10|type
    11|embedmvshre
    12|cab
    13|DivXBrowserPlugin
    14|codebase
    15|CC0F21721616
    16|9C46
    17|41fa
    18|D0AB
    19|67DABFBF
    1a|clsid
    1b|classid
    1c|embedcontmvshre
    1d|write
    1e|document
    '.split('
    |')))
    '''
    # El javascript desempaquetado es
    #document.write('<object id="embedcontmvshre"classid="clsid:67DABFBF-D0AB-41fa-9C46-CC0F21721616"width="630px"height="320px"codebase="http://go.divx.com/plugin/DivXBrowserPlugin.cab"><param name="custommode"value="none"><param name="bufferingMode"value="auto"><param name="bannerEnabled"value="false"/><param name="autoPlay"value="false"/><param name="src"value="http://76.73.45.206:182/d/pfq3vaf2xypwtrv77uw334hb55ctx5qa5wdfa/Capitancinema.com.-El_Concierto__BrSc__Spanish_HOMIEZTEAM__2010_.avi"/><embed id="embedmvshre"bufferingMode="auto"type="video/divx"src="http://76.73.45.206:182/d/pfq3vaf2xypwtrv77uw334hb55ctx5qa5wdfa/Capitancinema.com.-El_Concierto__BrSc__Spanish_HOMIEZTEAM__2010_.avi"custommode="none"width="630px"height="320px"autoPlay="false"bannerEnabled="false"pluginspage="http://go.divx.com/plugin/download/"></embed></object>');
    '''
    <object id="embedcontmvshre"classid="clsid:67DABFBF-D0AB-41fa-9C46-CC0F21721616"width="630px"height="320px"codebase="http://go.divx.com/plugin/DivXBrowserPlugin.cab">
    <param name="custommode"value="none">
    <param name="bufferingMode"value="auto">
    <param name="bannerEnabled"value="false"/>
    <param name="autoPlay"value="false"/>
    <param name="src"value="http://76.73.45.206:182/d/pfq3vaf2xypwtrv77uw334hb55ctx5qa5wdfa/Capitancinema.com.-El_Concierto__BrSc__Spanish_HOMIEZTEAM__2010_.avi"/>
    <embed id="embedmvshre"bufferingMode="auto"type="video/divx"src="http://76.73.45.206:182/d/pfq3vaf2xypwtrv77uw334hb55ctx5qa5wdfa/Capitancinema.com.-El_Concierto__BrSc__Spanish_HOMIEZTEAM__2010_.avi"custommode="none"width="630px"height="320px"autoPlay="false"bannerEnabled="false"pluginspage="http://go.divx.com/plugin/download/">
    </embed>
    </object>');
    '''
    # La URL del video es 
    #http://76.73.45.206:182/d/pfq3vaf2xypwtrv77uw334hb55ctx5qa5wdfa/Capitancinema.com.-El_Concierto__BrSc__Spanish_HOMIEZTEAM__2010_.avi
    
    # Lo descifra
    descifrado = unpackerjs.unpackjs(data)
    logger.info("descifrado="+descifrado)
    
    # Extrae la URL
    patron = '<param name="src"value="([^"]+)"/>'
    matches = re.compile(patron,re.DOTALL).findall(descifrado)
    scrapertools.printMatches(matches)
    
    url = ""
    
    if len(matches)>0:
        url = matches[0]

    logger.info("[metadivx.py] url="+url)
    return url
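
Every example on this page hands the packed eval(function(p,a,c,k,e,d)...) block to unpackerjs.unpackjs. For reference, here is a stripped-down sketch of what that unpacking step does for the plain p.replace(...) packer variant quoted in the comments above; the project's real unpackerjs handles more variants, so treat this only as an illustration:

import re

def _to_base(n, base):
    # Mirror JavaScript's Number.toString(base): digits 0-9 followed by a-z.
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    if n == 0:
        return "0"
    out = ""
    while n > 0:
        n, remainder = divmod(n, base)
        out = digits[remainder] + out
    return out

def unpack_packed_js(packed):
    # Pull out the four arguments of }('p', a, c, 'k'.split('|')) and replace every
    # base-a token in p with the corresponding word from k, highest index first.
    match = re.search(r"}\('(.*)',\s*(\d+),\s*(\d+),\s*'(.*)'\.split\('\|'\)", packed, re.DOTALL)
    if match is None:
        return ""
    p = match.group(1)
    base = int(match.group(2))
    count = int(match.group(3))
    words = match.group(4).split("|")
    for index in range(count - 1, -1, -1):
        if index < len(words) and words[index]:
            token = _to_base(index, base)
            p = re.sub(r"\b%s\b" % token, lambda m, w=words[index]: w, p)
    return p
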
Code example #24
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[videopremium.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    # Lee la URL
    data = scrapertools.cache_page(page_url)
    logger.info("data=" + data)
    bloque = scrapertools.get_match(data, '<Form[^>]*method="POST"(.*)</.orm>')
    logger.info("bloque=" + bloque)
    op = scrapertools.get_match(
        bloque, '<input type="hidden" name="op" value="([^"]+)"')
    usr_login = scrapertools.get_match(
        bloque, '<input type="hidden" name="usr_login" value="([^"]*)"')
    id = scrapertools.get_match(
        bloque, '<input type="hidden" name="id" value="([^"]+)"')
    fname = scrapertools.get_match(
        bloque, '<input type="hidden" name="fname" value="([^"]+)"')
    referer = scrapertools.get_match(
        bloque, '<input type="hidden" name="referer" value="([^"]*)"')
    method_free = scrapertools.get_match(
        bloque, '<input type="[^"]+" name="method_free" value="([^"]+)"')

    # Simula el botón
    #op=download1&usr_login=&id=buq4b8zunbm6&fname=Snow.Buddies-Avventura.In.Alaska.2008.iTALiAN.AC3.DVDRip.H264-PsYcOcReW.avi&referer=&method_free=Watch+Free%21
    post = "op=" + op + "&usr_login="******"&id=" + id + "&fname=" + fname + "&referer=" + referer + "&method_free=" + method_free
    data = scrapertools.cache_page(page_url, post=post)
    #logger.info("data="+data)

    try:
        packed = scrapertools.get_match(
            data,
            "(<script type='text/javascript'>eval\(function\(p,a,c,k,e,d\).*?</script>)"
        )
    except:
        packed = scrapertools.get_match(
            data, "(function\(p, a, c, k, e, d\).*?</script>)")
        packed = "<script type='text/javascript'>eval(" + packed

    logger.info("packed=" + packed)

    from core import unpackerjs
    unpacked = unpackerjs.unpackjs(packed)
    logger.info("unpacked=" + unpacked)
    '''
    23:47:40 T:2955980800  NOTICE: unpacked=('var vast=\'\';var flashvars={"comment":"VideoPremium.NET","st":"http://videopremium.net/uplayer/styles/video156-623.txt",
        "file":"rtmp://tengig0.lb.videopremium.net/play/mp4:8x0mq9hanl3a.f4v",
        p2pkey:"mp4:8x0mq9hanl3a.f4v",vast_preroll:vast};var params={bgcolor:"#ffffff",allowFullScreen:"true",allowScriptAccess:"always",id:"vplayer"};new swfobject.embedSWF("http://videopremium.net/uplayer/uppod.swf","vplayer","728","450","9.0.115.0",false,flashvars,params);',,paramsflashvars,
    '''
    '''
    Property 'app' String 'play'
    Property 'swfUrl' String 'http://videopremium.net/uplayer/uppod.swf'
    Property 'pageUrl' String 'http://videopremium.net/8x0mq9hanl3a'
    Property 'tcUrl' String 'rtmp://e5.videopremium.net/play'
    play: String 'mp4:8x0mq9hanl3a.f4v'
    '''
    '''
    00:55:30 T:2955980800   ERROR: Valid RTMP options are:
    00:55:30 T:2955980800   ERROR:      socks string   Use the specified SOCKS proxy
    00:55:30 T:2955980800   ERROR:        app string   Name of target app on server
    00:55:30 T:2955980800   ERROR:      tcUrl string   URL to played stream
    00:55:30 T:2955980800   ERROR:    pageUrl string   URL of played media's web page
    00:55:30 T:2955980800   ERROR:     swfUrl string   URL to player SWF file
    00:55:30 T:2955980800   ERROR:   flashver string   Flash version string (default MAC 10,0,32,18)
    00:55:30 T:2955980800   ERROR:       conn AMF      Append arbitrary AMF data to Connect message
    00:55:30 T:2955980800   ERROR:   playpath string   Path to target media on server
    00:55:30 T:2955980800   ERROR:   playlist boolean  Set playlist before play command
    00:55:30 T:2955980800   ERROR:       live boolean  Stream is live, no seeking possible
    00:55:30 T:2955980800   ERROR:  subscribe string   Stream to subscribe to
    00:55:30 T:2955980800   ERROR:        jtv string   Justin.tv authentication token
    00:55:30 T:2955980800   ERROR:       weeb string   Weeb.tv authentication token
    00:55:30 T:2955980800   ERROR:      token string   Key for SecureToken response
    00:55:30 T:2955980800   ERROR:     swfVfy boolean  Perform SWF Verification
    00:55:30 T:2955980800   ERROR:     swfAge integer  Number of days to use cached SWF hash
    00:55:30 T:2955980800   ERROR:    swfsize integer  Size of the decompressed SWF file
    00:55:30 T:2955980800   ERROR:    swfhash string   SHA256 hash of the decompressed SWF file
    00:55:30 T:2955980800   ERROR:      start integer  Stream start position in milliseconds
    00:55:30 T:2955980800   ERROR:       stop integer  Stream stop position in milliseconds
    00:55:30 T:2955980800   ERROR:     buffer integer  Buffer time in milliseconds
    00:55:30 T:2955980800   ERROR:    timeout integer  Session timeout in seconds
    '''
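    # Of the librtmp options listed above, only playpath, swfUrl, pageUrl, tcUrl
    # and app are filled in here; they are chained below into the space-separated
    # key=value "location" string that the player is expected to pass to librtmp.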
    rtmpurl = scrapertools.get_match(unpacked, '"file"\:"([^"]+)"').replace(
        "tengig0.lb.videopremium.net", "e4.videopremium.net")
    playpath = scrapertools.get_match(unpacked, 'p2pkey\:"([^"]+)"')
    swfurl = scrapertools.get_match(unpacked, 'embedSWF\("([^"]+)"')
    pageurl = page_url
    app = "play"
    tcurl = "rtmp://e4.videopremium.net/play"
    location = rtmpurl + " playpath=" + playpath + " swfurl=" + swfurl + " pageUrl=" + page_url + " tcurl=" + tcurl + " app=" + app  #swfvfy=true

    logger.info("location=" + location)
    video_urls.append(["RTMP [videopremium]", location])

    return video_urls
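As a usage illustration only (assuming this connector runs inside an XBMC add-on), the RTMP location string returned above could be handed straight to the built-in player; play_first_mirror is a hypothetical helper, not part of the connector.

import xbmc

def play_first_mirror(page_url):
    # Illustrative only: take the first [label, location] pair returned by
    # get_video_url and let XBMC/librtmp interpret the option string.
    video_urls = get_video_url(page_url)
    if video_urls:
        label, location = video_urls[0]
        xbmc.Player().play(location)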
コード例 #25
ファイル: vidxden.py プロジェクト: conejoninja/xbmc-seriesly
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):

    logger.info("[vidxden.py] url=" + page_url)

    # Request the page once
    headers = []
    headers.append([
        'User-Agent',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'
    ])
    data = scrapertools.cache_page(page_url, headers=headers)
    fname = scrapertools.get_match(
        data, '<input name="fname" type="hidden" value="([^"]+)">')
    codigo = scrapertools.get_match(page_url, 'vidxden.com/(\w+)')

    # Request it a second time, as if the banner had been clicked
    #op=download1&usr_login=&id=qtrv0ufkz3e4&fname=El_cazador_de_sue_os-dvd.avi&referer=&method_free=Continue+to+Video
    headers.append(['Referer', page_url])
    post = "op=download1&usr_login=&id=" + codigo + "&fname=" + fname + "&referer=&method_free=Continue+to+Video"
    data = scrapertools.cache_page(page_url, post=post, headers=headers)
    logger.info("data=" + data)

    # Extract the packed block
    #<div id="embedcontmvshre" style="position: absolute; top: 0; left: 0; visibility: hidden;"><script type='text/javascript'>eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('1i.1h(\'<8 10="1g"1f="1e:1d-1c-1b-1a-19"q="p"o="n"18="3://b.7.4/a/17.16"><2 1="u"0="t"/><2 1="s"0="r"/><2 1="m"0="3://i/l/6.k"/><2 1="f"0="5"><2 1="g"0="5"/><2 1="e"0="c"/><2 1="j"0="h"/><2 1="z"0="3://y.x.4:w/d/v/6"/><9 10="15"14="13/7"z="3://y.x.4:w/d/v/6"u="t"s="r"q="p"o="n"m="3://i/l/6.k"j="h"g="5"f="5"e="c"12="3://b.7.4/a/11/"></9></8>\');',36,55,'value|name|param|http|com|false|qtrv0ufkz3e4|divx|object|embed|plugin|go|Play||previewMessage|allowContextMenu|bannerEnabled|true||autoPlay|jpg|00249|previewImage|318|height|640|width|transparent|wmode|Stage6|custommode|opujxvaorizu2mdg6fst2fjdzlrn4p437h3lsbz5fjkxs|364|divxden|s31|src|id|download|pluginspage|video|type|np_vid|cab|DivXBrowserPlugin|codebase|CC0F21721616|9C46|41fa|D0AB|67DABFBF|clsid|classid|ie_vid|write|document'.split('|')))</script></div>
    patron = "(<script type='text/javascript'>eval\(function.*?</script>)"
    matches = re.compile(patron, re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    data = ""
    if len(matches) > 0:
        data = matches[0]
        logger.info("[vidxden.py] bloque packed=" + data)
    else:
        logger.info("[vidxden.py] no encuentra bloque packed=" + data)

        return ""

    # Unpack it
    descifrado = unpackerjs.unpackjs(data)

    # Extract the video URL
    logger.info("descifrado=" + descifrado)
    # Extract the URL
    patron = '<param name="src"value="([^"]+)"/>'
    matches = re.compile(patron, re.DOTALL).findall(descifrado)
    scrapertools.printMatches(matches)
    if len(matches) == 0:
        descifrado = descifrado.replace("\\", "")
        patron = "file','([^']+)'"
        matches = re.compile(patron, re.DOTALL).findall(descifrado)
        scrapertools.printMatches(matches)

    video_urls = []

    if len(matches) > 0:
        video_urls.append([
            "[vidxden]", matches[0] + "|Referer=" + urllib.quote(page_url) +
            "&User-Agent=" + urllib.quote(
                'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'
            )
        ])

    for video_url in video_urls:
        logger.info("[vidxden.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
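The vidxden connector appends the Referer and User-Agent to the media URL after a "|" separator, which is the convention XBMC uses to attach HTTP headers to a playback request. A small hypothetical helper that builds the same suffix (add_request_headers is not part of the original code) might look like this:

import urllib

def add_request_headers(media_url, headers):
    # Append URL-encoded key=value header pairs after a "|" so the player
    # sends them along with the HTTP request for the video.
    encoded = "&".join("%s=%s" % (name, urllib.quote(value)) for name, value in headers)
    return media_url + "|" + encoded

For the snippet above, the equivalent call would be add_request_headers(matches[0], [("Referer", page_url), ("User-Agent", "Mozilla/5.0 ...")]).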
コード例 #26
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[videopremium.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
   
    # Fetch the page
    data = scrapertools.cache_page( page_url )
    logger.info("data="+data)
    bloque = scrapertools.get_match(data,'<Form[^>]*method="POST"(.*)</.orm>')
    logger.info("bloque="+bloque)
    op = scrapertools.get_match(bloque,'<input type="hidden" name="op" value="([^"]+)"')
    usr_login = scrapertools.get_match(bloque,'<input type="hidden" name="usr_login" value="([^"]*)"')
    id = scrapertools.get_match(bloque,'<input type="hidden" name="id" value="([^"]+)"')
    fname = scrapertools.get_match(bloque,'<input type="hidden" name="fname" value="([^"]+)"')
    referer = scrapertools.get_match(bloque,'<input type="hidden" name="referer" value="([^"]*)"')
    method_free = scrapertools.get_match(bloque,'<input type="[^"]+" name="method_free" value="([^"]+)"')

    # Simulate the button click
    #op=download1&usr_login=&id=buq4b8zunbm6&fname=Snow.Buddies-Avventura.In.Alaska.2008.iTALiAN.AC3.DVDRip.H264-PsYcOcReW.avi&referer=&method_free=Watch+Free%21
    post = "op="+op+"&usr_login="******"&id="+id+"&fname="+fname+"&referer="+referer+"&method_free="+method_free
    data = scrapertools.cache_page( page_url , post=post )
    #logger.info("data="+data)
   
    try:
        packed = scrapertools.get_match(data,"(<script type='text/javascript'>eval\(function\(p,a,c,k,e,d\).*?</script>)")
    except:
        packed = scrapertools.get_match(data,"(function\(p, a, c, k, e, d\).*?</script>)")
        packed = "<script type='text/javascript'>eval("+packed

    logger.info("packed="+packed)

    from core import unpackerjs
    unpacked = unpackerjs.unpackjs(packed)
    logger.info("unpacked="+unpacked)
    '''
    23:47:40 T:2955980800  NOTICE: unpacked=('var vast=\'\';var flashvars={"comment":"VideoPremium.NET","st":"http://videopremium.net/uplayer/styles/video156-623.txt",
        "file":"rtmp://tengig0.lb.videopremium.net/play/mp4:8x0mq9hanl3a.f4v",
        p2pkey:"mp4:8x0mq9hanl3a.f4v",vast_preroll:vast};var params={bgcolor:"#ffffff",allowFullScreen:"true",allowScriptAccess:"always",id:"vplayer"};new swfobject.embedSWF("http://videopremium.net/uplayer/uppod.swf","vplayer","728","450","9.0.115.0",false,flashvars,params);',,paramsflashvars,
    '''
    '''
    Property 'app' String 'play'
    Property 'swfUrl' String 'http://videopremium.net/uplayer/uppod.swf'
    Property 'pageUrl' String 'http://videopremium.net/8x0mq9hanl3a'
    Property 'tcUrl' String 'rtmp://e5.videopremium.net/play'
    play: String 'mp4:8x0mq9hanl3a.f4v'
    '''
    '''
    00:55:30 T:2955980800   ERROR: Valid RTMP options are:
    00:55:30 T:2955980800   ERROR:      socks string   Use the specified SOCKS proxy
    00:55:30 T:2955980800   ERROR:        app string   Name of target app on server
    00:55:30 T:2955980800   ERROR:      tcUrl string   URL to played stream
    00:55:30 T:2955980800   ERROR:    pageUrl string   URL of played media's web page
    00:55:30 T:2955980800   ERROR:     swfUrl string   URL to player SWF file
    00:55:30 T:2955980800   ERROR:   flashver string   Flash version string (default MAC 10,0,32,18)
    00:55:30 T:2955980800   ERROR:       conn AMF      Append arbitrary AMF data to Connect message
    00:55:30 T:2955980800   ERROR:   playpath string   Path to target media on server
    00:55:30 T:2955980800   ERROR:   playlist boolean  Set playlist before play command
    00:55:30 T:2955980800   ERROR:       live boolean  Stream is live, no seeking possible
    00:55:30 T:2955980800   ERROR:  subscribe string   Stream to subscribe to
    00:55:30 T:2955980800   ERROR:        jtv string   Justin.tv authentication token
    00:55:30 T:2955980800   ERROR:       weeb string   Weeb.tv authentication token
    00:55:30 T:2955980800   ERROR:      token string   Key for SecureToken response
    00:55:30 T:2955980800   ERROR:     swfVfy boolean  Perform SWF Verification
    00:55:30 T:2955980800   ERROR:     swfAge integer  Number of days to use cached SWF hash
    00:55:30 T:2955980800   ERROR:    swfsize integer  Size of the decompressed SWF file
    00:55:30 T:2955980800   ERROR:    swfhash string   SHA256 hash of the decompressed SWF file
    00:55:30 T:2955980800   ERROR:      start integer  Stream start position in milliseconds
    00:55:30 T:2955980800   ERROR:       stop integer  Stream stop position in milliseconds
    00:55:30 T:2955980800   ERROR:     buffer integer  Buffer time in milliseconds
    00:55:30 T:2955980800   ERROR:    timeout integer  Session timeout in seconds
    '''
    rtmpurl=scrapertools.get_match(unpacked,'"file"\:"([^"]+)"').replace("tengig0.lb.videopremium.net","e4.videopremium.net")
    playpath=scrapertools.get_match(unpacked,'p2pkey\:"([^"]+)"')
    swfurl=scrapertools.get_match(unpacked,'embedSWF\("([^"]+)"')
    pageurl = page_url
    app="play"
    tcurl="rtmp://e4.videopremium.net/play"
    location = rtmpurl+" playpath="+playpath+" swfurl="+swfurl+" pageUrl="+page_url+" tcurl="+tcurl+" app="+app #swfvfy=true

    logger.info("location="+location)
    video_urls.append( [ "RTMP [videopremium]",location ] )

    return video_urls
コード例 #27
def geturl(urlvideo):
    logger.info("[divxlink.py] url="+urlvideo)
    # ---------------------------------------
    #  Initialize the cookie library
    # ---------------------------------------
    ficherocookies = COOKIEFILE
    try:
        os.remove(ficherocookies)
    except:
        pass
    # the path and filename to save your cookies in

    cj = None
    ClientCookie = None
    cookielib = None

    # Let's see if cookielib is available
    try:
        import cookielib
    except ImportError:
        # If importing cookielib fails
        # let's try ClientCookie
        try:
            import ClientCookie
        except ImportError:
            # ClientCookie isn't available either
            urlopen = urllib2.urlopen
            Request = urllib2.Request
        else:
            # imported ClientCookie
            urlopen = ClientCookie.urlopen
            Request = ClientCookie.Request
            cj = ClientCookie.LWPCookieJar()

    else:
        # importing cookielib worked
        urlopen = urllib2.urlopen
        Request = urllib2.Request
        cj = cookielib.LWPCookieJar()
        # This is a subclass of FileCookieJar
        # that has useful load and save methods

    # ---------------------------------
    # Install the cookies
    # ---------------------------------

    if cj is not None:
        # we successfully imported
        # one of the two cookie handling modules

        if os.path.isfile(ficherocookies):
            # if we have a cookie file already saved
            # then load the cookies into the Cookie Jar
            cj.load(ficherocookies)

        # Now we need to get our Cookie Jar
        # installed in the opener;
        # for fetching URLs
        if cookielib is not None:
            # if we use cookielib
            # then we get the HTTPCookieProcessor
            # and install the opener in urllib2
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
            urllib2.install_opener(opener)

        else:
            # if we use ClientCookie
            # then we get the HTTPCookieProcessor
            # and install the opener in ClientCookie
            opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj))
            ClientCookie.install_opener(opener)

    #print "-------------------------------------------------------"
    url=urlvideo
    #print url
    #print "-------------------------------------------------------"
    theurl = url
    # an example url that sets a cookie,
    # try different urls here and see the cookie collection you can make !

    txdata = None
    # if we were making a POST type request,
    # we could encode a dictionary of values here,
    # using urllib.urlencode(somedict)

    txheaders =  {'User-Agent':'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)'}
    # fake a user agent, some websites (like google) don't like automated exploration

    req = Request(theurl, txdata, txheaders)
    handle = urlopen(req)
    cj.save(ficherocookies)                     # save the cookies again    

    data=handle.read()
    handle.close()
    #print data

    # Request it a second time, as if the banner had been clicked
    patron = 'http\:\/\/www\.divxlink\.com/([^\/]+)/(.*?)\.html'
    matches = re.compile(patron,re.DOTALL).findall(url)
    logger.info("[divxlink.py] fragmentos de la URL")
    scrapertools.printMatches(matches)
    
    codigo = ""
    nombre = ""
    if len(matches)>0:
        codigo = matches[0][0]
        nombre = matches[0][1]

    patron = '<input type="hidden" name="rand" value="([^"]+)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    randomstring=""
    if len(matches)>0:
        randomstring=matches[0]
    logger.info("[divxlink.py] randomstring="+randomstring)

    txdata = "op=download2&id="+codigo+"&rand="+randomstring+"&referer=&method_free=&method_premium=&down_direct=1"
    logger.info(txdata)
    req = Request(theurl, txdata, txheaders)
    handle = urlopen(req)
    cj.save(ficherocookies)                     # save the cookies again    

    data=handle.read()
    handle.close()
    #print data
    patron = '<div id="embedcontmvshre"[^>]+>(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    data = ""
    if len(matches)>0:
        data = matches[0]
        logger.info("[divxlink.py] bloque packed="+data)
    else:
        return ""
    
    # Unpack it
    descifrado = unpackerjs.unpackjs(data)
    
    logger.info("descifrado="+descifrado)
    # Extract the URL
    patron = '<param name="src"value="([^"]+)"/>'
    matches = re.compile(patron,re.DOTALL).findall(descifrado)
    scrapertools.printMatches(matches)
    
    url = ""
    
    if len(matches)>0:
        url = matches[0]

    logger.info("[divxlink.py] url="+url)
    return url
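Purely as a sketch, the same two-request flow could be condensed with the scrapertools helpers used by the other connectors in this listing, assuming the module-level scrapertools and unpackerjs imports are available and that cache_page keeps the cookie jar between calls and accepts a post argument as it does above; geturl_condensed is hypothetical and the original's error handling (returning "" when a block is missing) is omitted.

def geturl_condensed(urlvideo):
    # First request: read the page and grab the hidden "rand" token of the form.
    data = scrapertools.cache_page(urlvideo)
    codigo = scrapertools.get_match(urlvideo, 'divxlink\.com/([^/]+)/')
    randomstring = scrapertools.get_match(data, '<input type="hidden" name="rand" value="([^"]+)">')

    # Second request: simulate the download2 form submission.
    post = "op=download2&id=" + codigo + "&rand=" + randomstring + "&referer=&method_free=&method_premium=&down_direct=1"
    data = scrapertools.cache_page(urlvideo, post=post)

    # Unpack the embedded player block and pull the video URL out of it
    # ((?s) makes the pattern span newlines like the re.DOTALL searches above).
    packed = scrapertools.get_match(data, '(?s)<div id="embedcontmvshre"[^>]+>(.*?)</div>')
    descifrado = unpackerjs.unpackjs(packed)
    return scrapertools.get_match(descifrado, '<param name="src"value="([^"]+)"/>')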