def play(item):
    logger.info("[jkanime.py] play url="+item.url)
    
    itemlist = []

    if item.server=="directo":
        '''
        GET /stream/jkmedia/717aa382aee2117d9762067125ac79e2/6ee0218e84b123c0c84e98310176fdfc/1/2364e7a4d358dfffeaca3410e73c5e76/?t=7 HTTP/1.1
        Host: jkanime.net
        Connection: keep-alive
        User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.43 Safari/537.31
        Accept: */*
        Referer: http://jkanime.net/sukitte-ii-na-yo.-specials/1/
        Accept-Encoding: gzip,deflate,sdch
        Accept-Language: es-ES,es;q=0.8
        Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3
        Cookie: ci_session=a%3A4%3A%7Bs%3A10%3A%22session_id%22%3Bs%3A32%3A%2294ef36f56048bf6394353e714505e100%22%3Bs%3A10%3A%22ip_address%22%3Bs%3A13%3A%2288.12.106.177%22%3Bs%3A10%3A%22user_agent%22%3Bs%3A50%3A%22Mozilla%2F5.0+%28Macintosh%3B+Intel+Mac+OS+X+10_8_2%29+App%22%3Bs%3A13%3A%22last_activity%22%3Bi%3A1364589959%3B%7D4263c499ebf728838ce9d8cb838cc55e; __cfduid=d6b6b16c05385bc35df57a09daa5e57e81364593644; flowplayer=3.2.8; gao_session_expiry=Sat, 30 Mar 2013 05:47:32 GMT; gao_skin_views=1; __utma=218181122.1870996415.1364593654.1364593654.1364593654.1; __utmb=218181122.1.10.1364593654; __utmc=218181122; __utmz=218181122.1364593654.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)
        ''' 
        headers = []
        headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:18.0) Gecko/20100101 Firefox/18.0"])
        headers.append(["Referer",item.extra])
        location = scrapertools.get_header_from_response( item.url , headers=headers , header_to_get="location" )
        logger.info("location="+location)
        location = location + "|" + urllib.urlencode({'Referer':'http://jkanime.net/assets/images/players/jkplayer.swf'})
        #http://jkanime.net/stream/jkget/00e47553476031a35fd158881ca9d49f/32021b728c40bb5779190e0a95b72d40/?t=6e
        itemlist.append( Item(channel=__channel__, action="play" , title=item.title , url=location, thumbnail=item.thumbnail, fanart=item.thumbnail, plot=item.plot, server=item.server, folder=False))
    else:
        itemlist.append( Item(channel=__channel__, action="play" , title=item.title , url=item.url, thumbnail=item.thumbnail, fanart=item.thumbnail, plot=item.plot, server=item.server, folder=False))
    return itemlist
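
# Note: scrapertools.get_header_from_response, used throughout these
# examples, reads one response header (typically "location") without
# following the redirect. A minimal sketch of the idea in plain Python 2
# stdlib; get_location and the HEAD request are illustrative assumptions,
# not the actual pelisalacarta implementation (plain HTTP only):
import httplib
import urlparse

def get_location(url, extra_headers=None):
    # HEAD avoids downloading the body; httplib never follows redirects
    parts = urlparse.urlparse(url)
    path = parts.path or "/"
    if parts.query:
        path += "?" + parts.query
    conn = httplib.HTTPConnection(parts.netloc)
    conn.request("HEAD", path, headers=dict(extra_headers or []))
    response = conn.getresponse()
    return response.getheader("location", "")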
Example #2
def play(item):
    logger.info("[somosmovies.py] play(item.url="+item.url+")")
    itemlist=[]

    if "bit.ly" in item.url:
        logger.info("Acortador bit.ly")
        location = scrapertools.get_header_from_response(item.url,header_to_get="location")
        logger.info("[somosmovies.py] location="+location)
        item.url = location
        return play(item)

    if "goo.gl" in item.url:
        logger.info("Acortador goo.gl")
        location = scrapertools.get_header_from_response(item.url,header_to_get="location")
        item.url = location
        return play(item)

    #adf.ly
    elif "j.gs" in item.url:
        logger.info("Acortador j.gs (adfly)")
        from servers import adfly
        location = adfly.get_long_url(item.url)
        item.url = location
        return play(item)

    else:
        from servers import servertools
        itemlist=servertools.find_video_items(data=item.url)
        for videoitem in itemlist:
            videoitem.channel=__channel__
            videoitem.folder=False

    return itemlist
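
# The shortener handling above recurses once per hop; the same logic can be
# written as a bounded loop. A sketch (resolve_short_url and SHORTENERS are
# illustrative names; j.gs/adf.ly still needs the adfly helper, so this
# sketch only follows plain-redirect shorteners):
SHORTENERS = ("bit.ly", "goo.gl")

def resolve_short_url(url, max_hops=5):
    for _ in range(max_hops):
        if not any(s in url for s in SHORTENERS):
            break
        location = scrapertools.get_header_from_response(url, header_to_get="location")
        if not location:
            break
        url = location
    return url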
Example #3
def findvideos(item):
    logger.info("[peliculasonlineflv.py] findvideos")
    itemlist=[]

    # Download the page
    data = scrapertools.cachePage(item.url)

    from servers import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel=__channel__
        videoitem.action="play"
        videoitem.folder=False
        videoitem.title = "Ver en "+videoitem.server
        videoitem.fulltitle = item.fulltitle

    # Now look for manual patterns
    try:
        vk_code = scrapertools.get_match(data,"vklat\=([a-zA-Z0-9]+)")
        vk_url = scrapertools.get_header_from_response("http://goo.gl/"+vk_code,header_to_get="location")
        itemlist.append( Item( channel=__channel__ , action="play" , title="Ver en VK (Latino)" , server="vk" , url=vk_url , folder=False ) )
    except:
        logger.info("No encontrado enlace VK")

    try:
        putlocker_code = scrapertools.get_match(data,"plat\=([A-Z0-9]+)")
        putlocker_url = "http://www.putlocker.com/embed/"+putlocker_code
        itemlist.append( Item( channel=__channel__ , action="play" , title="Ver en Putlocker (Latino)" , server="putlocker" , url=putlocker_url , folder=False ) )
    except:
        logger.info("No encontrado enlace PUTLOCKER")

    try:
        vk_code = scrapertools.get_match(data,"vksub\=([a-zA-Z0-9]+)")
        vk_url = scrapertools.get_header_from_response("http://goo.gl/"+vk_code,header_to_get="location")
        itemlist.append( Item( channel=__channel__ , action="play" , title="Ver en VK (Subtitulado)" , server="vk" , url=vk_url , folder=False ) )
    except:
        logger.info("No encontrado enlace VK")

    try:
        putlocker_code = scrapertools.get_match(data,"plsub\=([A-Z0-9]+)")
        putlocker_url = "http://www.putlocker.com/embed/"+putlocker_code
        itemlist.append( Item( channel=__channel__ , action="play" , title="Ver en Putlocker (Subtitulado)" , server="putlocker" , url=putlocker_url , folder=False ) )
    except:
        logger.info("No encontrado enlace PUTLOCKER")

    try:
        vk_code = scrapertools.get_match(data,"vk\=([a-zA-Z0-9]+)")
        vk_url = scrapertools.get_header_from_response("http://goo.gl/"+vk_code,header_to_get="location")
        itemlist.append( Item( channel=__channel__ , action="play" , title="Ver en VK" , server="vk" , url=vk_url , folder=False ) )
    except:
        logger.info("No encontrado enlace VK")

    try:
        putlocker_code = scrapertools.get_match(data,"put\=([A-Z0-9]+)")
        putlocker_url = "http://www.putlocker.com/embed/"+putlocker_code
        itemlist.append( Item( channel=__channel__ , action="play" , title="Ver en Putlocker" , server="putlocker" , url=putlocker_url , folder=False ) )
    except:
        logger.info("No encontrado enlace PUTLOCKER")

    return itemlist
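
# The try/except blocks above repeat one pattern with different regexes and
# titles; they could be collapsed into a data-driven loop. A sketch for the
# VK variants (find_vk_links and VK_PATTERNS are illustrative names; the
# Putlocker variants would follow the same shape with the embed URL):
VK_PATTERNS = [("vklat=([a-zA-Z0-9]+)", "Ver en VK (Latino)"),
               ("vksub=([a-zA-Z0-9]+)", "Ver en VK (Subtitulado)"),
               ("vk=([a-zA-Z0-9]+)", "Ver en VK")]

def find_vk_links(data):
    itemlist = []
    for patron, title in VK_PATTERNS:
        try:
            code = scrapertools.get_match(data, patron)
            # VK codes are goo.gl fragments; the redirect yields the real URL
            url = scrapertools.get_header_from_response("http://goo.gl/" + code,
                                                        header_to_get="location")
            itemlist.append(Item(channel=__channel__, action="play", title=title,
                                 server="vk", url=url, folder=False))
        except:
            logger.info("No encontrado enlace VK")
    return itemlist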
def play(item):
    logger.info("[cb01anime.py] play")

    if '/goto/' in item.url:
        item.url = item.url.split('/goto/')[-1].decode('base64')

    item.url = item.url.replace('http://cineblog01.pw', 'http://k4pp4.pw')

    logger.debug("##############################################################")
    if "go.php" in item.url:
        data = scrapertools.anti_cloudflare(item.url, headers)
        try:
            data = scrapertools.get_match(data, 'window.location.href = "([^"]+)";')
        except IndexError:
            try:
                # data = scrapertools.get_match(data, r'<a href="([^"]+)">clicca qui</a>')
                # Alternatively, since "Clicca qui per proseguire" sometimes appears:
                data = scrapertools.get_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
            except IndexError:
                data = scrapertools.get_header_from_response(item.url, headers=headers, header_to_get="Location")
        while 'vcrypt' in data:
            data = scrapertools.get_header_from_response(data, headers=headers, header_to_get="Location")
        logger.debug("##### play go.php data ##\n%s\n##" % data)
    elif "/link/" in item.url:
        data = scrapertools.anti_cloudflare(item.url, headers)
        from lib import jsunpack

        try:
            data = scrapertools.get_match(data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
            data = jsunpack.unpack(data)
            logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
        except IndexError:
            logger.debug("##### The content is yet unpacked ##\n%s\n##" % data)

        data = scrapertools.find_single_match(data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
        while 'vcrypt' in data:
            data = scrapertools.get_header_from_response(data, headers=headers, header_to_get="Location")
        logger.debug("##### play /link/ data ##\n%s\n##" % data)
    else:
        data = item.url
        logger.debug("##### play else data ##\n%s\n##" % data)
    logger.debug("##############################################################")

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
Example #5
def get_server_url(url):

    cookie = scrapertools.get_header_from_response(url, header_to_get="set-cookie", headers = ENLACESPEPITO_REQUEST_HEADERS)

    import base64
    cookies = cookie.split("enlacespepito.com, ")

    index_charchange_class_v = base64.decodestring(cookies[3].split(";")[0].split("=")[1].replace("%2B","+").replace("%3D","=")).split("@")

    class_v = index_charchange_class_v[1]
    index = index_charchange_class_v[0].split("-")[0] 
    charchange = index_charchange_class_v[0].split("-")[1] 

    patron_class = '=."([^"]+)","([^"]+)","([^"]+)".;'
    patron_href = '=.*?=."([^"]+)","([^"]+)","([^"]+)".;'

    cookie6 = base64.decodestring(cookies[6].split(";")[0].split("=")[1].replace("%2B","+").replace("%3D","="))
    cookie7 = base64.decodestring(cookies[7].split(";")[0].split("=")[1].replace("%2B","+").replace("%3D","="))

    class1 = scrapertools.get_match(cookie6, patron_class)
    href1 = scrapertools.get_match(cookie6, patron_href)
    class2 = scrapertools.get_match(cookie7, patron_class)
    href2 = scrapertools.get_match(cookie7, patron_href)

    href_v = ""
    n = 0
    for temp in class1:
        if temp == class_v:
            i = 0
            for char in href1[n]:
                if int(index) == i: href_v += charchange
                else: href_v += char
                i += 1
            break
        n = n+1
    n = 0
    for temp in class2:
        if temp == class_v:
            i = 0
            for char in href2[n]:
                if int(index) == i: href_v += charchange
                else: href_v += char
                i += 1
            break
        n = n+1


    posible_url = "http://www.enlacespepito.com/"+href_v+".html"
    url = scrapertools.get_header_from_response(posible_url, header_to_get="location", headers = ENLACESPEPITO_REQUEST_HEADERS)

    return url
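
# The repeated .replace("%2B","+").replace("%3D","=") in get_server_url is
# just URL-decoding the cookie value before base64-decoding it; a sketch of
# that step with urllib.unquote (decode_cookie_value is an illustrative name):
import base64
import urllib

def decode_cookie_value(raw_cookie):
    # raw_cookie is one "name=value; ..." fragment from a Set-Cookie header
    value = raw_cookie.split(";")[0].split("=", 1)[1]
    return base64.decodestring(urllib.unquote(value))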
def findvid_serie(item):
    logger.info("[cineblog01.py] findvideos")

    itemlist = []

    # The page markup is passed in item.url
    data = item.url
    data = data.replace('http://cineblog01.pw', 'http://k4pp4.pw')

    patron = '<a rel="nofollow" href="([^"]+)"[^>]+>(.*?)</a>'
    # Extract the entries
    matches = re.compile(patron, re.DOTALL).finditer(data)
    for match in matches:
        scrapedurl = match.group(1)
        if '/goto/' in scrapedurl:
            scrapedurl = scrapertools.get_header_from_response(scrapedurl, header_to_get="Location")
        scrapedtitle = match.group(2)
        title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 url=scrapedurl,
                 fulltitle=item.fulltitle,
                 show=item.show,
                 folder=False))

    return itemlist
def play(item):
    logger.info("[tal.py] play")    
    data = scrapertools.cachePage(item.url)
    #logger.info(data)
    tcurl = "rtmpe://streaming.vzaar.com:1935/"

    itemlist = []
    
    #<param value="http://view.vzaar.com/605002.flashplayer" name="movie">
    patron = '<param value="([^"]+)" name="movie">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)
    if not matches:
        return itemlist
    swfurl = scrapertools.get_header_from_response(matches[0], "location")
    #logger.info("[tal.py] swfurl: " + swfurl)

    patron = '.*?guid=(.*?)\&.*?format=(.*?)\&'
    matches = re.compile(patron,re.DOTALL).findall(swfurl)
    if DEBUG: scrapertools.printMatches(matches)
    playpath = ""
    for guid, format in matches:
        if (format == 'mp4'):
            playpath = format + ":vzaar/" + guid[:3] + "/" + guid[3:6] + "/target/" + guid + "." + format
        else:
            playpath = "vzaar/" + guid[:3] + "/" + guid[3:6] + "/target/" + guid    
    #logger.info(playpath)
    scrapedurl = tcurl + " swfUrl=" + swfurl + " pageUrl=" + item.url + " playpath=" +  playpath + " swfVfy=true"      
    itemlist.append( Item(channel=__channel__, action="play",  server="directo",  title=item.title, url=scrapedurl, folder=False))

    return itemlist
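
# For reference, the string assembled above follows librtmp's space-separated
# option syntax; a hypothetical final value (GUID and page URL invented):
#   rtmpe://streaming.vzaar.com:1935/ swfUrl=http://view.vzaar.com/605002.flashplayer pageUrl=http://example.com/video playpath=mp4:vzaar/605/002/target/605002.mp4 swfVfy=true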
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[uploadedto.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
    
    if premium:
        # Log in to obtain the cookie
        login_url = "http://uploaded.to/io/login"
        post = "id="+user+"&pw="+password
        headers = []
        headers.append( ["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:10.0.1) Gecko/20100101 Firefox/10.0.1"] )
        headers.append( ["X-Requested-With","XMLHttpRequest"] )
        headers.append( ["X-Prototype-Version","1.6.1"] )
        headers.append( ["Referer","http://uploaded.to/"] )
        
        data = scrapertools.cache_page( login_url, post=post, headers=headers)
        logger.info("data="+data)
        
        location = scrapertools.get_header_from_response( page_url , header_to_get = "location")
        logger.info("location="+location)
    
        video_urls.append( ["(Premium) [uploaded.to]" , page_url] )

    for video_url in video_urls:
        logger.info("[uploadedto.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
def play(item):
    logger.info("pelisalacarta.bricocine play")
    media_url = scrapertools.get_header_from_response(item.url, header_to_get="location")
    itemlist = servertools.find_video_items(data=media_url)

    # If the redirect did not yield anything, fall back to the original URL
    # and scan the downloaded page for more links
    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(data=item.url)
        data = scrapertools.cache_page(item.url)
        listavideos = servertools.findvideos(data)
        for video in listavideos:
            # Each entry is [title, url, server]
            itemlist.append(Item(channel=__channel__, action="play", title=item.title,
                                 url=video[1], server=video[2], folder=False))

    return itemlist
def play(item):
    logger.info("pelisalacarta.channels.playmax play url="+item.url)

    ## stopbot - url
    url = scrapertools.get_header_from_response(item.url, header_to_get="location")

    ## Download the page
    data = scrapertools.cache_page(url)
    ## Collapse whitespace and strip comments
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>','',data)
    data = re.sub(r'\s+',' ',data)
    data = re.sub(r'<!--.*?-->','',data)

    ## stopbot - POST tipo 1
    #$.ajax({ type: "POST", url: "./bot.php", data: "key=qdWmuqaRhpbdxtrx5cK7lt%2FNb8+XvpvbvGXXvaSv5MnRmuA%3D%3D&id=6539&k=MXVUVzE5TFg3dExkMGclM0QlM0Q=&tipo=1",

    ## stopbot - POST tipo 2
    #$.ajax({ type: "POST", url: "./bot.php", data: "dc=" + m + "&key=qdWmuqaRhpbdxtrx5cK7lt%2FNb8+XvpvbvGXXvaSv5MnRmuA%3D%3D&id=6539&k=MXVUVzE5TFg3dExkMGclM0QlM0Q=&tipo=2",

    tipo_1 = scrapertools.get_match(data,'data: "([^"]+)"')
    tipo_1 = scrapertools.cache_page('http://stopbot.tk/bot.php',post=tipo_1)

    tipo_2 = scrapertools.get_match(data,'data: "dc=" . m . "([^"]+)"')
    tipo_2 = "dc="+tipo_1+tipo_2

    data = scrapertools.cache_page('http://stopbot.tk/bot.php',post=tipo_2)

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.channel = __channel__

    return itemlist
Example #11
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("pelisalacarta.servers.dailymotion get_video_url(page_url='%s')" % page_url)
    video_urls = []

    data, headers = scrapertools.read_body_and_headers(page_url, headers=DEFAULT_HEADERS)
    data = data.replace("\\", "")
    '''
    "240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
    '''
    header_cookie = ""
    for cookie in headers:
        if cookie[0] == "set-cookie":
            header_cookie = cookie[1]
    DEFAULT_HEADERS.append(['Cookie', header_cookie])

    subtitle = scrapertools.find_single_match(data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')
    qualities = scrapertools.find_multiple_matches(data, '"([^"]+)":(\[\{"type":".*?\}\])')
    for calidad, urls in qualities:
        if calidad == "auto":
            continue
        patron = '"type":"(?:video|application)/([^"]+)","url":"([^"]+)"'
        matches = scrapertools.find_multiple_matches(urls, patron)
        for stream_type, stream_url in matches:
            stream_type = stream_type.replace('x-mpegURL', 'm3u8')
            if stream_type == "mp4":
                stream_url = scrapertools.get_header_from_response(stream_url, header_to_get="location",
                                                                   headers=DEFAULT_HEADERS)
            else:
                data_m3u8 = scrapertools.downloadpage(stream_url)
                stream_url = scrapertools.find_single_match(data_m3u8, '(http:.*?\.m3u8)')
            video_urls.append([calidad + "p ." + stream_type + " [dailymotion]", stream_url, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.dailymotion %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #12
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[ustream.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    headers=[ ["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:14.0) Gecko/20100101 Firefox/14.0.1"] ]
    location = scrapertools.get_header_from_response(page_url, header_to_get="location")
    logger.info("[ustream.py] location="+location)

    page_url = urlparse.urljoin(page_url,location)
    logger.info("[ustream.py] page_url="+page_url)
    
    data = scrapertools.cache_page("http://piscui.webear.net/ustream.php?url="+page_url,headers=headers)
    logger.info("data="+data)

    video_url = scrapertools.get_match(data,'<textarea rows=3 cols=70>(.*?)</textarea>')

    logger.info("video_url="+video_url)

    if video_url!="":
        video_urls.append( [ "[ustream]" , video_url ] )

    for video_url in video_urls:
        logger.info("[ustream.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("pelisalacarta.servers.lolabits get_video_url(page_url='%s')" % page_url)

    data = scrapertools.cache_page(page_url)
    # POST parameters: token and fileId
    token = scrapertools.find_single_match(data,'<input.*?name="__RequestVerificationToken".*?value="([^"]+)"')
    fileId = scrapertools.find_single_match(data,'<input.*?name="FileId" value="([^"]+)"')
    post = "fileId="+fileId+"&__RequestVerificationToken="+urllib.quote(token)
    # URL used to extract the download address, depending on the server
    if "http://abelhas.pt" in page_url: url_download = "http://abelhas.pt/action/License/Download"
    else: url_download = "http://lolabits.es/action/License/Download"
    data = scrapertools.downloadpage(url_download , post=post)
    media_url = scrapertools.find_single_match(data,'"redirectUrl":"([^"]+)"')
    media_url = media_url.decode("unicode-escape")
    # Read the header to get the file name
    try:
        content = scrapertools.get_header_from_response(media_url, header_to_get="content-disposition")
        extension = scrapertools.find_single_match(content, 'filename="([^"]+)"')[-4:]
    except:
        extension = page_url.rsplit('.',1)[1]

    video_urls = []
    video_urls.append( [ extension+" [lolabits]", media_url])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.lolabits %s - %s" % (video_url[0],video_url[1]))
    return video_urls
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[dailymotion.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
    
    data = scrapertools.cache_page(page_url)
    #logger.info("data="+data)
    sequence = re.compile('"sequence":"(.+?)"').findall(data)
    logger.info("sequence="+str(sequence))
    newseqeunce = urllib.unquote(sequence[0]).decode('utf8').replace('\\/', '/')
    logger.info("newseqeunce="+newseqeunce)

    dm_low = re.compile('"sdURL":"(.+?)"').findall(newseqeunce)
    dm_high = re.compile('"hqURL":"(.+?)"').findall(newseqeunce)
    videoUrl = ''

    if len(dm_low) > 0:
        video_urls.append( [ "SD [dailymotion]",dm_low[0] ] )

    if len(dm_high) > 0:
        video_urls.append( [ "HD [dailymotion]",dm_high[0] ] )

    try:
        alternate_url = re.compile('"video_url":"(.+?)"').findall(newseqeunce)
        alternate_url = urllib.unquote( alternate_url[0] ).decode('utf8').replace('\\/', '/')

        location = scrapertools.get_header_from_response(alternate_url,header_to_get="location")

        video_urls.append( [ "SD [dailymotion]" , location ] )
    except:
        pass

    for video_url in video_urls:
        logger.info("[dailymotion.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Example #15
def detalle_programa(item):
    logger.info("tvalacarta.channels.dwspan detalle_programa")    

    id_programa = scrapertools.find_single_match(item.url,"programm=(\d+)")
    url = "http://www.dw.com/es/programa/a/s-"+id_programa+"-1"

    try:
        item.page = scrapertools.get_header_from_response(url,header_to_get="location")
        data = scrapertools.cache_page(item.page)

        item.plot = scrapertools.find_single_match(data,'<div class="longText">(.*?)</div>')
        item.plot = scrapertools.htmlclean( item.plot ).strip()
        if item.plot=="":
            item.plot = scrapertools.find_single_match(data,'<div class="news"[^<]+<h2[^<]+</h2>(.*?)</div>')
            item.plot = scrapertools.htmlclean( item.plot ).strip()

        item.thumbnail = scrapertools.find_single_match(data,'<input type="hidden" name="preview_image" value="([^"]+)"')
        if item.thumbnail.strip()=="":
            item.thumbnail = scrapertools.find_single_match(data,'<img class="stillImage" src="([^"]+)"')
        item.thumbnail = urlparse.urljoin(item.page,item.thumbnail)
    except:
        import traceback
        logger.info(traceback.format_exc())

    return item
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    video = True
    data = scrapertools.downloadpageWithoutCookies(page_url)

    if "videocontainer" not in data:
        video = False
        url = page_url.replace("/embed/","/f/")
        data = scrapertools.downloadpageWithoutCookies(url)
        text_encode = scrapertools.get_match(data,"Click to start Download.*?<script[^>]+>(.*?)</script")
        text_decode = decode(data)
    else:
        text_encode = scrapertools.get_match(data,"<video[^<]+<script[^>]+>(.*?)</script>")
        text_decode = decode(data)

    # Header for the download
    header_down = "|User-Agent="+headers['User-Agent']+"|"
    if video == True:
        videourl = scrapertools.get_match(text_decode, "(http.*?true)")
        videourl = scrapertools.get_header_from_response(videourl,header_to_get="location")
        videourl = videourl.replace("https://","http://").replace("?mime=true","")
        extension = videourl[-4:]
        video_urls.append([ extension + " [Openload]", videourl+header_down+extension])
    else:
        videourl = scrapertools.find_single_match(text_decode, '"href",(?:\s|)\'([^\']+)\'')
        videourl = videourl.replace("https://","http://")
        extension = videourl[-4:]
        video_urls.append([ extension + " [Openload]", videourl+header_down+extension])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("pelisalacarta.servers.dailymotion get_video_url(page_url='%s')" % page_url)
    video_urls = []

    data, headers = scrapertools.read_body_and_headers(page_url,headers=DEFAULT_HEADERS)
    data = data.replace("\\","")
    '''
    "240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
    '''
    patron = '"([^"]+)":\[\{"type":"video/([^"]+)","url":"([^"]+)"\}\]'
    matches = scrapertools.find_multiple_matches(data, patron)
    subtitle = scrapertools.find_single_match(data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')

    header_cookie = ""
    for cookie in headers:
        if cookie[0] == "set-cookie":
            header_cookie = cookie[1]
    DEFAULT_HEADERS.append(['Cookie',header_cookie])

    for stream_name,stream_type,stream_url in matches:
        stream_url = scrapertools.get_header_from_response(stream_url, header_to_get="location",headers=DEFAULT_HEADERS)
        video_urls.append( [ stream_name+"p ."+stream_type+" [dailymotion]", stream_url, 0, subtitle ] )

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.dailymotion %s - %s" % (video_url[0],video_url[1]))

    return video_urls
def peliculas_rss(item):
    logger.info("streamondemand.videotecadiclass peliculas_rss")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries (folders)
    patron = '<div class="fetch-rss-content ">\s*(.*?)<\/div>\s*<a\s*href="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedurl in matches:
        scrapedthumbnail = ""
        scrapedplot = ""
        scrapedurl = scrapertools.get_header_from_response(scrapedurl, header_to_get="Location")
        txt = "streaming"
        if txt not in scrapedtitle: continue
        old = "blogspot"
        if old in scrapedtitle: continue
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.split("(")[0]
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 contentType="movie",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='movie'))

    return itemlist
def play(item):
    logger.info("pelisalacarta.channels.verseriesynovelas play")
    itemlist = []
    
    data = ""
    try:
        data = scrapertools.downloadpage(item.url, headers=CHANNEL_HEADERS)
    except:
        pass

    url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
    if not url_redirect:
        try:
            import StringIO
            compressedstream = StringIO.StringIO(data)
            import gzip
            gzipper = gzip.GzipFile(fileobj=compressedstream)
            data = gzipper.read()
            gzipper.close()
            url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
        except:
            pass

    
    location = scrapertools.get_header_from_response(url_redirect, headers=CHANNEL_HEADERS[:2], header_to_get="location")
    enlaces = servertools.findvideos(data=location)
    if len(enlaces) > 0:
        itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))

    return itemlist
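
# The gzip fallback above can be expressed more directly by sniffing the
# gzip magic bytes; a sketch (maybe_gunzip is an illustrative name):
import gzip
import StringIO

def maybe_gunzip(data):
    # gzip streams always start with the magic bytes \x1f\x8b
    if data[:2] == "\x1f\x8b":
        return gzip.GzipFile(fileobj=StringIO.StringIO(data)).read()
    return data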
Example #20
def test_video_exists( page_url ):
    logger.info("[putlocker.py] test_video_exists(page_url='%s')" % page_url)

    location = scrapertools.get_header_from_response( url = page_url , header_to_get = "location")
    if "&404" in location:
        return False,"El archivo no existe<br/>en putlocker o ha sido borrado."
    
    data = scrapertools.cache_page(page_url)

    patron  = '<form method="post">[^<]+'
    patron += '<input type="hidden" value="([0-9a-f]+?)" name="([^"]+)">[^<]+'
    patron += '<input name="confirm" type="submit" value="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)==0: return True,""

    post = matches[0][1]+"="+matches[0][0]+"&confirm="+(matches[0][2].replace(" ","+"))
    headers = []
    headers.append( ['User-Agent','Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:10.0.2) Gecko/20100101 Firefox/10.0.2'] )
    headers.append( [ "Accept" , "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" ])
    headers.append( ['Referer',page_url] )

    data = scrapertools.cache_page( page_url , post=post, headers=headers )
    logger.info("data="+data)

    if '<div id="disabled">Encoding to enable streaming is in progresss. Try again soon.</div>' in data:
        try:
            title = scrapertools.get_match(data,"<title>PutLocker - ([^<]+)</title>")
        except:
            title=""
        return False,"El video \""+title+"\"<br/>esta pendiente de recodificar"

    return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[fileserve.py] get_video_url(page_url='%s')" % page_url)

    video_urls = []

    if premium:
        # Hit the home page to preload the cookie
        data = scrapertools.cache_page("http://fileserve.com/index.php")
    
        # Do the login
        url = "http://fileserve.com/login.php"
        post = "loginUserName=%s&loginUserPassword=%s&autoLogin=on&ppp=102&loginFormSubmit=Login" % (user,password)
        data = scrapertools.cache_page(url, post=post)
    
        location = scrapertools.get_header_from_response(page_url,header_to_get="location")
        logger.info("location="+location)
    
        if location.startswith("http"):
            extension = location[-4:]
            video_urls.append( [ "%s (Premium) [fileserve]" % extension, location ] )

    for video_url in video_urls:
        logger.info("[fileserve.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
def get_long_url( short_url ):
    logger.info("[safelinking.py] get_long_url(short_url='%s')" % short_url)
    
    location = scrapertools.get_header_from_response(short_url,header_to_get="location")
    logger.info("location="+location)

    return location
Example #23
def test_video_exists( page_url ):
    logger.info("[wupload.py] test_video_exists(page_url='%s')" % page_url)

    # Exists: http://www.wupload.com/file/2666595132
    # Does not exist: http://www.wupload.es/file/2668162342
    location = scrapertools.get_header_from_response(page_url,header_to_get="location")
    logger.info("location="+location)
    if location!="":
        page_url = location

    data = scrapertools.downloadpageWithoutCookies(page_url)
    logger.info("data="+data)
    patron  = '<p class="fileInfo filename"><span>Filename: </span> <strong>([^<]+)</strong></p>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    
    if len(matches)>0:
        return True,""
    else:
        patron  = '<p class="deletedFile">(Sorry, this file has been removed.)</p>'
        matches = re.compile(patron,re.DOTALL).findall(data)
        if len(matches)>0:
            return False,matches[0]
        
        patron = '<div class="section CL3 regDownloadMessage"> <h3>(File does not exist)</h3> </div>'
        matches = re.compile(patron,re.DOTALL).findall(data)
        if len(matches)>0:
            return False,matches[0]
    
    return True,""
def play(item):
    logger.info("[italiafilm.py] play")

    if item.url.startswith("http://www.italiafilm.tv/engine/go.php"):
        item.url = scrapertools.get_header_from_response(url=item.url, header_to_get="location")
        item.server = servertools.get_server_from_url(item.url)
    return [item]
Example #25
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[fourshared.py] get_video_url(page_url='%s')" % page_url)

    video_urls = [ ]

    if page_url.startswith("http://www.4shared"):
        # http://www.4shared.com/embed/392975628/ff297d3f
        page_url = scrapertools.get_header_from_response(page_url,header_to_get="location")

        # http://www.4shared.com/flash/player.swf?file=http://dc237.4shared.com/img/392975628/ff297d3f/dlink__2Fdownload_2Flj9Qu-tF_3Ftsid_3D20101030-200423-87e3ba9b/preview.flv&d
        logger.info("[fourshared.py] redirect a '%s'" % page_url)
        patron = "file\=([^\&]+)\&"
        matches = re.compile(patron,re.DOTALL).findall(page_url)
        
        try:
            video_urls.append([ "[fourshared]" , matches[0] ])
        except:
            pass
    else:
        video_urls.append([ "[fourshared]" , page_url ])

    for video_url in video_urls:
        logger.info("[fourshared.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
def play(item):
    logger.info("pelisalacarta.channels.descargasmix play")
    itemlist = []
    if "enlacesmix.com" in item.url:
        DEFAULT_HEADERS.append(["Referer", item.extra])
        if not item.url.startswith("http:"):
            item.url = "http:" + item.url
        data = scrapertools.downloadpage(item.url, headers=DEFAULT_HEADERS)
        item.url = scrapertools.find_single_match(data, 'iframe src="([^"]+)"')
         
        enlaces = servertools.findvideos(data=item.url)
        if len(enlaces) > 0:
            itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
    elif item.server == "directo":
        global DEFAULT_HEADERS
        DEFAULT_HEADERS.append(["Referer", item.extra])
        data = scrapertools.downloadpage(item.url, headers=DEFAULT_HEADERS)
        subtitulo = scrapertools.find_single_match(data, "var subtitulo='([^']+)'")
        DEFAULT_HEADERS[1][1] = item.url
        calidades = ["1080p", "720p", "480p", "360p"]
        for i in range(0, len(calidades)):
            url_redirect = scrapertools.find_single_match(data, "{file:'([^']+)',label:'"+calidades[i]+"'")
            if url_redirect:
                url_video = scrapertools.get_header_from_response(url_redirect, header_to_get="location", headers=DEFAULT_HEADERS)
                if url_video:
                    url_video = url_video.replace(",", "%2C")
                    itemlist.append(item.clone(url=url_video, subtitle=subtitulo))
                    break
    else:
        itemlist.append(item.clone())
    
    return itemlist
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[filenium.py] get_video_url(page_url='%s')" % page_url)
    location=""
    page_url = correct_url(page_url)
    if premium:
        # Do the login
        if "?.torrent" in page_url:
            location = page_url.replace("?.torrent","")        
        else:
            url = "http://filenium.com/welcome"
            post = "username=%s&password=%s" % (user,password)
            data = scrapertools.cache_page(url, post=post, timeout=TIMEOUT)
            link = urlencode({'filez':page_url})
            location = scrapertools.cache_page("http://filenium.com/?filenium&" + link, timeout=TIMEOUT)
            
        user = user.replace("@","%40")
        
        #logger.info("[filenium.py] torrent url (location='%s')" % location)
        
        if "xbmc" in config.get_platform():
            #location = location.replace("http://cdn.filenium.com","http://"+user+":"+password+"@cdn.filenium.com")
            location = location.replace("http://","http://"+user+":"+password+"@")
        else:
            location = location.replace("/?.zip","")
            user = user.replace(".","%2e")
            location = location + "?user="******"&passwd="+password

        logger.info("location="+location)

        # Resolve the redirect so it also works on Plex and WiiMC
        try:
            location2 = scrapertools.get_header_from_response(location,header_to_get="Location")
            logger.info("location2="+location2)
        except:
            location2=""

        if location2!="":
            location=location2

        '''
        if not location.startswith("http") and page_url.endswith(".torrent"):
            # Read the id
            data=json.loads(location)
            logger.info("data="+str(data))
            name = data['name']

            datas = scrapertools.cachePage("http://filenium.com/xbmc_json", timeout=TIMEOUT)
            logger.info(datas)
            data = json.loads(datas)
            logger.info(str(data))
            
            for match in data:
                if match['status'] == "COMPLETED" and match['filename'].startswith(name):
                    location = match['download_url'] + "?.torrent"
                    logger.info("location="+location)
                    break
        '''

    return location
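
# The credential splicing above (user:password embedded in the URL for
# basic auth) can be centralized; a sketch under the same assumptions
# (with_basic_auth is an illustrative name):
import urllib

def with_basic_auth(url, user, password):
    # Percent-escape '@' in e-mail user names; the code above also escapes
    # '.' by hand, which urllib.quote leaves alone by default
    user = urllib.quote(user, safe="").replace(".", "%2e")
    password = urllib.quote(password, safe="")
    return url.replace("http://", "http://%s:%s@" % (user, password), 1)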
def play(item):
    logger.info("[cineblog01.py] play")

    print "##############################################################"
    if "go.php" in item.url:
        data = scrapertools.anti_cloudflare(item.url, headers)
        try:
            data = scrapertools.get_match(data, 'window.location.href = "([^"]+)";')
        except IndexError:
            #            data = scrapertools.get_match(data, r'<a href="([^"]+)">clicca qui</a>')
            #   Alternatively, since "Clicca qui per proseguire" sometimes appears:
            data = scrapertools.get_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
        if 'vcrypt' in data:
            data = scrapertools.get_header_from_response(data, headers=headers, header_to_get="Location")
        print "##### play go.php data ##\n%s\n##" % data
    elif "/link/" in item.url:
        data = scrapertools.anti_cloudflare(item.url, headers)
        from core import jsunpack

        try:
            data = scrapertools.get_match(data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
            # data = scrapertools.get_match(data, "(eval.function.p,a,c,k,e,.*?)</script>")
            data = jsunpack.unpack(data)
            print "##### play /link/ unpack ##\n%s\n##" % data
        except IndexError:
            print "##### The content is yet unpacked"

        data = scrapertools.get_match(data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
        if 'vcrypt' in data:
            data = scrapertools.get_header_from_response(data, headers=headers, header_to_get="Location")
        print "##### play /link/ data ##\n%s\n##" % data
    else:
        data = item.url
        print "##### play else data ##\n%s\n##" % data
    print "##############################################################"

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("streamondemand.servers.allmyvideos url=%s" % page_url)

    # Normalize the URL
    videoid = scrapertools.get_match(page_url, "http://allmyvideos.net/([a-z0-9A-Z]+)")
    page_url = "http://amvtv.net/embed-" + videoid + "-728x400.html"
    data = scrapertools.cachePage(page_url)
    if "Access denied" in data:
        geobloqueo = True
    else:
        geobloqueo = False

    if geobloqueo:
        # url = "http://www.anonymousbrowser.xyz/hide.php"
        # post = "go=%s" % page_url
        url = "http://www.videoproxy.co/hide.php"
        post = "go=%s" % page_url
        location = scrapertools.get_header_from_response(url, post=post, header_to_get="location")
        # url = "http://www.anonymousbrowser.xyz/" + location
        url = "http://www.videoproxy.co/" + location
        data = scrapertools.cachePage(url)

    # Extract the URL
    media_url = scrapertools.find_single_match(data, '"file" : "([^"]+)",')

    video_urls = []

    if media_url != "":
        if geobloqueo:
            # url = "http://www.anonymousbrowser.xyz/hide.php"
            url = "http://www.videoproxy.co/hide.php"
            post = "go=%s" % media_url
            location = scrapertools.get_header_from_response(url, post=post, header_to_get="location")
            # media_url = "http://www.anonymousbrowser.xyz/" + location + "&direct=false"
            media_url = "http://www.videoproxy.co/" + location + "&direct=false"
        else:
            media_url += "&direct=false"

        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [allmyvideos]", media_url])

        for video_url in video_urls:
            logger.info("streamondemand.servers.allmyvideos %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #30
def play(item):
    logger.info("pelisalacarta.seriesmu play")

    media_url = scrapertools.get_header_from_response(item.url, header_to_get="Location")
    itemlist = servertools.find_video_items(data=media_url)

    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(data=item.url)

    return itemlist
Example #31
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    if config.get_setting("premium", server="onefichier"):
        user = config.get_setting("user", server="onefichier")
        password = config.get_setting("password", server="onefichier")

        url = "https://1fichier.com/login.pl"
        logger.info("url=" + url)
        post_parameters = {"mail": user, "pass": password, "lt": "on", "purge": "on", "valider": "Send"}
        post = urllib.urlencode(post_parameters)
        logger.info("post=" + post)

        data = scrapertools.cache_page(url, post=post)
        # logger.info("data="+data)

        cookies = config.get_cookie_data()
        logger.info("cookies=" + cookies)

        # 1fichier.com   TRUE    /   FALSE   1443553315  SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ
        sid_cookie_value = scrapertools.find_single_match(cookies, "1fichier.com.*?SID\s+([A-Za-z0-9\+\=]+)")
        logger.info("sid_cookie_value=" + sid_cookie_value)

        # .1fichier.com  TRUE    /   FALSE   1443553315  SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ
        cookie = urllib.urlencode({"SID": sid_cookie_value})

        # Find out the real file name
        headers = []
        headers.append(['User-Agent',
                        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12'])
        headers.append(['Cookie', cookie])
        filename = scrapertools.get_header_from_response(page_url, headers=headers, header_to_get="Content-Disposition")
        logger.info("filename=" + filename)

        # Build the final URL for Kodi
        location = page_url + "|Cookie=" + cookie
        logger.info("location=" + location)

        video_urls.append([filename[-4:] + " (Premium) [1fichier]", location])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #32
def get_file_extension(location):
    logger.info("[filenium.py] get_file_extension("+location+")")

    try:
        content_disposition_header = scrapertools.get_header_from_response(location,header_to_get="Content-Disposition")
        logger.info("content_disposition="+content_disposition_header)
        partes=content_disposition_header.split("=")
        if len(partes)<=1:
            extension=""
        else:
            fichero = partes[1]
            fichero = fichero.replace("\\","")
            fichero = fichero.replace("'","")
            fichero = fichero.replace('"',"")
            extension = fichero[-4:]
    except:
        extension=""
    return extension
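
# Content-Disposition has a stdlib parser that is more robust than splitting
# on "="; an alternative sketch of the same helper using cgi.parse_header
# (get_file_extension_v2 is an illustrative name; error handling omitted):
import cgi

def get_file_extension_v2(location):
    header = scrapertools.get_header_from_response(location,
                                                   header_to_get="Content-Disposition")
    _, params = cgi.parse_header(header)
    filename = params.get("filename", "")
    return filename[-4:] if filename else ""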
def get_server_link(first_link, link_type):
    logger.info("[seriespepito.py] first_link=" + str(first_link) +
                ", link_type=" + str(link_type))

    html = scrapertools.downloadpage(first_link,
                                     headers=ENLACESPEPITO_REQUEST_HEADERS)
    logger.info("[seriespepito.py] html=" + html)

    fixed_link = convert_link(html, link_type)
    logger.info("[seriespepito.py] fixed_link=" + fixed_link)

    # Without the Referer it returns a 404
    #ENLACESPEPITO_REQUEST_HEADERS.append(['Referer', first_link])

    return scrapertools.get_header_from_response(
        fixed_link,
        header_to_get="location",
        headers=ENLACESPEPITO_REQUEST_HEADERS)
def play(item):
    logger.info("[cineblog01.py] play")

    print "##############################################################"
    if "go.php" in item.url:
        data = scrapertools.anti_cloudflare(item.url, headers)
        try:
            data = scrapertools.get_match(data, 'window.location.href = "([^"]+)";')
        except IndexError:
            #            data = scrapertools.get_match(data, r'<a href="([^"]+)">clicca qui</a>')
            #   Alternatively, since "Clicca qui per proseguire" sometimes appears:
            data = scrapertools.get_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
        if 'vcrypt' in data:
            data = scrapertools.get_header_from_response(data, headers=headers, header_to_get="Location")
        print "##### play go.php data ##\n%s\n##" % data
    elif "/link/" in item.url:
        data = scrapertools.anti_cloudflare(item.url, headers)
        from core import jsunpack

        try:
            data = scrapertools.get_match(data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
            # data = scrapertools.get_match(data, "(eval.function.p,a,c,k,e,.*?)</script>")
            data = jsunpack.unpack(data)
            print "##### play /link/ unpack ##\n%s\n##" % data
        except IndexError:
            print "##### The content is yet unpacked"

        data = scrapertools.get_match(data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
        print "##### play /link/ data ##\n%s\n##" % data
    else:
        data = item.url
        print "##### play else data ##\n%s\n##" % data
    print "##############################################################"

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
Example #35
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[bliptv.py] get_video_url(page_url='%s')" % page_url)

    video_urls = []

    if page_url.startswith("http://blip.tv/play"):
        redirect = scrapertools.get_header_from_response(
            page_url, header_to_get="location")
        logger.info("[bliptv.py] redirect=" + redirect)

        patron = 'file\=(.*?)$'
        matches = re.compile(patron).findall(redirect)
        logger.info("[bliptv.py] matches1=%d" % len(matches))

        if len(matches) == 0:
            patron = 'file\=([^\&]+)\&'
            matches = re.compile(patron).findall(redirect)
            logger.info("[bliptv.py] matches2=%d" % len(matches))

        if len(matches) > 0:
            url = matches[0]
            logger.info("[bliptv.py] url=" + url)
            url = urllib.unquote(url)
            logger.info("[bliptv.py] url=" + url)

            data = scrapertools.cache_page(url)
            logger.info(data)
            patron = '<media\:content url\="([^"]+)" blip\:role="([^"]+)".*?type="([^"]+)"[^>]+>'
            matches = re.compile(patron).findall(data)
            scrapertools.printMatches(matches)

            for match in matches:
                video_url = ["%s [blip.tv]" % match[1], match[0]]
                video_urls.append(video_url)

    for video_url in video_urls:
        logger.info("[bliptv.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
def play(item):
    logger.info("[streamingpopcorn.py] play")

    data = scrapertools.cache_page(item.url, headers=headers)

    path = scrapertools.find_single_match(data, "href='(linker.php.id=[^']+)'")
    url = urlparse.urljoin(host, path)
    location = scrapertools.get_header_from_response(url, header_to_get="Location")

    itemlist = servertools.find_video_items(data=location)

    for videoitem in itemlist:
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
Example #37
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[mail.ru.py] get_video_url(page_url='%s')" % page_url)

    video_urls = []

    # Download the page
    data = scrapertools.cache_page( page_url )
    logger.info("data="+data)
    url = scrapertools.get_match( data , 'videoSrc\s*\=\s*"([^"]+)"' )
    media_url = scrapertools.get_header_from_response(url,header_to_get="location")
    video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:] + " [mail.ru]",media_url ] )

    for video_url in video_urls:
        logger.info("[mail.ru] %s - %s" % (video_url[0],video_url[1]))

    return video_urls


Example #38
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[dailymotion.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    data = scrapertools.cache_page(page_url)
    #logger.info("data="+data)
    sequence = re.compile('"sequence":"(.+?)"').findall(data)
    logger.info("sequence=" + str(sequence))
    newseqeunce = urllib.unquote(sequence[0]).decode('utf8').replace(
        '\\/', '/')
    logger.info("newseqeunce=" + newseqeunce)

    dm_low = re.compile('"sdURL":"(.+?)"').findall(newseqeunce)
    dm_high = re.compile('"hqURL":"(.+?)"').findall(newseqeunce)
    videoUrl = ''

    if len(dm_low) > 0:
        video_urls.append(["SD [dailymotion]", dm_low[0]])

    if len(dm_high) > 0:
        video_urls.append(["HD [dailymotion]", dm_high[0]])

    try:
        alternate_url = re.compile('"video_url":"(.+?)"').findall(newseqeunce)
        alternate_url = urllib.unquote(
            alternate_url[0]).decode('utf8').replace('\\/', '/')

        location = scrapertools.get_header_from_response(
            alternate_url, header_to_get="location")

        video_urls.append(["SD [dailymotion]", location])
    except:
        pass

    for video_url in video_urls:
        logger.info("[dailymotion.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #39
def test_video_exists(page_url):
    logger.info("[putlocker.py] test_video_exists(page_url='%s')" % page_url)

    location = scrapertools.get_header_from_response(url=page_url,
                                                     header_to_get="location")
    if "&404" in location:
        return False, "El archivo no existe<br/>en putlocker o ha sido borrado."

    data = scrapertools.cache_page(page_url)

    patron = '<form method="post">[^<]+'
    patron += '<input type="hidden" value="([0-9a-f]+?)" name="([^"]+)">[^<]+'
    patron += '<input name="confirm" type="submit" value="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches) == 0: return True, ""

    post = matches[0][1] + "=" + matches[0][0] + "&confirm=" + (
        matches[0][2].replace(" ", "+"))
    headers = []
    headers.append([
        'User-Agent',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:10.0.2) Gecko/20100101 Firefox/10.0.2'
    ])
    headers.append([
        "Accept",
        "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
    ])
    headers.append(['Referer', page_url])

    data = scrapertools.cache_page(page_url, post=post, headers=headers)
    logger.info("data=" + data)

    if '<div id="disabled">Encoding to enable streaming is in progresss. Try again soon.</div>' in data:
        try:
            title = scrapertools.get_match(
                data, "<title>PutLocker - ([^<]+)</title>")
        except:
            title = ""
        return False, "El video \"" + title + "\"<br/>esta pendiente de recodificar"

    return True, ""
Example #40
def decodeOpenLoad(html, video=True):
    if video == True:
        aastring = re.search(r"<video(?:.|\s)*?<script\s[^>]*?>((?:.|\s)*?)</script", html, re.DOTALL | re.IGNORECASE).group(1)
    else:
        aastring = re.search(r"Click to start Download(?:.|\s).*?<script\s[^>]*?>((?:.|\s)*?)</script", html, re.DOTALL | re.IGNORECASE).group(1)
    
    aastring = aastring.replace("((゚ー゚) + (゚ー゚) + (゚Θ゚))", "9")
    aastring = aastring.replace("((゚ー゚) + (゚ー゚))","8")
    aastring = aastring.replace("((゚ー゚) + (o^_^o))","7")
    aastring = aastring.replace("((o^_^o) +(o^_^o))","6")
    aastring = aastring.replace("((゚ー゚) + (゚Θ゚))","5")
    aastring = aastring.replace("(゚ー゚)","4")
    aastring = aastring.replace("((o^_^o) - (゚Θ゚))","2")
    aastring = aastring.replace("(o^_^o)","3")
    aastring = aastring.replace("(゚Θ゚)","1")
    aastring = aastring.replace("(c^_^o)","0")
    aastring = aastring.replace("(゚Д゚)[゚ε゚]","\\")
    aastring = aastring.replace("(3 +3 +0)","6")
    aastring = aastring.replace("(3 - 1 +0)","2")
    aastring = aastring.replace("(1 -0)","1")
    aastring = aastring.replace("(4 -0)","4")

    decodestring = re.search(r"\\\+([^(]+)", aastring, re.DOTALL | re.IGNORECASE).group(1)
    decodestring = "\\+"+ decodestring
    decodestring = decodestring.replace("+","")
    decodestring = decodestring.replace(" ","")
    
    decodestring = decode(decodestring)
    decodestring = decodestring.replace("\\/","/")

    # Header for the download
    header_down = "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0|"
    if video == True:
        videourl = re.search(r'vr ="([^"]+)"', decodestring, re.DOTALL | re.IGNORECASE).group(1)
        extension = re.search(r'vt ="([^"]+)"', decodestring, re.DOTALL | re.IGNORECASE).group(1)
        videourl = scrapertools.get_header_from_response(videourl,header_to_get="location")
        return videourl+header_down+extension, extension
    else:
        videourl = re.search(r'\'href\',"([^"]+)"', decodestring, re.DOTALL | re.IGNORECASE).group(1)
        extension = videourl[-4:]
        return videourl+header_down+extension, extension
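
# For reference, a hypothetical consumption sketch: decodeOpenLoad returns a
# Kodi-style URL in which request headers ride after '|' separators ('html'
# is assumed to hold the downloaded Openload page):
#   url_with_headers, extension = decodeOpenLoad(html, video=True)
#   plain_url = url_with_headers.split("|")[0]  # strip the header suffix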
Example #41
def play(item):
    logger.info("[streamingpopcorn.py] play")

    data = scrapertools.cache_page(item.url, headers=headers)

    # <a target='_blank' href='linker.php?id=yeTp0t%2BjkqLr6dyP5tjj2%2BXD05LE3%2BKR48rX1tyx257M09DeqMbamKadtg%3D%3D&umId=5493&src='><img src='images/icons/youtube.png'>&nbsp;Guarda su YouTube</a>

    path = scrapertools.find_single_match(data, "href='(linker.php.id=[^']+)'")
    url = urlparse.urljoin(host, path)
    location = scrapertools.get_header_from_response(url,
                                                     header_to_get="Location")

    itemlist = servertools.find_video_items(data=location)

    for videoitem in itemlist:
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
Example #42
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info(
        "pelisalacarta.servers.dailymotion get_video_url(page_url='%s')" %
        page_url)
    video_urls = []

    data, headers = scrapertools.read_body_and_headers(page_url,
                                                       headers=DEFAULT_HEADERS)
    data = data.replace("\\", "")
    '''
    "240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
    '''
    patron = '"([^"]+)":\[\{"type":"video/([^"]+)","url":"([^"]+)"\}\]'
    matches = scrapertools.find_multiple_matches(data, patron)
    subtitle = scrapertools.find_single_match(
        data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')

    header_cookie = ""
    for cookie in headers:
        if cookie[0] == "set-cookie":
            header_cookie = cookie[1]
    if header_cookie != "":
        DEFAULT_HEADERS.append(['Cookie', header_cookie])

    for stream_name, stream_type, stream_url in matches:
        stream_url = scrapertools.get_header_from_response(
            stream_url, header_to_get="location", headers=DEFAULT_HEADERS)
        video_urls.append([
            stream_name + "p ." + stream_type + " [dailymotion]", stream_url,
            0, subtitle
        ])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.dailymotion %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
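# Note: these examples all lean on scrapertools.get_header_from_response()
# to read the Location header of a redirect without downloading the body.
# Its real implementation is not shown here; a rough urllib2 stand-in,
# written only to illustrate the idea (get_location and _NoRedirect are
# made-up names), might be:
import urllib2

class _NoRedirect(urllib2.HTTPRedirectHandler):
    # Returning None makes urllib2 raise HTTPError on a 3xx instead of
    # silently following it, so the Location header stays readable
    def redirect_request(self, req, fp, code, msg, hdrs, newurl):
        return None

def get_location(url, headers=None):
    req = urllib2.Request(url)
    for name, value in (headers or []):
        req.add_header(name, value)
    try:
        urllib2.build_opener(_NoRedirect()).open(req)
    except urllib2.HTTPError as e:
        return e.headers.get("location", "")
    return ""  # the server did not redirect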
Beispiel #43
0
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[veehd.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    headers=[]
    headers.append(['User-Agent','Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:17.0) Gecko/20100101 Firefox/17.0'])
    setcookie = scrapertools.get_header_from_response(page_url,header_to_get="set-cookie")
    logger.info("setcookie="+setcookie)
    try:
        cookie = scrapertools.get_match(setcookie,"(PHPSESSID.*?)\;")
    except:
        cookie = ""
    
    if cookie!="":
        headers.append(['Cookie',cookie+"; pp=1356263122; ppt=1"])
    
    data = scrapertools.cache_page(page_url , headers=headers)
    #logger.info("data="+data)

    url = scrapertools.get_match(data,'\$\("\#playeriframe"\).attr\(\{src \: "([^"]+)"')
    url = urlparse.urljoin(page_url,url)
    logger.info("url="+url)

    headers.append(['Referer',page_url[:-1] ])
    data = scrapertools.cache_page( url , headers=headers )
    logger.info("data="+data)

    #<param name="src" value="http://v35.veehd.com/dl/f118c68806e2a98ca38a70b44b89d52b/1356264992/6000.4623246.avi&b=390">
    media_url = scrapertools.find_single_match(data,'<param name="src" value="([^"]+)"')
    
    video_urls = []
    
    if media_url != "":
        video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [veehd]",media_url])

    for video_url in video_urls:
        logger.info("[veehd.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Beispiel #44
0
def play(item):
    logger.info("pelisalacarta.channels.descargasmix play")
    itemlist = []
    if "enlacesmix.com" in item.url:
        DEFAULT_HEADERS.append(["Referer", item.extra])
        if not item.url.startswith("http:"):
            item.url = "http:" + item.url
        data = scrapertools.downloadpage(item.url, headers=DEFAULT_HEADERS)
        item.url = scrapertools.find_single_match(data, 'iframe src="([^"]+)"')

        enlaces = servertools.findvideos(data=item.url)
        if len(enlaces) > 0:
            enlace = enlaces[0]
            itemlist.append(
                item.clone(action="play", server=enlace[2], url=enlace[1]))
    elif item.server == "directo":
        global DEFAULT_HEADERS
        DEFAULT_HEADERS.append(["Referer", item.extra])
        data = scrapertools.downloadpage(item.url, headers=DEFAULT_HEADERS)
        subtitulo = scrapertools.find_single_match(data,
                                                   "var subtitulo='([^']+)'")
        DEFAULT_HEADERS[1][1] = item.url
        calidades = ["1080p", "720p", "480p", "360p"]
        for i in range(0, len(calidades)):
            url_redirect = scrapertools.find_single_match(
                data, "{file:'([^']+)',label:'" + calidades[i] + "'")
            if url_redirect:
                url_video = scrapertools.get_header_from_response(
                    url_redirect,
                    header_to_get="location",
                    headers=DEFAULT_HEADERS)
                if url_video:
                    url_video = url_video.replace(",", "%2C")
                    itemlist.append(
                        item.clone(url=url_video, subtitle=subtitulo))
                    break
    else:
        itemlist.append(item.clone())

    return itemlist
Beispiel #45
0
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[firedrive.py] url="+page_url)
    video_urls = []
    headers = []
    headers.append( [ "User-Agent"     , "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_5) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17"] )
    headers.append( [ "Accept"         , "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" ])
    headers.append( [ "Accept-Charset" , "ISO-8859-1,utf-8;q=0.7,*;q=0.3" ])
    headers.append( [ "Accept-Encoding", "gzip,deflate,sdch" ])
    headers.append( [ "Accept-Language", "es-ES,es;q=0.8" ])
    headers.append( [ "Cache-Control"  , "max-age=0" ])
    headers.append( [ "Connection"     , "keep-alive" ])
    headers.append( [ "Origin"         , "http://www.firedrive.com" ])
 
    # First access
    data = scrapertools.cache_page(page_url,headers=headers)
    #logger.info("data="+data)

    # Simulate the "continue to video" step
    confirm = scrapertools.find_single_match(data,'<input type="hidden" name="confirm" value="([^"]+)"')
    post = urllib.urlencode({'confirm':confirm})
    logger.info("post="+post)
    headers.append( ["Referer",page_url] )
    headers.append( ["Content-Type","application/x-www-form-urlencoded"])
    data = scrapertools.cache_page( page_url , post=post, headers=headers )
    logger.info("data="+data)
    
    # Descriptor URL
    url = scrapertools.find_single_match(data,"file\: '([^']+)'")
    logger.info("url="+url)

    # Video URL
    media_url = scrapertools.get_header_from_response(url,header_to_get="location")
    video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:] + " [firedrive]",media_url ] )    

    for video_url in video_urls:
        logger.info("[firedrive.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Beispiel #46
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[twitvid.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    # Read the player page
    data = scrapertools.cache_page(page_url)
    logger.info("data=" + data)
    url = scrapertools.get_match(data, 'video_path="([^"]+)"')
    logger.info("url=" + url)
    import urlparse
    url = urlparse.urljoin(page_url, url)
    location = scrapertools.get_header_from_response(url,
                                                     header_to_get="location")

    video_urls.append([
        scrapertools.get_filename_from_url(location)[-4:] + " [twitvid]",
        location
    ])

    return video_urls
Beispiel #47
0
def play(item):
    logger.info("pelisalacarta.cultmoviez play url="+item.url)
    url_subtitle = ""
    itemlist = []
    if not item.extra.startswith("tt"):
        url_subtitle = "http://www.cultmoviez.info/playercult/bajarsub.php?%s" % item.extra
        content = scrapertools.get_header_from_response(url_subtitle, header_to_get="Content-Type")
        if content == "text/html": url_subtitle += '_HD'
    if "[directo]" in item.title:
        post = "fv=20&url="+item.url+"&sou=pic"
        data = scrapertools.cache_page("http://www.cultmoviez.info/playercult/pk/pk/plugins/player_p2.php", post=post)
        videourl = scrapertools.find_multiple_matches(data, '"url":"([^"]+)"')
        if len(videourl)>0:
            itemlist.append(Item(channel=__channel__, title=item.title, url=videourl[len(videourl)-1], server="directo", action="play", subtitle=url_subtitle))
        return itemlist
    else:
        itemlist = servertools.find_video_items(data=item.url)

        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.channel = __channel__
            videoitem.subtitle = url_subtitle

    return itemlist
Beispiel #48
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[ustream.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    headers = [[
        "User-Agent",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:14.0) Gecko/20100101 Firefox/14.0.1"
    ]]
    location = scrapertools.get_header_from_response(page_url,
                                                     header_to_get="location")
    logger.info("[ustream.py] location=" + location)

    page_url = urlparse.urljoin(page_url, location)
    logger.info("[ustream.py] page_url=" + page_url)

    data = scrapertools.cache_page(
        "http://piscui.webear.net/ustream.php?url=" + page_url,
        headers=headers)
    logger.info("data=" + data)

    video_url = scrapertools.get_match(
        data, '<textarea rows=3 cols=70>(.*?)</textarea>')

    logger.info("video_url=" + video_url)

    if video_url != "":
        video_urls.append(["[ustream]", video_url])

    for video_url in video_urls:
        logger.info("[ustream.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Beispiel #49
0
def findvideos(item):
    logger.info("[streamondemand-pureita altadefinizione_due] findvideos")

    data = httptools.downloadpage(item.url, headers=headers).data

    path = scrapertools.find_single_match(data,
                                          '<p><iframe src="([^"]+)"[^>]+>')
    url = path
    location = scrapertools.get_header_from_response(url,
                                                     header_to_get="Location")

    itemlist = servertools.find_video_items(data=location)

    for videoitem in itemlist:
        videoitem.title = item.title + "  [COLOR orange]" + videoitem.title + "[/COLOR]"
        videoitem.fulltitle = item.fulltitle + videoitem.title
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist


# ===================================================================================================================================================
Beispiel #50
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info(
        "pelisalacarta.servers.yourupload get_video_url(page_url='%s')" %
        page_url)

    data = scrapertools.cache_page(page_url)
    url = scrapertools.find_single_match(data, "file\: '([^']+)'")

    headers = []
    headers.append(["User-Agent", USER_AGENT])
    headers.append(["Referer", page_url])
    headers.append(["X-Requested-With", "ShockwaveFlash/19.0.0.185"])

    media_url = scrapertools.get_header_from_response(url,
                                                      headers=headers,
                                                      header_to_get="location")
    logger.info("pelisalacarta.servers.yourupload media_url=" + media_url)
    media_url = media_url.replace("?null&start=0", "")
    logger.info("pelisalacarta.servers.yourupload media_url=" + media_url)
    #media_url = media_url + "|" + urllib.urlencode({'User-Agent' : USER_AGENT})

    video_urls = []
    video_urls.append([
        scrapertools.get_filename_from_url(url)[-4:] + " [yourupload]",
        media_url
    ])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.yourupload %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
Beispiel #51
0
def findvideos(item):
    logger.info("pelisalacarta.peliculasdk findvideos")

    itemlist = []
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"<!--.*?-->", "", data)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    bloque_tab = scrapertools.find_single_match(
        data, '<div id="verpelicula">(.*?)<div class="tab_container">')
    patron = '<li><a href="#([^<]+)"><span class="re">\d<\/span><span class="([^<]+)"><\/span><span class=.*?>([^<]+)<\/span>'
    check = re.compile(patron, re.DOTALL).findall(bloque_tab)

    servers_data_list = []

    patron = '<div id="(tab\d+)" class="tab_content"><script type="text/rocketscript">(\w+)\("([^"]+)"\)</script></div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if len(matches) == 0:
        patron = '<div id="(tab\d+)" class="tab_content"><script>(\w+)\("([^"]+)"\)</script></div>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        print matches

    for check_tab, server, id in matches:
        scrapedplot = scrapertools.get_match(
            data, '<span class="clms">(.*?)</div></div>')
        plotformat = re.compile('(.*?:) </span>',
                                re.DOTALL).findall(scrapedplot)
        scrapedplot = scrapedplot.replace(
            scrapedplot,
            bbcode_kodi2html("[COLOR white]" + scrapedplot + "[/COLOR]"))

        for plot in plotformat:
            scrapedplot = scrapedplot.replace(
                plot,
                bbcode_kodi2html("[COLOR red][B]" + plot + "[/B][/COLOR]"))
        scrapedplot = scrapedplot.replace("</span>", "[CR]")
        scrapedplot = scrapedplot.replace(":", "")
        if check_tab in str(check):
            idioma, calidad = scrapertools.find_single_match(
                str(check), "" + check_tab + "', '(.*?)', '(.*?)'")
            servers_data_list.append([server, id, idioma, calidad])

    url = "http://www.peliculasdk.com/Js/videod.js"
    data = scrapertools.cachePage(url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data = data.replace(
        '<iframe width="100%" height="400" scrolling="no" frameborder="0"', '')

    patron = 'function (\w+)\(id\).*?'
    patron += '"([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for server, url in matches:
        for enlace, id, idioma, calidad in servers_data_list:
            if server == enlace:
                video_url = re.sub(r"embed\-|\-630x400\.html", "", url)
                video_url = video_url.replace("'+codigo+'", id)
                if "goo.gl" in video_url:
                    video_url = scrapertools.get_header_from_response(
                        "http://anonymouse.org/cgi-bin/anon-www.cgi/" +
                        video_url,
                        header_to_get="location")
                servertitle = scrapertools.get_match(video_url,
                                                     'http.*?://(.*?)/')
                servertitle = servertitle.replace(
                    servertitle,
                    bbcode_kodi2html("[COLOR red]" + servertitle + "[/COLOR]"))
                servertitle = servertitle.replace("embed.", "")
                servertitle = servertitle.replace("player.", "")
                servertitle = servertitle.replace("api.video.", "")
                servertitle = servertitle.replace("hqq.tv", "netu.tv")
                servertitle = servertitle.replace("anonymouse.org", "netu.tv")
                title = bbcode_kodi2html(
                    "[COLOR orange]Ver en --[/COLOR]"
                ) + servertitle + " " + idioma + " " + calidad
                itemlist.append(
                    Item(channel=item.channel,
                         title=title,
                         url=video_url,
                         action="play",
                         thumbnail=item.category,
                         plot=scrapedplot,
                         fanart=item.show))

    return itemlist
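# Note: a hypothetical walk-through of the template substitution above
# (the domain and the id "abc123" are made up for illustration):
import re
url = "http://powvideo.net/embed-'+codigo+'-630x400.html"  # template from videod.js
video_url = re.sub(r"embed\-|\-630x400\.html", "", url)    # strip the embed wrapper
video_url = video_url.replace("'+codigo+'", "abc123")      # inject the video id
# video_url is now "http://powvideo.net/abc123"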
Beispiel #52
0
def play(url, xlistitem, is_view=None, subtitle=""):

    # -- Needed for some sites ----------------------------------
    if not url.endswith(".torrent") and not url.startswith("magnet"):
        t_file = scrapertools.get_header_from_response(
            url, header_to_get="location")
        if len(t_file) > 0:
            url = t_file
            t_file = scrapertools.get_header_from_response(
                url, header_to_get="location")
        if len(t_file) > 0:
            url = t_file

    # -- Create two folders in downloads for the files ----------
    save_path_videos = os.path.join(config.get_setting("downloadpath"),
                                    "torrent-videos")
    save_path_torrents = os.path.join(config.get_setting("downloadpath"),
                                      "torrent-torrents")
    if not os.path.exists(save_path_videos): os.mkdir(save_path_videos)
    if not os.path.exists(save_path_torrents): os.mkdir(save_path_torrents)

    # -- Use a torrent file from the web, a magnet link or disk -
    if not os.path.isfile(url) and not url.startswith("magnet"):
        # -- http - create the torrent file ----------------------
        data = url_get(url)
        # -- The torrent name is the one contained in the data  --
        re_name = urllib.unquote(
            scrapertools.get_match(data, ':name\d+:(.*?)\d+:'))
        torrent_file = filetools.join(save_path_torrents,
                                      filetools.encode(re_name + '.torrent'))

        f = open(torrent_file, 'wb')
        f.write(data)
        f.close()
    elif os.path.isfile(url):
        # -- file - to use torrents from the local disk ----------
        torrent_file = url
    else:
        # -- magnet ---------------------------------------------
        torrent_file = url
    # -----------------------------------------------------------

    # -- MCT - MiniClienteTorrent -------------------------------
    ses = lt.session()

    print "### Init session ########"
    print lt.version
    print "#########################"

    ses.add_dht_router("router.bittorrent.com", 6881)
    ses.add_dht_router("router.utorrent.com", 6881)
    ses.add_dht_router("dht.transmissionbt.com", 6881)

    trackers = [
        "udp://tracker.openbittorrent.com:80/announce",
        "http://tracker.torrentbay.to:6969/announce",
        "http://tracker.pow7.com/announce",
        "udp://tracker.ccc.de:80/announce",
        "udp://open.demonii.com:1337",
        "http://9.rarbg.com:2710/announce",
        "http://bt.careland.com.cn:6969/announce",
        "http://explodie.org:6969/announce",
        "http://mgtracker.org:2710/announce",
        "http://tracker.best-torrents.net:6969/announce",
        "http://tracker.tfile.me/announce",
        "http://tracker1.wasabii.com.tw:6969/announce",
        "udp://9.rarbg.com:2710/announce",
        "udp://9.rarbg.me:2710/announce",
        "udp://coppersurfer.tk:6969/announce",
        "http://www.spanishtracker.com:2710/announce",
        "http://www.todotorrents.com:2710/announce",
    ]

    video_file = ""
    # -- magnet2torrent -----------------------------------------
    if torrent_file.startswith("magnet"):
        try:
            tempdir = tempfile.mkdtemp()
        except IOError:
            tempdir = os.path.join(save_path_torrents, "temp")
            if not os.path.exists(tempdir): os.mkdir(tempdir)

        params = {
            'save_path': tempdir,
            'trackers': trackers,
            'storage_mode': lt.storage_mode_t.storage_mode_allocate,
            'paused': False,
            'auto_managed': True,
            'duplicate_is_error': True
        }
        h = lt.add_magnet_uri(ses, torrent_file, params)
        dp = xbmcgui.DialogProgress()
        dp.create('pelisalacarta-MCT')
        while not h.has_metadata():
            message, porcent, msg_file, s, download = getProgress(
                h, "Creando torrent desde magnet")
            dp.update(porcent, message, msg_file)
            if s.state == 1: download = 1
            if dp.iscanceled():
                dp.close()
                remove_files(download, torrent_file, video_file, ses, h)
                return

            h.force_dht_announce()
            xbmc.sleep(1000)

        dp.close()
        info = h.get_torrent_info()
        data = lt.bencode(lt.create_torrent(info).generate())
        #torrent_file = os.path.join(save_path_torrents, info.name() + ".torrent")
        torrent_file = os.path.join(
            save_path_torrents,
            unicode(info.name(), "utf-8", errors="replace") + ".torrent")
        f = open(torrent_file, 'wb')
        f.write(data)
        f.close()
        ses.remove_torrent(h)
        shutil.rmtree(tempdir)
    # -----------------------------------------------------------

    # -- Torrent files ------------------------------------------
    e = lt.bdecode(open(torrent_file, 'rb').read())
    info = lt.torrent_info(e)

    # -- The biggest file (or one of the biggest) is assumed to -
    # -- be the video, or at least the video used as reference  -
    # -- for the file type                                      -
    print "##### Archivos ## %s ##" % len(info.files())
    _index_file, _video_file, _size_file = get_video_file(info)

    _video_file_ext = os.path.splitext(_video_file)[1]
    if _video_file_ext == ".avi" or _video_file_ext == ".mp4":
        print "##### storage_mode_t.storage_mode_allocate (" + _video_file_ext + ") #####"
        h = ses.add_torrent({
            'ti':
            info,
            'save_path':
            save_path_videos,
            'trackers':
            trackers,
            'storage_mode':
            lt.storage_mode_t.storage_mode_allocate
        })
    else:
        print "##### storage_mode: none (" + _video_file_ext + ") #####"
        h = ses.add_torrent({
            'ti':
            info,
            'save_path':
            save_path_videos,
            'trackers':
            trackers,
            'storage_mode':
            lt.storage_mode_t.storage_mode_sparse
        })
    # -----------------------------------------------------------

    # -- Sequential download - piece 1, piece 2, ... ------------
    h.set_sequential_download(True)

    h.force_reannounce()
    h.force_dht_announce()

    # -- Prioritize/select the file -----------------------------
    _index, video_file, video_size = get_video_files_sizes(info)
    if _index == -1:
        _index = _index_file
        video_file = _video_file
        video_size = _size_file

    # -- Initialize variables for the automatic 'pause' when    -
    # -- the video gets close to an incomplete piece            -
    is_greater_num_pieces = False
    is_greater_num_pieces_plus = False
    is_greater_num_pieces_pause = False

    #porcent4first_pieces = int( video_size / 1073741824 )
    porcent4first_pieces = int(video_size * 0.000000005)
    if porcent4first_pieces < 10: porcent4first_pieces = 10
    if porcent4first_pieces > 100: porcent4first_pieces = 100
    #num_pieces_to_resume = int( video_size / 1610612736 )
    num_pieces_to_resume = int(video_size * 0.0000000025)
    if num_pieces_to_resume < 5: num_pieces_to_resume = 5
    if num_pieces_to_resume > 25: num_pieces_to_resume = 25

    print "##### porcent4first_pieces ## %s ##" % porcent4first_pieces
    print "##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume

    # -- Prioritize or select the pieces of the file that is    -
    # -- going to be played, via 'file_priorities'              -
    piece_set = set_priority_pieces(h, _index, video_file, video_size)

    # -- Create the progress dialog for the first loop ----------
    dp = xbmcgui.DialogProgress()
    dp.create('pelisalacarta-MCT')

    _pieces_info = {}

    # -- Two nested loops ---------------------------------------
    # -- Download - first loop                                  -
    while not h.is_seed():
        s = h.status()

        xbmc.sleep(100)

        # -- Fetch the progress data ----------------------------
        message, porcent, msg_file, s, download = getProgress(h,
                                                              video_file,
                                                              _pf=_pieces_info)

        # -- If the state is 'checking', a download exists ------
        # -- 'download' is used to know whether there is        -
        # -- downloaded data for the 'remove_files' dialog      -
        if s.state == 1: download = 1

        # -- Player - play --------------------------------------
        # -- Check whether the pieces needed to start the       -
        # -- video have been completed                          -
        first_pieces = True
        _p = ""
        _c = 0
        for i in range(piece_set[0], piece_set[porcent4first_pieces]):
            _p += "[%s:%s]" % (i, h.have_piece(i))
            first_pieces &= h.have_piece(i)
            if h.have_piece(i): _c += 1
        _pieces_info = {
            'current': 0,
            'continuous': "%s/%s" % (_c, porcent4first_pieces),
            'have': h.status().num_pieces,
            'len': len(piece_set)
        }
        _p = "##### first_pieces [%s/%s][%s]: " % (_c, porcent4first_pieces,
                                                   len(piece_set)) + _p
        print _p
        # -- -------------------------------------------------- -

        if is_view != "Ok" and first_pieces:
            print "##### porcent [%.2f%%]" % (s.progress * 100)
            is_view = "Ok"
            dp.close()

            # -- Player - watch the video -----------------------
            playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
            playlist.clear()
            #ren_video_file = os.path.join( save_path_videos, video_file ).replace('\\','\\\\')
            ren_video_file = os.path.join(save_path_videos, video_file)
            playlist.add(ren_video_file, xlistitem)
            #playlist.add( os.path.join( save_path_videos, video_file ), xlistitem )
            #playlist.add( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20'), xlistitem )
            player = play_video(xbmc.PLAYER_CORE_AUTO)
            player.play(playlist)
            '''
            # -- Player - Ver el vídeo --------------------------
            player = play_video()
            #player.play( os.path.join( save_path_videos, video_file ) )
            player.play( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20') )
            '''

            #player.play( os.path.join( save_path_videos, video_file ) )

            # -- Cancellation counter for the automatic 'pause' -
            # -- window                                         -
            is_greater_num_pieces_canceled = 0
            continuous_pieces = 0
            porcent_time = 0.00
            current_piece = 0

            # -- Prevent Kodi from resuming a file that was     -
            # -- played earlier and then deleted, so it does    -
            # -- not try to resume at a piece that has not been -
            # -- completed yet and trigger the automatic        -
            # -- 'pause'                                        -
            not_resume = True

            # -- Subtitles flag
            _sub = False

            # -- Second loop - Player - event handling ----------
            while player.isPlaying():
                xbmc.sleep(100)

                # -- Add subtitles
                if subtitle != "" and not _sub:
                    _sub = True
                    player.setSubtitles(subtitle)

                # -- Prevent Kodi from resuming at the start of -
                # -- the download of a known file               -
                if not_resume:
                    player.seekTime(0)
                    not_resume = False
                    #xbmc.sleep(1000)

                # -- Automatic 'pause' control                  -
                continuous_pieces = count_completed_continuous_pieces(
                    h, piece_set)

                if xbmc.Player().isPlaying():

                    # -- Video progress percentage --------------
                    porcent_time = player.getTime() / player.getTotalTime(
                    ) * 100

                    # -- Piece currently being played -----------
                    current_piece = int(porcent_time / 100 * len(piece_set))

                    # -- Control flags --------------------------
                    is_greater_num_pieces = (
                        current_piece >
                        continuous_pieces - num_pieces_to_resume)
                    is_greater_num_pieces_plus = (
                        current_piece + porcent4first_pieces >
                        continuous_pieces)
                    is_greater_num_pieces_finished = (
                        current_piece + porcent4first_pieces >= len(piece_set))

                    # -- Trigger the automatic 'pause' ----------
                    if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
                        is_greater_num_pieces_pause = True
                        player.pause()

                    # -- Log ------------------------------------
                    _TotalTime = player.getTotalTime()
                    _Time = player.getTime()
                    _print_log = "\n##### Player ##################################"
                    _print_log += "\nTamaño del vídeo: %s" % video_size
                    _print_log += "\nTotal piezas: %s" % len(piece_set)
                    _print_log += "\nPiezas contiguas: %s" % continuous_pieces
                    _print_log += "\n-----------------------------------------------"
                    _print_log += "\nVídeo-Total segundos: %s" % _TotalTime
                    _print_log += "\nVídeo-Progreso segundos: %s" % _Time
                    _print_log += "\nVídeo-Progreso porcentaje: %.2f%%" % porcent_time
                    _print_log += "\n-----------------------------------------------"
                    _print_log += "\ncurrent_piece: %s" % current_piece
                    _print_log += "\nis_greater_num_pieces: %s" % is_greater_num_pieces
                    _print_log += "\nis_greater_num_pieces_plus: %s" % is_greater_num_pieces_plus
                    _print_log += "\nis_greater_num_pieces_pause: %s" % is_greater_num_pieces_pause
                    _print_log += "\nis_greater_num_pieces_finished: %s" % is_greater_num_pieces_finished
                    _print_log += "\nPieza que se está visionando: %.2f" % (
                        porcent_time / 100 * len(piece_set))
                    _print_log += "\nOffset que se está visionando: %.2f" % (
                        porcent_time / 100 * video_size)
                    if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
                        _print_log += "\n+++++++++++++++++++++++++++++++++++++++++++++++"
                        _print_log += "\nPausa con:"
                        _print_log += "\n    current_piece = %s" % current_piece
                        _print_log += "\n    continuous_pieces = %s" % continuous_pieces
                    _print_log += "\n###############################################"
                    print _print_log
                    # -------------------------------------------
                    _pieces_info = {
                        'current': current_piece,
                        'continuous': continuous_pieces,
                        'have': h.status().num_pieces,
                        'len': len(piece_set)
                    }

                # -- Close the progress dialog ------------------
                if player.resumed:
                    dp.close()

                # -- Show the progress dialog -------------------
                if player.paused:
                    # -- Create the dialog if it does not exist -
                    if not player.statusDialogoProgress:
                        dp = xbmcgui.DialogProgress()
                        dp.create('pelisalacarta-MCT')
                        player.setDialogoProgress()

                    # -- Status dialogs while watching ----------
                    if not h.is_seed():
                        # -- Fetch the progress data ------------
                        message, porcent, msg_file, s, download = getProgress(
                            h, video_file, _pf=_pieces_info)
                        dp.update(porcent, message, msg_file)
                    else:
                        dp.update(100, "Descarga completa: " + video_file)

                    # -- The progress dialog was canceled while -
                    # -- watching. Continue                     -
                    if dp.iscanceled():
                        dp.close()
                        player.pause()

                    # -- The progress dialog was canceled while -
                    # -- watching, in the automatic 'pause'     -
                    # -- window. Stop if the counter reaches 3  -
                    if dp.iscanceled() and is_greater_num_pieces_pause:
                        is_greater_num_pieces_canceled += 1
                        if is_greater_num_pieces_canceled == 3:
                            player.stop()

                    # -- Disable the automatic 'pause' and ------
                    # -- reset the cancellation counter         -
                    if not dp.iscanceled(
                    ) and not is_greater_num_pieces_plus and is_greater_num_pieces_pause:
                        dp.close()
                        player.pause()
                        is_greater_num_pieces_pause = False
                        is_greater_num_pieces_canceled = 0

                    # -- The user canceled playback. Finish -----
                    if player.ended:
                        # -- Remove-files dialog ----------------
                        remove_files(download, torrent_file, video_file, ses,
                                     h)
                        return

        # -- Kodi - playback was closed -------------------------
        # -- Continue | Finish                                  -
        if is_view == "Ok" and not xbmc.Player().isPlaying():

            if info.num_files() == 1:
                # -- Continue-or-finish dialog ------------------
                d = xbmcgui.Dialog()
                ok = d.yesno('pelisalacarta-MCT', 'XBMC-Kodi Cerró el vídeo.',
                             '¿Continuar con la sesión?')
            else:
                ok = False
            # -- YES --------------------------------------------
            if ok:
                # -- Continue: ----------------------------------
                is_view = None
            else:
                # -- Finish: ------------------------------------
                # -- Check whether the video belongs to a list  -
                # -- of files                                   -
                _index, video_file, video_size = get_video_files_sizes(info)
                if _index == -1 or info.num_files() == 1:
                    # -- Remove-files dialog --------------------
                    remove_files(download, torrent_file, video_file, ses, h)
                    return
                else:
                    # -- File list. Options dialog --------------
                    piece_set = set_priority_pieces(h, _index, video_file,
                                                    video_size)
                    is_view = None
                    dp = xbmcgui.DialogProgress()
                    dp.create('pelisalacarta-MCT')

        # -- Show the progress before watching ------------------
        if is_view != "Ok":
            dp.update(porcent, message, msg_file)

        # -- The progress was canceled before watching ----------
        # -- Finish                                             -
        if dp.iscanceled():
            dp.close()
            # -- Check whether the video belongs to a list of   -
            # -- files                                          -
            _index, video_file, video_size = get_video_files_sizes(info)
            if _index == -1 or info.num_files() == 1:
                # -- Remove-files dialog ------------------------
                remove_files(download, torrent_file, video_file, ses, h)
                return
            else:
                # -- File list. Options dialog ------------------
                piece_set = set_priority_pieces(h, _index, video_file,
                                                video_size)
                is_view = None
                dp = xbmcgui.DialogProgress()
                dp.create('pelisalacarta-MCT')

    # -- Kodi - Error? - It should never get here ---------------
    if is_view == "Ok" and not xbmc.Player().isPlaying():
        dp.close()
        # -- Remove-files dialog --------------------------------
        remove_files(download, torrent_file, video_file, ses, h)

    return
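# Note: getProgress() is called throughout this example but not shown. Its
# return shape (message, porcent, msg_file, s, download) is taken from the
# call sites above; everything else in this sketch is an assumption:
def getProgress(h, title, _pf=None):
    s = h.status()
    porcent = int(s.progress * 100)
    download = 1 if s.total_done > 0 else 0
    message = "D: %.1f kB/s  U: %.1f kB/s  Peers: %d" % (
        s.download_rate / 1000.0, s.upload_rate / 1000.0, s.num_peers)
    msg_file = "%s (%d%%)" % (title, porcent)
    if _pf:
        msg_file += "  pieces: %s/%s" % (_pf.get('have', 0), _pf.get('len', 0))
    return message, porcent, msg_file, s, download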
Beispiel #53
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[uploadedto.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    if premium:
        # Log in to get the cookie
        logger.info(
            "[uploadedto.py] -------------------------------------------")
        logger.info("[uploadedto.py] login")
        logger.info(
            "[uploadedto.py] -------------------------------------------")
        login_url = "http://uploaded.net/io/login"
        post = "id=" + user + "&pw=" + password
        headers = []
        headers.append([
            "User-Agent",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:10.0.1) Gecko/20100101 Firefox/10.0.1"
        ])
        headers.append(["X-Requested-With", "XMLHttpRequest"])
        headers.append(["X-Prototype-Version", "1.6.1"])
        headers.append(["Referer", "http://uploaded.to/"])

        setcookie = scrapertools.get_header_from_response(
            login_url, post=post, headers=headers, header_to_get="set-cookie")
        logger.info("Cabecera set-cookie=" + setcookie)

        logger.info(
            "[uploadedto.py] -------------------------------------------")
        logger.info("[uploadedto.py] obtiene la url")
        logger.info(
            "[uploadedto.py] -------------------------------------------")

        location = scrapertools.get_header_from_response(
            page_url, header_to_get="location")
        logger.info("location=" + location)
        #Set-Cookie3: auth=3315964ab4fac585fdd9d4228dc70264a1756ba; path="/"; domain=".uploaded.to"; path_spec; domain_dot; expires="2015-02-25 18:35:37Z"; version=0
        #Set-Cookie3: login="******"; path="/"; domain=".uploaded.to"; path_spec; domain_dot; expires="2013-02-25 18:35:37Z"; version=0
        '''
        #cookie_data=config.get_cookie_data()
        #logger.info("cookie_data="+cookie_data)
        cookie_data = setcookie
        auth = scrapertools.get_match( cookie_data , 'auth=([a-z0-9]+)' )
        logger.info("auth="+auth)
        #%26id%3D7308170%26pw%3Df14c8daa489647d758a88474f509cd4277980f6b%26cks%3D204cffc6c96f
        login = scrapertools.get_match( cookie_data , 'login=([a-zA-Z0-9\%]+)' )
        logger.info("login="+login)

        headers.append( ["Cookie", 'login='+login+'; auth='+auth])
        temp_location = scrapertools.get_header_from_response( location , header_to_get = "location" , headers=headers)
        logger.info("temp_location="+temp_location)

        #location = location + "|Cookie="+urllib.quote('login='+login+'; auth='+auth)
        location = temp_location
        '''

        logger.info(
            "[uploadedto.py] -------------------------------------------")
        logger.info("[uploadedto.py] obtiene el nombre del fichero")
        logger.info(
            "[uploadedto.py] -------------------------------------------")
        try:
            #content-disposition=attachment; filename="El Hobbit CAM LATINO Barbie.avi"
            content_disposition = scrapertools.get_header_from_response(
                location, header_to_get="content-disposition", headers=headers)
            if content_disposition != "":
                filename = scrapertools.get_match(content_disposition,
                                                  'filename="([^"]+)"')
                extension = filename[-4:]
        except:
            extension = ""
        '''
        temp_location = scrapertools.get_header_from_response( location , header_to_get = "location" , headers=headers)
        logger.info("temp_location="+temp_location)
        if temp_location!="":
            location = temp_location
        '''

        video_urls.append([extension + " (Premium) [uploaded.to]", location])

    for video_url in video_urls:
        logger.info("[uploadedto.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Beispiel #54
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[nowvideo.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    if premium:
        # Read the login page
        login_url = "http://www.nowvideo.eu/login.php"
        data = scrapertools.cache_page(login_url)

        # Do the login
        login_url = "http://www.nowvideo.eu/login.php?return="
        post = "user=" + user + "&pass=" + password + "&register=Login"
        headers = [["User-Agent", USER_AGENT],
                   ["Referer", "http://www.nowvideo.eu/login.php"]]
        data = scrapertools.cache_page(login_url, post=post, headers=headers)

        # Download the video page
        data = scrapertools.cache_page(page_url)
        logger.debug("data:" + data)

        # URL to invoke: http://www.nowvideo.eu/api/player.api.php?user=aaa&file=rxnwy9ku2nwx7&pass=bbb&cid=1&cid2=undefined&key=83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788&cid3=undefined
        # On the page:
        '''
        flashvars.domain="http://www.nowvideo.eu";
        flashvars.file="rxnwy9ku2nwx7";
        flashvars.filekey="83.46.246.226-c7e707c6e20a730c563e349d2333e788";
        flashvars.advURL="0";
        flashvars.autoplay="false";
        flashvars.cid="1";
        flashvars.user="aaa";
        flashvars.key="bbb";
        flashvars.type="1";
        '''
        flashvar_file = scrapertools.find_single_match(
            data, 'flashvars.file="([^"]+)"')
        flashvar_filekey = scrapertools.find_single_match(
            data, 'flashvars.filekey=([^;]+);')
        flashvar_filekey = scrapertools.find_single_match(
            data, 'var ' + flashvar_filekey + '="([^"]+)"')
        flashvar_user = scrapertools.find_single_match(
            data, 'flashvars.user="([^"]+)"')
        flashvar_key = scrapertools.find_single_match(
            data, 'flashvars.key="([^"]+)"')
        flashvar_type = scrapertools.find_single_match(
            data, 'flashvars.type="([^"]+)"')

        # http://www.nowvideo.eu/api/player.api.php?user=aaa&file=rxnwy9ku2nwx7&pass=bbb&cid=1&cid2=undefined&key=83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788&cid3=undefined
        url = "http://www.nowvideo.eu/api/player.api.php?user=" + flashvar_user + "&file=" + flashvar_file + "&pass=" + flashvar_key + "&cid=1&cid2=undefined&key=" + flashvar_filekey.replace(
            ".", "%2E").replace("-", "%2D") + "&cid3=undefined"
        data = scrapertools.cache_page(url)
        logger.info("data=" + data)

        location = scrapertools.find_single_match(data, 'url=([^\&]+)&')
        location += "?client=FLASH"

        video_urls.append([
            scrapertools.get_filename_from_url(location)[-4:] +
            " [premium][nowvideo]", location
        ])

    else:

        data = scrapertools.cache_page(page_url)

        video_id = scrapertools.find_single_match(
            data, 'flashvars\.file\s*=\s*"([^"]+)')
        flashvar_filekey = scrapertools.find_single_match(
            data, 'flashvars\.file[_]*key\s*=\s*([^;]+)')
        filekey = scrapertools.find_single_match(
            data, 'var\s+%s\s*=\s*"([^"]+)' % flashvar_filekey)
        filekey = filekey.replace(".", "%2E").replace("-", "%2D")

        # get stream url from api
        url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (
            filekey, video_id)
        data = scrapertools.cache_page(url)

        data = scrapertools.find_single_match(data, 'url=([^&]+)')

        res = scrapertools.get_header_from_response(
            url, header_to_get="content-type")
        if res == "text/html":
            data = urllib.quote_plus(data).replace(".", "%2E")
            url = 'http://www.nowvideo.sx/api/player.api.php?cid3=undefined&numOfErrors=1&user=undefined&errorUrl=%s&pass=undefined&errorCode=404&cid=1&cid2=undefined&file=%s&key=%s' % (
                data, video_id, filekey)
            data = scrapertools.cache_page(url)
            try:
                data = scrapertools.find_single_match(data, 'url=([^&]+)')
            except:
                url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (
                    filekey, video_id)
                data = scrapertools.cache_page(url)
                data = scrapertools.find_single_match(data, 'url=([^&]+)')

        media_url = data

        video_urls.append([
            scrapertools.get_filename_from_url(media_url)[-4:] + " [nowvideo]",
            media_url
        ])

    return video_urls
Beispiel #55
0
def play(item):
    logger.info("[cb01anime.py] play")

    if '/goto/' in item.url:
        item.url = item.url.split('/goto/')[-1].decode('base64')

    item.url = item.url.replace('http://cineblog01.pw', 'http://k4pp4.pw')

    logger.debug(
        "##############################################################")
    if "go.php" in item.url:
        data = scrapertools.anti_cloudflare(item.url, headers)
        try:
            data = scrapertools.get_match(data,
                                          'window.location.href = "([^"]+)";')
        except IndexError:
            try:
                # data = scrapertools.get_match(data, r'<a href="([^"]+)">clicca qui</a>')
                # Alternatively, since "Clicca qui per proseguire" sometimes appears instead:
                data = scrapertools.get_match(
                    data,
                    r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
            except IndexError:
                data = scrapertools.get_header_from_response(
                    item.url, headers=headers, header_to_get="Location")
        while 'vcrypt' in data:
            data = scrapertools.get_header_from_response(
                data, headers=headers, header_to_get="Location")
        logger.debug("##### play go.php data ##\n%s\n##" % data)
    elif "/link/" in item.url:
        data = scrapertools.anti_cloudflare(item.url, headers)
        from core import jsunpack

        try:
            data = scrapertools.get_match(
                data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
            data = jsunpack.unpack(data)
            logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
        except IndexError:
            logger.debug("##### The content is yet unpacked ##\n%s\n##" % data)

        data = scrapertools.find_single_match(
            data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
        while 'vcrypt' in data:
            data = scrapertools.get_header_from_response(
                data, headers=headers, header_to_get="Location")
        logger.debug("##### play /link/ data ##\n%s\n##" % data)
    else:
        data = item.url
        logger.debug("##### play else data ##\n%s\n##" % data)
    logger.debug(
        "##############################################################")

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
Beispiel #56
0
def play(item):
    logger.info("[rtve.py] play")

    # Extract the code
    #http://www.rtve.es/mediateca/videos/20100410/telediario-edicion/741525.shtml
    #http://www.rtve.es/alacarta/videos/espana-entre-el-cielo-y-la-tierra/espana-entre-el-cielo-y-la-tierra-la-mancha-por-los-siglos-de-los-siglos/232969/
    logger.info("url=" + item.url)
    patron = 'http://.*?/([0-9]+)/'
    data = item.url.replace(".shtml", "/")
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    codigo = matches[0]
    logger.info("assetid=" + codigo)

    thumbnail = item.thumbnail

    ##### New method, October 2012
    #### Download an image with metadata
    #### http://www.rtve.es/ztnr/movil/thumbnail/mandulis/videos/1538906.png
    #### manager address: http://www.rtve.es/odin/loki/TW96aWxsYS81LjAgKExpbnV4OyBVOyBBbmRyb2lkIDQuMC4zOyBlcy1lczsgTlZTQkwgVk9SVEVYIEJ1aWxkL0lNTDc0SykgQXBwbGVXZWJLaXQvNTM0LjMwIChLSFRNTCwgbGlrZSBHZWNrbykgVmVyc2lvbi80LjAgTW9iaWxlIFNhZmFyaS81MzQuMzA=/
    #urlimg = 'http://www.rtve.es/ztnr/movil/thumbnail/mandulis/videos/'+codigo+'.png'

    try:
        from lib import simplejson
        data = scrapertools.cachePage(
            "http://www.rtve.es/odin/loki/TW96aWxsYS81LjAgKExpbnV4OyBVOyBBbmRyb2lkIDQuMC4zOyBlcy1lczsgTlZTQkwgVk9SVEVYIEJ1aWxkL0lNTDc0SykgQXBwbGVXZWJLaXQvNTM0LjMwIChLSFRNTCwgbGlrZSBHZWNrbykgVmVyc2lvbi80LjAgTW9iaWxlIFNhZmFyaS81MzQuMzA=/"
        )
        json_data = simplejson.loads(data)
        manager = json_data["manager"]
    except:
        manager = "mandulis"

    urlimg = 'http://www.rtve.es/ztnr/movil/thumbnail/' + manager + '/videos/' + codigo + '.png'
    data = scrapertools.cachePage(urlimg)  ### download the png with metadata
    data = data.decode("base-64")  ### decode it as base64
    patron = 'tEXt([^#]+)#'
    matches = re.compile(patron,
                         re.DOTALL).findall(data)  ## extract the obfuscated text
    try:
        cyphertext = matches[0]
    except:
        cyphertext = ""
    try:
        key = data.split('#')[1]
        key = key[1:270]  ## extract the key
        clave = ""
        for x in key:
            if x.isdigit():
                clave = clave + x
            else:
                continue
    except:
        clave = ""

    try:
        intermediate_cyphertext = first_pass(
            cyphertext)  ## first pass: extract the intermediate text
        url = second_pass(
            clave, intermediate_cyphertext)  ## second pass: decode the url
    except:
        url = ""
    #################################################################################

    if url == "":
        try:
            # Build the URL
            #http://www.rtve.es/api/videos/1311573/config/alacarta_videos.xml
            url = 'http://www.rtve.es/api/videos/' + codigo + '/config/alacarta_videos.xml'
            logger.info("[rtve.py] url=" + url)
            # Download the XML and look for the DataId
            #<cdnAssetDataId>828164</cdnAssetDataId>
            data = scrapertools.cachePage(url)
            patron = '<cdnAssetDataId>([^<]+)</cdnAssetDataId>'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            url = ""
            if len(matches) > 0:
                codigo = matches[0]
            else:
                codigo = ""
            logger.info("assetDataId=" + codigo)
            if codigo != "":
                #url = http://www.rtve.es/ztnr/preset.jsp?idpreset=828164&lenguaje=es&tipo=video
                url = 'http://www.rtve.es/ztnr/preset.jsp?idpreset=' + codigo + '&lenguaje=es&tipo=video'
                data = scrapertools.cachePage(url)
                # Look for the video url
                # <li><em>File Name</em>&nbsp;<span class="titulo">mp4/4/8/1328228115384.mp4</span></li>
                patron = '<li><em>File Name</em>.*?"titulo">([^<]+)</span></li>'
                matches = re.compile(patron, re.DOTALL).findall(data)
                scrapertools.printMatches(matches)
                if len(matches) > 0:
                    # match = mp4/4/8/1328228115384.mp4
                    #http://www.rtve.es/resources/TE_NGVA/mp4/4/8/1328228115384.mp4
                    url = "http://www.rtve.es/resources/TE_NGVA/" + matches[0]
                else:
                    url = ""

        except:
            url = ""

    if url == "":
        try:
            # Build the URL
            #http://www.rtve.es/swf/data/es/videos/alacarta/5/2/5/1/741525.xml
            url = 'http://www.rtve.es/swf/data/es/videos/alacarta/' + codigo[
                -1:] + '/' + codigo[-2:-1] + '/' + codigo[
                    -3:-2] + '/' + codigo[-4:-3] + '/' + codigo + '.xml'
            logger.info("[rtve.py] url=" + url)

            # Download the XML and look for the video
            #<file>rtmp://stream.rtve.es/stream/resources/alacarta/flv/6/9/1270911975696.flv</file>
            data = scrapertools.cachePage(url)
            #print url
            #print data
            patron = '<file>([^<]+)</file>'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            if len(matches) > 0:
                #url = matches[0].replace('rtmp://stream.rtve.es/stream/','http://www.rtve.es/')
                url = matches[0]
            else:
                url = ""

            patron = '<image>([^<]+)</image>'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            #print len(matches)
            #url = matches[0].replace('rtmp://stream.rtve.es/stream/','http://www.rtve.es/')
            thumbnail = matches[0]
        except:
            url = ""

    # Make a second attempt
    if url == "":
        try:
            # Build the URL
            #http://www.rtve.es/swf/data/es/videos/video/0/5/8/0/500850.xml
            url = 'http://www.rtve.es/swf/data/es/videos/video/' + codigo[
                -1:] + '/' + codigo[-2:-1] + '/' + codigo[
                    -3:-2] + '/' + codigo[-4:-3] + '/' + codigo + '.xml'
            logger.info("[rtve.py] url=" + url)

            # Download the XML and look for the video
            #<file>rtmp://stream.rtve.es/stream/resources/alacarta/flv/6/9/1270911975696.flv</file>
            data = scrapertools.cachePage(url)
            patron = '<file>([^<]+)</file>'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            #url = matches[0].replace('rtmp://stream.rtve.es/stream/','http://www.rtve.es/')
            url = matches[0]
        except:
            url = ""

    if url == "":

        try:
            # Build the URL
            #http://www.rtve.es/swf/data/es/videos/video/0/5/8/0/500850.xml
            url = 'http://www.rtve.es/swf/data/es/videos/video/' + codigo[
                -1:] + '/' + codigo[-2:-1] + '/' + codigo[
                    -3:-2] + '/' + codigo[-4:-3] + '/' + codigo + '.xml'
            logger.info("[rtve.py] url=" + url)

            # Download the XML and look for the assetDataId
            #<plugin ... assetDataId::576596"/>
            data = scrapertools.cachePage(url)
            #logger.info("[rtve.py] data="+data)
            patron = 'assetDataId\:\:([^"]+)"'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            #url = matches[0].replace('rtmp://stream.rtve.es/stream/','http://www.rtve.es/')
            codigo = matches[0]
            logger.info("assetDataId=" + codigo)

            #url = http://www.rtve.es/scd/CONTENTS/ASSET_DATA_VIDEO/6/9/5/6/ASSET_DATA_VIDEO-576596.xml
            url = 'http://www.rtve.es/scd/CONTENTS/ASSET_DATA_VIDEO/' + codigo[
                -1:] + '/' + codigo[-2:-1] + '/' + codigo[-3:-2] + '/' + codigo[
                    -4:-3] + '/ASSET_DATA_VIDEO-' + codigo + '.xml'
            logger.info("[rtve.py] url=" + url)

            data = scrapertools.cachePage(url)
            #logger.info("[rtve.py] data="+data)
            patron = '<field>[^<]+'
            patron += '<key>ASD_FILE</key>[^<]+'
            patron += '<value>([^<]+)</value>[^<]+'
            patron += '</field>'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            codigo = matches[0]
            logger.info("[rtve.py] url=" + url)

            #/deliverty/demo/resources/mp4/4/3/1290960871834.mp4
            #http://media4.rtve.es/deliverty/demo/resources/mp4/4/3/1290960871834.mp4
            #http://www.rtve.es/resources/TE_NGVA/mp4/4/3/1290960871834.mp4
            url = "http://www.rtve.es/resources/TE_NGVA" + codigo[-26:]

        except:
            url = ""
    logger.info("[rtve.py] url=" + url)

    itemlist = []
    if url == "":
        logger.info("[rtve.py] Extrayendo URL tipo iPad")
        headers = []
        headers.append([
            "User-Agent",
            "Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10"
        ])
        location = scrapertools.get_header_from_response(
            item.url, headers=headers, header_to_get="location")
        logger.info("[rtve.py] location=" + location)

        data = scrapertools.cache_page(location, headers=headers)
        #<a href="/usuarios/sharesend.shtml?urlContent=/resources/TE_SREP63/mp4/4/8/1334334549284.mp4" target
        url = scrapertools.get_match(
            data,
            '<a href="/usuarios/sharesend.shtml\?urlContent\=([^"]+)" target')
        logger.info("[rtve.py] url=" + url)
        #http://www.rtve.es/resources/TE_NGVA/mp4/4/8/1334334549284.mp4
        url = urlparse.urljoin("http://www.rtve.es", url)
        logger.info("[rtve.py] url=" + url)

    if url != "":
        itemlist.append(
            Item(channel=CHANNELNAME,
                 title=item.title,
                 action="play",
                 url=url,
                 thumbnail=thumbnail,
                 plot=item.plot,
                 server="directo",
                 show=item.title,
                 folder=False))

    return itemlist
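
The XML URLs above are composed from the last four digits of the video id, in reverse order, one digit per path segment. A minimal sketch of just that scheme (build_rtve_xml_url is illustrative, not a function of the channel):

# Sketch: derive the per-digit RTVE data URL from a numeric id string.
def build_rtve_xml_url(codigo):
    # Last four digits, from last to fourth-from-last, one per segment
    digits = [codigo[-1:], codigo[-2:-1], codigo[-3:-2], codigo[-4:-3]]
    return ('http://www.rtve.es/swf/data/es/videos/video/' +
            '/'.join(digits) + '/' + codigo + '.xml')

print build_rtve_xml_url("500850")
# -> http://www.rtve.es/swf/data/es/videos/video/0/5/8/0/500850.xml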
Beispiel #57
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info(
        "[wupload.py] get_video_url( page_url='%s' , user='%s' , password='%s', video_password=%s)"
        % (page_url, user, "**************************"[0:len(password)],
           video_password))

    if not premium:
        #return get_free_url(page_url)
        logger.info("[wupload.py] free no soportado")
    else:
        # Hace el login y consigue la cookie
        #login_url = "http://www.wupload.es/account/login"
        login_url = "http://www.wupload.com/account/login"
        post = "email=" + user.replace(
            "@",
            "%40") + "&redirect=%2F&password="******"&rememberMe=1"
        location = scrapertools.get_header_from_response(
            url=login_url, header_to_get="location", post=post)
        logger.info("location=" + location)

        if location != "":
            login_url = location

        data = scrapertools.cache_page(url=login_url, post=post)

        # Obtiene la URL final
        headers = scrapertools.get_headers_from_response(page_url)
        location1 = ""
        for header in headers:
            logger.info("header1=" + str(header))

            if header[0] == "location":
                location1 = header[1]
                logger.info("location1=" + str(header))

        # Obtiene la URL final
        headers = scrapertools.get_headers_from_response(location1)
        location2 = ""
        content_disposition = ""
        for header in headers:
            logger.info("header2=" + str(header))

            if header[0] == "location":
                location2 = header[1]

        location = location2
        if location == "":
            location = location1

        return [[
            "(Premium) [wupload]",
            location + "|" + "User-Agent=" + urllib.quote(
                "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"
            )
        ]]

    return []
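
The premium return value above uses the Kodi convention of appending extra HTTP headers to the media URL after a "|" separator, URL-encoded. A small sketch of that convention (with_headers and the sample values are illustrative, not part of wupload.py):

import urllib

# Sketch: attach player headers to a media URL, Kodi-style.
def with_headers(media_url, headers):
    # Everything after "|" is parsed by the player as Header=value pairs
    return media_url + "|" + urllib.urlencode(headers)

print with_headers("http://example.com/video.mp4",
                   {"User-Agent": "Mozilla/5.0",
                    "Referer": "http://example.com/"})
# e.g. http://example.com/video.mp4|User-Agent=Mozilla%2F5.0&Referer=...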
Beispiel #58
0
def get_free_url(page_url):
    location = scrapertools.get_header_from_response(page_url,
                                                     header_to_get="location")
    if location != "":
        page_url = location

    logger.info("[wupload.py] location=%s" % page_url)

    video_id = extract_id(page_url)
    logger.info("[wupload.py] video_id=%s" % video_id)

    data = scrapertools.cache_page(url=page_url)
    patron = 'href="(.*?start=1.*?)"'
    matches = re.compile(patron).findall(data)
    scrapertools.printMatches(matches)
    if len(matches) == 0:
        logger.error("[wupload.py] No encuentra el enlace Free")
        return []

    # Obtiene link de descarga free
    download_link = matches[0]
    if not download_link.startswith("http://"):
        download_link = urlparse.urljoin(page_url, download_link)

    logger.info("[wupload.py] Link descarga: " + download_link)

    # Descarga el enlace
    headers = []
    headers.append(["X-Requested-With", "XMLHttpRequest"])
    headers.append(["Referer", page_url])
    headers.append([
        "User-Agent",
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"
    ])
    headers.append(
        ["Content-Type", "application/x-www-form-urlencoded; charset=UTF-8"])
    headers.append(["Accept-Encoding", "gzip, deflate"])
    headers.append(["Accept", "*/*"])
    headers.append(["Accept-Language", "es-es,es;q=0.8,en-us;q=0.5,en;q=0.3"])
    headers.append(["Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7"])
    headers.append(["Connection", "keep-alive"])
    headers.append(["Pragma", "no-cache"])
    headers.append(["Cache-Control", "no-cache"])

    data = scrapertools.cache_page(download_link, headers=headers, post="")
    logger.info(data)

    while True:
        # Detecta el tiempo de espera
        patron = "countDownDelay = (\d+)"
        matches = re.compile(patron).findall(data)
        if len(matches) > 0:
            tiempo_espera = int(matches[0])
            logger.info("[wupload.py] tiempo de espera %d segundos" %
                        tiempo_espera)

            #import time
            #time.sleep(tiempo_espera)
            from platformcode.xbmc import xbmctools
            resultado = xbmctools.handle_wait(
                tiempo_espera + 5, "Progreso",
                "Conectando con servidor Wupload (Free)")
            if resultado == False:
                break

            tm = get_match(data, "name='tm' value='([^']+)'")
            tm_hash = get_match(data, "name='tm_hash' value='([^']+)'")
            post = "tm=" + tm + "&tm_hash=" + tm_hash
            data = scrapertools.cache_page(download_link,
                                           headers=headers,
                                           post=post)
            logger.info(data)
        else:
            logger.info("[wupload.py] no encontrado tiempo de espera")

        # Detecta captcha
        patron = "Recaptcha\.create"
        matches = re.compile(patron).findall(data)
        if len(matches) > 0:
            logger.info("[wupload.py] est� pidiendo el captcha")
            recaptcha_key = get_match(data, 'Recaptcha\.create\("([^"]+)"')
            logger.info("[wupload.py] recaptcha_key=" + recaptcha_key)

            data_recaptcha = scrapertools.cache_page(
                "http://www.google.com/recaptcha/api/challenge?k=" +
                recaptcha_key)
            patron = "challenge.*?'([^']+)'"
            challenges = re.compile(patron, re.S).findall(data_recaptcha)
            if (len(challenges) > 0):
                challenge = challenges[0]
                image = "http://www.google.com/recaptcha/api/image?c=" + challenge

                #CAPTCHA
                exec "import seriesly.captcha as plugin"
                tbd = plugin.Keyboard("", "", image)
                tbd.doModal()
                confirmed = tbd.isConfirmed()
                tecleado = ""  # evita NameError si el usuario cancela el teclado
                if (confirmed):
                    tecleado = tbd.getText()

                #tecleado = raw_input('Grab ' + image + ' : ')

                # El envío solo tiene sentido si se obtuvo el challenge
                post = "recaptcha_challenge_field=%s&recaptcha_response_field=%s" % (
                    challenge, tecleado.replace(" ", "+"))
                data = scrapertools.cache_page(download_link,
                                               headers=headers,
                                               post=post)
                logger.info(data)

        else:
            logger.info("[wupload.py] no encontrado captcha")

        # Detecta el enlace final de descarga
        patron = '<p><a href="(http\:\/\/.*?wupload[^"]+)">'
        matches = re.compile(patron).findall(data)
        if len(matches) > 0:
            final_url = matches[0]
            '''
            'GET /download/2616019677/4f0391ba/9bed4add/0/1/580dec58/3317afa30905a31794733c6a32da1987719292ff
            HTTP/1.1
            Accept-Language: es-es,es;q=0.8,en-us;q=0.5,en;q=0.3
            Accept-Encoding: gzip, deflate
            Connection: close
            Accept: */*
            User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12
            Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7
            Host: s107.wupload.es
            Referer: http://www.wupload.es/file/2616019677
            Pragma: no-cache
            Cache-Control: no-cache
            Content-Type: application/x-www-form-urlencoded; charset=UTF-8
            00:39:39 T:2956623872  NOTICE: reply:
            00:39:39 T:2956623872  NOTICE: 'HTTP/1.1 200 OK\r\n'
            00:39:39 T:2956623872  NOTICE: header:
            00:39:39 T:2956623872  NOTICE: Server: nginx
            00:39:39 T:2956623872  NOTICE: header:
            00:39:39 T:2956623872  NOTICE: Date: Tue, 03 Jan 2012 23:39:39 GMT
            00:39:39 T:2956623872  NOTICE: header:
            00:39:39 T:2956623872  NOTICE: Content-Type: "application/octet-stream"
            00:39:39 T:2956623872  NOTICE: header:
            00:39:39 T:2956623872  NOTICE: Content-Length: 230336429
            00:39:39 T:2956623872  NOTICE: header:
            00:39:39 T:2956623872  NOTICE: Last-Modified: Tue, 06 Sep 2011 01:07:26 GMT
            00:39:39 T:2956623872  NOTICE: header:
            00:39:39 T:2956623872  NOTICE: Connection: close
            00:39:39 T:2956623872  NOTICE: header:
            00:39:39 T:2956623872  NOTICE: Set-Cookie: dlc=1; expires=Thu, 02-Feb-2012 23:39:39 GMT; path=/; domain=.wupload.es
            00:39:39 T:2956623872  NOTICE: header:
            00:39:39 T:2956623872  NOTICE: Content-Disposition: attachment; filename="BNS609.mp4"
            '''
            logger.info("[wupload.py] link descarga " + final_url)

            return [[
                "(Free)", final_url + '|' + 'Referer=' +
                urllib.quote(page_url) + "&Content-Type=" + urllib.quote(
                    "application/x-www-form-urlencoded; charset=UTF-8") +
                "&Cookie=" + urllib.quote("lastUrlLinkId=" + video_id)
            ]]
        else:
            logger.info("[wupload.py] no detectado link descarga")
Beispiel #59
def play(url, xlistitem={}, is_view=None, subtitle="", item=None):
    allocate = True
    try:
        import platform
        xbmc.log(
            "XXX KODI XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
        )
        xbmc.log("OS platform: %s %s" %
                 (platform.system(), platform.release()))
        xbmc.log("xbmc/kodi version: %s" %
                 xbmc.getInfoLabel("System.BuildVersion"))
        xbmc_version = int(xbmc.getInfoLabel("System.BuildVersion")[:2])
        xbmc.log("xbmc/kodi version number: %s" % xbmc_version)
        xbmc.log(
            "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX KODI XXXX"
        )

        _platform = get_platform()
        if str(_platform['system']) in [
                "android_armv7", "linux_armv6", "linux_armv7"
        ]:
            allocate = False
        # -- log ------------------------------------------------
        xbmc.log(
            "XXX platform XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
        )
        xbmc.log("_platform['system']: %s" % _platform['system'])
        xbmc.log("allocate: %s" % allocate)
        xbmc.log(
            "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX platform XXXX"
        )
        # -- ----------------------------------------------------
    except:
        pass

    DOWNLOAD_PATH = config.get_setting("downloadpath")

    # -- adfly: ------------------------------------
    if url.startswith("http://adf.ly/"):
        try:
            data = httptools.downloadpage(url).data
            url = decode_adfly(data)
        except:
            ddd = xbmcgui.Dialog()
            ddd.ok(
                "alfa-MCT: Sin soporte adf.ly",
                "El script no tiene soporte para el acortador de urls adf.ly.",
                "", "url: " + url)
            return

    # -- Necesario para algunas webs ----------------------------
    if not url.endswith(".torrent") and not url.startswith("magnet"):
        t_file = scrapertools.get_header_from_response(
            url, header_to_get="location")
        if len(t_file) > 0:
            url = t_file
            t_file = scrapertools.get_header_from_response(
                url, header_to_get="location")
        if len(t_file) > 0:
            url = t_file

    # -- Crear dos carpetas en descargas para los archivos ------
    save_path_videos = os.path.join(DOWNLOAD_PATH, "torrent-videos")
    save_path_torrents = os.path.join(DOWNLOAD_PATH, "torrent-torrents")
    if not os.path.exists(save_path_torrents): os.mkdir(save_path_torrents)

    # -- Usar - archivo torrent desde web, magnet o HD ---------
    if not os.path.isfile(url) and not url.startswith("magnet"):
        # -- http - crear archivo torrent -----------------------
        data = url_get(url)
        # -- El nombre del torrent será el que contienen los    -
        # -- datos.                                             -
        re_name = urllib.unquote(
            scrapertools.find_single_match(data, ':name\d+:(.*?)\d+:'))
        torrent_file = filetools.join(save_path_torrents,
                                      filetools.encode(re_name + '.torrent'))

        f = open(torrent_file, 'wb')
        f.write(data)
        f.close()
    elif os.path.isfile(url):
        # -- file - para usar torrens desde el HD ---------------
        torrent_file = url
    else:
        # -- magnet ---------------------------------------------
        torrent_file = url
    # -----------------------------------------------------------

    # -- MCT - MiniClienteTorrent -------------------------------
    ses = lt.session()

    # -- log ----------------------------------------------------
    xbmc.log("### Init session ########")
    xbmc.log(lt.version)
    xbmc.log("#########################")
    # -- --------------------------------------------------------

    ses.add_dht_router("router.bittorrent.com", 6881)
    ses.add_dht_router("router.utorrent.com", 6881)
    ses.add_dht_router("dht.transmissionbt.com", 6881)

    trackers = [
        "udp://tracker.openbittorrent.com:80/announce",
        "http://tracker.torrentbay.to:6969/announce",
        "http://tracker.pow7.com/announce",
        "udp://tracker.ccc.de:80/announce",
        "udp://open.demonii.com:1337",
        "http://9.rarbg.com:2710/announce",
        "http://bt.careland.com.cn:6969/announce",
        "http://explodie.org:6969/announce",
        "http://mgtracker.org:2710/announce",
        "http://tracker.best-torrents.net:6969/announce",
        "http://tracker.tfile.me/announce",
        "http://tracker1.wasabii.com.tw:6969/announce",
        "udp://9.rarbg.com:2710/announce",
        "udp://9.rarbg.me:2710/announce",
        "udp://coppersurfer.tk:6969/announce",
        "http://www.spanishtracker.com:2710/announce",
        "http://www.todotorrents.com:2710/announce",
    ]

    video_file = ""
    # -- magnet2torrent -----------------------------------------
    if torrent_file.startswith("magnet"):
        try:
            import zlib
            btih = hex(
                zlib.crc32(
                    scrapertools.find_single_match(
                        torrent_file,
                        'magnet:\?xt=urn:(?:[A-z0-9:]+|)([A-z0-9]{32})'))
                & 0xffffffff)
            files = [
                f for f in os.listdir(save_path_torrents)
                if os.path.isfile(os.path.join(save_path_torrents, f))
            ]
            for file in files:
                if btih in os.path.basename(file):
                    torrent_file = os.path.join(save_path_torrents, file)
        except:
            pass

    if torrent_file.startswith("magnet"):
        try:
            tempdir = tempfile.mkdtemp()
        except IOError:
            tempdir = os.path.join(save_path_torrents, "temp")
            if not os.path.exists(tempdir):
                os.mkdir(tempdir)
        params = {
            'save_path': tempdir,
            'trackers': trackers,
            'storage_mode': lt.storage_mode_t.storage_mode_allocate,
            'paused': False,
            'auto_managed': True,
            'duplicate_is_error': True
        }
        h = lt.add_magnet_uri(ses, torrent_file, params)
        dp = xbmcgui.DialogProgress()
        dp.create('alfa-MCT')
        while not h.has_metadata():
            message, porcent, msg_file, s, download = getProgress(
                h, "Creando torrent desde magnet")
            dp.update(porcent, message, msg_file)
            if s.state == 1: download = 1
            if dp.iscanceled():
                dp.close()
                remove_files(download, torrent_file, video_file, ses, h)
                return
            h.force_dht_announce()
            xbmc.sleep(1000)

        dp.close()
        info = h.get_torrent_info()
        data = lt.bencode(lt.create_torrent(info).generate())

        torrent_file = os.path.join(
            save_path_torrents,
            unicode(info.name() + "-" + btih, "utf-8", errors="replace") +
            ".torrent")
        f = open(torrent_file, 'wb')
        f.write(data)
        f.close()
        ses.remove_torrent(h)
        shutil.rmtree(tempdir)
    # -----------------------------------------------------------

    # -- Archivos torrent ---------------------------------------
    e = lt.bdecode(open(torrent_file, 'rb').read())
    info = lt.torrent_info(e)

    # -- El más gordo o uno de los más gordos se entiende que es -
    # -- el vídeo o es el vídeo que se usará como referencia    -
    # -- para el tipo de archivo                                -
    xbmc.log("##### Archivos ## %s ##" % len(info.files()))
    _index_file, _video_file, _size_file = get_video_file(info)

    # -- Prioritarizar/Seleccionar archivo-----------------------
    _index, video_file, video_size, len_files = get_video_files_sizes(info)
    if len_files == 0:
        xbmcgui.Dialog().ok(
            "No se puede reproducir",
            "El torrent no contiene ningún archivo de vídeo")
        return

    if _index == -1:
        _index = _index_file
        video_file = _video_file
        video_size = _size_file

    _video_file_ext = os.path.splitext(_video_file)[1]
    xbmc.log("##### _video_file_ext ## %s ##" % _video_file_ext)
    if (_video_file_ext == ".avi" or _video_file_ext == ".mp4") and allocate:
        xbmc.log("##### storage_mode_t.storage_mode_allocate (" +
                 _video_file_ext + ") #####")
        h = ses.add_torrent({
            'ti':
            info,
            'save_path':
            save_path_videos,
            'trackers':
            trackers,
            'storage_mode':
            lt.storage_mode_t.storage_mode_allocate
        })
    else:
        xbmc.log("##### storage_mode_t.storage_mode_sparse (" +
                 _video_file_ext + ") #####")
        h = ses.add_torrent({
            'ti':
            info,
            'save_path':
            save_path_videos,
            'trackers':
            trackers,
            'storage_mode':
            lt.storage_mode_t.storage_mode_sparse
        })
        allocate = True
    # -----------------------------------------------------------

    # -- Descarga secuencial - trozo 1, trozo 2, ... ------------
    h.set_sequential_download(True)

    h.force_reannounce()
    h.force_dht_announce()

    # -- Inicio de variables para 'pause' automático cuando el  -
    # -- el vídeo se acerca a una pieza sin completar           -
    is_greater_num_pieces = False
    is_greater_num_pieces_plus = False
    is_greater_num_pieces_pause = False

    porcent4first_pieces = int(video_size * 0.000000005)
    if porcent4first_pieces < 10: porcent4first_pieces = 10
    if porcent4first_pieces > 100: porcent4first_pieces = 100
    porcent4last_pieces = int(porcent4first_pieces / 2)

    num_pieces_to_resume = int(video_size * 0.0000000025)
    if num_pieces_to_resume < 5: num_pieces_to_resume = 5
    if num_pieces_to_resume > 25: num_pieces_to_resume = 25
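    # Los tres umbrales anteriores se escalan con el tamaño del vídeo
    # (bytes * 5e-9 y * 2.5e-9) y quedan acotados a 10-100 piezas
    # iniciales y 5-25 piezas de margen para reanudar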

    xbmc.log("##### porcent4first_pieces ## %s ##" % porcent4first_pieces)
    xbmc.log("##### porcent4last_pieces ## %s ##" % porcent4last_pieces)
    xbmc.log("##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume)

    # -- Prioritarizar o seleccionar las piezas del archivo que -
    # -- se desea reproducir con 'file_priorities'              -
    piece_set = set_priority_pieces(h, _index, video_file, video_size,
                                    porcent4first_pieces, porcent4last_pieces,
                                    allocate)

    # -- Crear diálogo de progreso para el primer bucle ---------
    dp = xbmcgui.DialogProgress()
    dp.create('alfa-MCT')

    _pieces_info = {}

    # -- Doble bucle anidado ------------------------------------
    # -- Descarga - Primer bucle                                -
    while not h.is_seed():
        s = h.status()

        xbmc.sleep(100)

        # -- Recuperar los datos del progreso -------------------
        message, porcent, msg_file, s, download = getProgress(h,
                                                              video_file,
                                                              _pf=_pieces_info)

        # -- Si hace 'checking' existe descarga -----------------
        # -- 'download' Se usará para saber si hay datos        -
        # -- descargados para el diálogo de 'remove_files'      -
        if s.state == 1: download = 1

        # -- Player - play --------------------------------------
        # -- Comprobar si se han completado las piezas para el  -
        # -- inicio del vídeo                                   -
        first_pieces = True

        _c = 0
        for i in range(piece_set[0], piece_set[porcent4first_pieces]):
            first_pieces &= h.have_piece(i)
            if h.have_piece(i): _c += 1
        _pieces_info = {
            'current': 0,
            'continuous': "%s/%s" % (_c, porcent4first_pieces),
            'continuous2': "",
            'have': h.status().num_pieces,
            'len': len(piece_set)
        }

        last_pieces = True
        if not allocate:
            _c = len(piece_set) - 1
            _cc = 0
            for i in range(
                    len(piece_set) - porcent4last_pieces, len(piece_set)):
                last_pieces &= h.have_piece(i)
                if h.have_piece(i):
                    _c -= 1
                    _cc += 1
            _pieces_info['continuous2'] = "[%s/%s] " % (_cc,
                                                        porcent4last_pieces)

        if is_view != "Ok" and first_pieces and last_pieces:
            _pieces_info['continuous2'] = ""
            xbmc.log("##### porcent [%.2f%%]" % (s.progress * 100))
            is_view = "Ok"
            dp.close()

            # -- Player - Ver el vídeo --------------------------
            playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
            playlist.clear()

            ren_video_file = os.path.join(save_path_videos, video_file)
            try:
                playlist.add(ren_video_file, xlistitem)
            except:
                playlist.add(ren_video_file)

            if xbmc_version < 17:
                player = play_video(xbmc.PLAYER_CORE_AUTO)
            else:
                player = play_video()
            player.play(playlist)

            # -- Contador de cancelaciones para la ventana de   -
            # -- 'pause' automático                             -
            is_greater_num_pieces_canceled = 0
            continuous_pieces = 0
            porcent_time = 0.00
            current_piece = 0
            set_next_continuous_pieces = porcent4first_pieces

            # -- Impedir que kodi haga 'resume' a un archivo ----
            # -- que se reprodujo con anterioridad y que se     -
            # -- eliminó para impedir que intente la reproducción -
            # -- en una pieza que aún no se ha completado y se  -
            # -- active 'pause' automático                      -
            not_resume = True

            # -- Bandera subTítulos
            _sub = False

            # -- Segundo bucle - Player - Control de eventos ----
            while player.isPlaying():
                xbmc.sleep(100)

                # -- Añadir subTítulos
                if subtitle != "" and not _sub:
                    _sub = True
                    player.setSubtitles(subtitle)

                # -- Impedir que kodi haga 'resume' al inicio ---
                # -- de la descarga de un archivo conocido      -
                if not_resume:
                    player.seekTime(0)
                    not_resume = False

                # -- Control 'pause' automático                 -
                continuous_pieces = count_completed_continuous_pieces(
                    h, piece_set)

                if xbmc.Player().isPlaying():

                    # -- Porcentage del progreso del vídeo ------
                    player_getTime = player.getTime()
                    player_getTotalTime = player.getTotalTime()
                    porcent_time = player_getTime / player_getTotalTime * 100

                    # -- Pieza que se está reproduciendo --------
                    current_piece = int(porcent_time / 100 * len(piece_set))

                    # -- Banderas de control --------------------
                    is_greater_num_pieces = (
                        current_piece >
                        continuous_pieces - num_pieces_to_resume)
                    is_greater_num_pieces_plus = (
                        current_piece + porcent4first_pieces >
                        continuous_pieces)
                    is_greater_num_pieces_finished = (
                        current_piece + porcent4first_pieces >= len(piece_set))

                    # -- Activa 'pause' automático --------------
                    if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
                        is_greater_num_pieces_pause = True
                        player.pause()

                    if continuous_pieces >= set_next_continuous_pieces:
                        set_next_continuous_pieces = continuous_pieces + num_pieces_to_resume
                    next_continuous_pieces = str(
                        continuous_pieces -
                        current_piece) + "/" + str(set_next_continuous_pieces -
                                                   current_piece)
                    _pieces_info = {
                        'current': current_piece,
                        'continuous': next_continuous_pieces,
                        'continuous2': _pieces_info['continuous2'],
                        'have': h.status().num_pieces,
                        'len': len(piece_set)
                    }

                    # si es un archivo de la videoteca enviar a marcar como visto
                    if item and item.strm_path:
                        from platformcode import xbmc_videolibrary
                        xbmc_videolibrary.mark_auto_as_watched(item)

                # -- Cerrar el diálogo de progreso --------------
                if player.resumed:
                    dp.close()

                # -- Mostrar el diálogo de progreso -------------
                if player.paused:
                    # -- Crear diálogo si no existe -------------
                    if not player.statusDialogoProgress:
                        dp = xbmcgui.DialogProgress()
                        dp.create('alfa-MCT')
                        player.setDialogoProgress()

                    # -- Diálogos de estado en el visionado -----
                    if not h.is_seed():
                        # -- Recuperar los datos del progreso ---
                        message, porcent, msg_file, s, download = getProgress(
                            h, video_file, _pf=_pieces_info)
                        dp.update(porcent, message, msg_file)
                    else:
                        dp.update(100, "Descarga completa: " + video_file)

                    # -- Se canceló el progreso en el visionado -
                    # -- Continuar                              -
                    if dp.iscanceled():
                        dp.close()
                        player.pause()

                    # -- Se canceló el progreso en el visionado -
                    # -- en la ventana de 'pause' automático.   -
                    # -- Parar si el contador llega a 3         -
                    if dp.iscanceled() and is_greater_num_pieces_pause:
                        is_greater_num_pieces_canceled += 1
                        if is_greater_num_pieces_canceled == 3:
                            player.stop()

                    # -- Desactiva 'pause' automático y ---------
                    # -- reinicia el contador de cancelaciones  -
                    if not dp.iscanceled(
                    ) and not is_greater_num_pieces_plus and is_greater_num_pieces_pause:
                        dp.close()
                        player.pause()
                        is_greater_num_pieces_pause = False
                        is_greater_num_pieces_canceled = 0

                    # -- El usuario cancelo el visionado --------
                    # -- Terminar                               -
                    if player.ended:
                        # -- Diálogo eliminar archivos ----------
                        remove_files(download, torrent_file, video_file, ses,
                                     h)
                        return

        # -- Kodi - Se cerró el visionado -----------------------
        # -- Continuar | Terminar                               -
        if is_view == "Ok" and not xbmc.Player().isPlaying():

            if info.num_files() == 1:
                # -- Diálogo continuar o terminar ---------------
                d = xbmcgui.Dialog()
                ok = d.yesno('alfa-MCT', 'XBMC-Kodi Cerró el vídeo.',
                             '¿Continuar con la sesión?')
            else:
                ok = False
            # -- SI ---------------------------------------------
            if ok:
                # -- Continuar: ---------------------------------
                is_view = None
            else:
                # -- Terminar: ----------------------------------
                # -- Comprobar si el vídeo pertenece a una ------
                # -- lista de archivos                          -
                _index, video_file, video_size, len_files = get_video_files_sizes(
                    info)
                if _index == -1 or len_files == 1:
                    # -- Diálogo eliminar archivos --------------
                    remove_files(download, torrent_file, video_file, ses, h)
                    return
                else:
                    # -- Lista de archivos. Diálogo de opciones -
                    piece_set = set_priority_pieces(h, _index, video_file,
                                                    video_size,
                                                    porcent4first_pieces,
                                                    porcent4last_pieces,
                                                    allocate)
                    is_view = None
                    dp = xbmcgui.DialogProgress()
                    dp.create('alfa-MCT')

        # -- Mostrar progreso antes del visionado ---------------
        if is_view != "Ok":
            dp.update(porcent, message, msg_file)

        # -- Se canceló el progreso antes del visionado ---------
        # -- Terminar                                           -
        if dp.iscanceled():
            dp.close()
            # -- Comprobar si el vídeo pertenece a una lista de -
            # -- archivos                                       -
            _index, video_file, video_size, len_files = get_video_files_sizes(
                info)
            if _index == -1 or len_files == 1:
                # -- Diálogo eliminar archivos ------------------
                remove_files(download, torrent_file, video_file, ses, h)
                return
            else:
                # -- Lista de archivos. Diálogo de opciones -----
                piece_set = set_priority_pieces(h, _index, video_file,
                                                video_size,
                                                porcent4first_pieces,
                                                porcent4last_pieces, allocate)
                is_view = None
                dp = xbmcgui.DialogProgress()
                dp.create('alfa-MCT')

    # -- Kodi - Error? - No debería llegar aquí -----------------
    if is_view == "Ok" and not xbmc.Player().isPlaying():
        dp.close()
        # -- Diálogo eliminar archivos --------------------------
        remove_files(download, torrent_file, video_file, ses, h)

    return
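
The auto-pause logic in the player loop above compares the piece currently being rendered against the count of contiguous completed pieces. A reduced sketch of just that test (should_pause and its arguments are illustrative, not part of the MCT code):

# Sketch: pause when playback approaches the download frontier.
def should_pause(continuous_pieces, play_fraction, total_pieces, margin):
    # Piece index currently playing, proportional to elapsed time
    current_piece = int(play_fraction * total_pieces)
    # Too close to the last contiguous downloaded piece -> pause
    return current_piece > continuous_pieces - margin

print should_pause(600, 0.62, 1000, 25)  # True: piece 620 > 600 - 25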
Beispiel #60
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("[filenium.py] get_video_url(page_url='%s')" % page_url)
    location=""
    page_url = correct_url(page_url)
    if premium:
        # Hace el login
        if "?.torrent" in page_url:
            location = page_url.replace("?.torrent","")        
        else:
            url = "http://filenium.com/welcome"
            post = "username=%s&password=%s" % (user,password)
            data = scrapertools.cache_page(url, post=post, timeout=TIMEOUT)
            link = urllib.urlencode({'filez': page_url})
            location = scrapertools.cache_page("http://filenium.com/?filenium&" + link, timeout=TIMEOUT)

        user = user.replace("@","%40")
        
        #logger.info("[filenium.py] torrent url (location='%s')" % location)
        
        location = location.replace("http://","http://"+user+":"+password+"@")
        '''
        if "xbmc" in config.get_platform():
            #location = location.replace("http://cdn.filenium.com","http://"+user+":"+password+"@cdn.filenium.com")
            location = location.replace("http://","http://"+user+":"+password+"@")
        else:
            location = location.replace("/?.zip","")
            user = user.replace(".","%2e")
            location = location + "?user="******"&passwd="+password
        '''

        logger.info("location="+location)

        # Averigua la redirección, para que funcione en Plex y WiiMC
        try:
            location2 = scrapertools.get_header_from_response(location,header_to_get="Location")
            logger.info("location2="+location2)
        except:
            location2=""

        if location2!="":
            location=location2

        '''
        if not location.startswith("http") and page_url.endswith(".torrent"):
            # Lee el id
            data=json.loads(location)
            logger.info("data="+str(data))
            name = data['name']

            datas = scrapertools.cachePage("http://filenium.com/xbmc_json", timeout=TIMEOUT)
            logger.info(datas)
            data = json.loads(datas)
            logger.info(str(data))
            
            for match in data:
                if match['status'] == "COMPLETED" and match['filename'].startswith(name):
                    location = match['download_url'] + "?.torrent"
                    logger.info("location="+location)
                    break
        '''

    return location
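
The premium branch above injects the account into the download URL as HTTP basic-auth userinfo, then resolves one redirect so the final host also works in players without cookie support (Plex, WiiMC, per the comment above). A minimal sketch of the rewrite with hypothetical values:

# Sketch: embed credentials in the URL as done above ('@' in the
# user name must already be escaped as %40).
user = "name%40mail.com"
password = "secret"
location = "http://cdn.filenium.com/file.avi"
location = location.replace("http://", "http://" + user + ":" + password + "@")
print location  # -> http://name%40mail.com:secret@cdn.filenium.com/file.avi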