def geturl(bliptv_url,recurse=True):
    """Resolve a blip.tv player URL to a direct media URL.

    Follows the player's redirect, pulls the ``file=`` parameter out of the
    redirected URL, downloads that feed and returns the URL of the *last*
    <media:content> entry found. Returns "" when nothing matches or when
    the input is not a http://blip.tv/play URL.

    NOTE(review): the ``recurse`` parameter is accepted but never used in
    this function.
    """
    logger.info("[bliptv.py] bliptv_url="+bliptv_url)

    # Result accumulator: stays "" unless a media URL is found.
    devuelve = ""

    if bliptv_url.startswith("http://blip.tv/play"):    
        # The play URL answers with a redirect whose query string carries
        # the real feed location in a "file=" parameter.
        redirect = scrapertools.getLocationHeaderFromResponse(bliptv_url)
        logger.info("[bliptv.py] redirect="+redirect)
        
        # First attempt: everything after "file=" up to end of string.
        patron='file\=(.*?)$'
        matches = re.compile(patron).findall(redirect)
        logger.info("[bliptv.py] matches1=%d" % len(matches))
        
        if len(matches)==0:
            # Fallback: "file=" value delimited by a following "&".
            patron='file\=([^\&]+)\&'
            matches = re.compile(patron).findall(redirect)
            logger.info("[bliptv.py] matches2=%d" % len(matches))
        
        if len(matches)>0:
            url = matches[0]
            logger.info("[bliptv.py] url="+url)
            # The feed URL arrives percent-encoded inside the query string.
            url = urllib.unquote(url)
            logger.info("[bliptv.py] url="+url)

            data = scrapertools.cache_page(url)
            logger.info(data)
            # Each <media:content> is a candidate stream; the loop below
            # deliberately keeps the LAST match found.
            patron = '<media\:content url\="([^"]+)" blip\:role="([^"]+)".*?type="([^"]+)"[^>]+>'
            matches = re.compile(patron).findall(data)
            scrapertools.printMatches(matches)

            for match in matches:
                logger.info("url="+str(match[0]))
                devuelve = match[0]

    return devuelve
Пример #2
0
def play(item):
    """Build the playable item for a publico.es video.

    Detail URLs look like http://video.publico.es/videos/9/51046/1/recent.
    The numeric code feeds http://video.publico.es/videos/v_video/<code>,
    which answers 302 with the final .flv URL in its "Location" header
    (e.g. Location: http://mm.publico.es/files/flvs/51046.49118.flv).
    """
    logger.info("[publicotv.py] play")

    # Pull the numeric video code out of the detail URL.
    patron = 'http\:\/\/video.publico.es\/videos\/[^\/]+/([^\/]+)/'
    matches = re.compile(patron,re.DOTALL).findall(item.url)
    if DEBUG:
        scrapertools.printMatches(matches)

    locator_url = 'http://video.publico.es/videos/v_video/'+matches[0]
    logger.info("url="+locator_url)

    # The locator answers with a redirect; the media URL travels in the
    # Location header.
    media_url = scrapertools.getLocationHeaderFromResponse(locator_url)

    return [Item(channel=CHANNELNAME, title=item.title, server="directo",
                 action="play", url=media_url, thumbnail=item.thumbnail,
                 folder=False)]
Пример #3
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    """Resolve a youwatch page into a list of [label, url] video entries."""
    logger.info(
        "deportesalacarta.servers.youwatch get_video_url(page_url='%s')" %
        page_url)

    # Follow the initial redirect, then the embedded iframe, to reach the
    # player page that holds the actual media definition.
    landing = scrapertools.getLocationHeaderFromResponse(page_url)
    landing_html = scrapertools.cache_page(landing)
    iframe_url = scrapertools.find_single_match(landing_html,
                                                '<iframe src="([^"]+)"')
    player_html = scrapertools.cache_page(iframe_url)

    # The player JS carries the file URL; the iframe URL must be sent as
    # Referer when fetching it.
    media = scrapertools.get_match(player_html, '{file:"([^"]+)"')
    extension = scrapertools.get_filename_from_url(media)[-4:]
    video_urls = [[extension + " [youwatch]",
                   media + "|Referer=" + iframe_url]]

    for entry in video_urls:
        logger.info("deportesalacarta.servers.youwatch %s - %s" %
                    (entry[0], entry[1]))

    return video_urls
Пример #4
0
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve a BayFiles (free tier) page into playable video URLs.

    Scrapes the file id (``vfid``) and wait time (``delay``) from the page,
    requests a download token, waits out the free-user countdown via the
    XBMC progress dialog, then asks the ajax endpoint for the final link.

    Returns a list of [label, url, weight] entries, or '' when the vfid
    cannot be found (NOTE(review): inconsistent with the list return type).
    """
    logger.info("[bayfiles.py] get_video_url("+page_url+")")
    from servers import servertools
    video_urls = []

    data = scrapertools.cache_page(page_url)
    try:
        # File id embedded in the page's JavaScript.
        vfid = re.compile('var vfid = ([^;]+);').findall(data)[0]
    except:
        logger.info("[bayfiles.py] Error no encontro vfid")
        return ''
    try:
        # Free-user countdown in seconds; default to 300 when missing.
        delay = re.compile('var delay = ([^;]+);').findall(data)[0]
        delay = int(delay)
    except:
        delay = 300

    logger.info("[bayfiles.py] vfid="+vfid)
    logger.info("[bayfiles.py] delay="+str(delay))

    from platformcode.xbmc import xbmctools

    # Current time in milliseconds acts as the cache-busting "_" parameter.
    t = millis()
    #http://bayfiles.com/ajax_download?_=1336330599281&action=startTimer&vfid=2174049
    url_token = "http://bayfiles.com/ajax_download?_=%s&action=startTimer&vfid=%s"%(t,vfid)
    data = scrapertools.cache_page(url_token)
    logger.info("data="+data)
    datajson = load_json(data)

    if datajson['set']==True:
        token=datajson['token']
        # Show a progress dialog while the mandatory free-tier wait runs.
        resultado = xbmctools.handle_wait(delay,"Progreso","Conectando con servidor BayFiles (Free)")
        #if resultado == False:
            
        url_ajax = 'http://bayfiles.com/ajax_download'
        post = "action=getLink&vfid=%s&token=%s" %(vfid,token)
        data = scrapertools.cache_page( url_ajax , post=post, headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'],['Referer',page_url]] )
    
        # Extract the video URL from the onclick handler.
        patron = 'onclick="javascript:window.location.href = \'(.+?)\''
        matches = re.compile(patron,re.DOTALL).findall(data)
        #scrapertools.printMatches(matches)
        
        if len(matches)>0:
            mediaurl = matches[0]
            try:
                # Follow one more redirect if the link is indirect.
                location = scrapertools.getLocationHeaderFromResponse(mediaurl)
                if location:
                    mediaurl = location
            except:
                logger.info("Error al redireccionar")
            mediaurl = mediaurl + "|Referer="+urllib.quote(page_url)
            # Label is built from the file extension, e.g. ".avi [bayfiles]".
            video_urls.append( ["."+mediaurl.rsplit('.',1)[1]+" [bayfiles]",mediaurl,60])

    for video_url in video_urls:
        logger.info("[bayfiles.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Пример #5
0
def findvideos(item):
    """List playable video items for an animeforce entry.

    Unwraps adf.ly / bit.ly shorteners, then either scrapes animeforce's
    own player page (direct mp4 <source> links) or hands the URL to the
    generic server detector.

    Bug fixed: the module-level ``headers`` list was mutated (two Referer
    entries appended) on every call, so stale Referer headers accumulated
    across invocations. A per-call copy is used now; behaviour within a
    single call is unchanged.
    """
    logger.info("streamondemand.animeforce findvideos")

    itemlist = []

    if item.extra:
        data = scrapertools.cache_page(item.url, headers=headers)

        blocco = scrapertools.get_match(data, r'%s(.*?)</tr>' % item.extra)
        scrapedurl = scrapertools.find_single_match(
            blocco, r'<a href="([^"]+)"[^>]+>')
        url = scrapedurl
    else:
        url = item.url

    # Unwrap URL shorteners before deciding how to handle the link.
    if 'adf.ly' in url:
        url = adfly.get_long_url(url)
    elif 'bit.ly' in url:
        url = scrapertools.getLocationHeaderFromResponse(url)

    if 'animeforce' in url:
        # Work on a copy so the shared module-level header list is not
        # polluted across calls.
        request_headers = list(headers) + [['Referer', item.url]]
        data = scrapertools.cache_page(url, headers=request_headers)
        itemlist.extend(servertools.find_video_items(data=data))

        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__

        # Strip trailing query parameters and fetch the page that carries
        # the direct <source> mp4 links.
        url = url.split('&')[0]
        data = scrapertools.cache_page(url, headers=request_headers)
        patron = """<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
        matches = re.compile(patron, re.DOTALL).findall(data)
        # The player expects the final page as Referer when fetching mp4s.
        request_headers.append(['Referer', url])
        for video in matches:
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=item.title,
                     url=video + '|' + urllib.urlencode(dict(request_headers)),
                     folder=False))
    else:
        itemlist.extend(servertools.find_video_items(data=url))

        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__

    return itemlist
def play(item):
    """Resolve the intermediate 'btn' link of a verseriesynovelas page into
    a playable item; returns an empty list when no known server is found."""
    logger.info("pelisalacarta.channels.verseriesynovelas play")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    # The first response is sometimes an interstitial redirect page;
    # requesting the same URL again returns the real content.
    if "Redireccionando" in data:
        data = scrapertools.cache_page(item.url)
    button_link = scrapertools.find_single_match(data, 'class="btn" href="([^"]+)"')
    final_url = scrapertools.getLocationHeaderFromResponse(button_link)
    found = servertools.findvideos(data=final_url)
    if found:
        first = found[0]
        itemlist.append(Item(channel=__channel__, action="play",
                             server=first[2],
                             title="Enlace encontrado en " + first[0],
                             url=first[1], fulltitle=item.fulltitle,
                             thumbnail=item.thumbnail, fanart=item.fanart,
                             plot=item.plot, folder=False))
    return itemlist
Пример #7
0
def play(item):
    """Turn a seriecanal link into a playable item; bit.ly links are
    expanded to their final destination first."""
    logger.info("pelisalacarta.channels.seriecanal play")
    itemlist = []
    videolist = []
    if item.extra == "torrent":
        itemlist.append(Item(channel=__channel__, action="play",
                             server="torrent", title=item.title,
                             url=item.url))
    else:
        # Expand the bit.ly shortener via its redirect Location header.
        if item.url.startswith("http://bit.ly/"):
            item.url = scrapertools.getLocationHeaderFromResponse(item.url)
        itemlist.append(Item(channel=__channel__, action="play",
                             server=item.extra, title=item.title,
                             url=item.url))

    return itemlist
Пример #8
0
def play(item):
    logger.info("pelisalacarta.channels.tremendaseries play")
    print item.url
    
    data= scrapertools.cache_page(item.url)
    url_base = 'http://tremendaseries.com/saliendo'
    patron = url_base + '(.*?)">'
    data2 = url_base + scrapertools.find_single_match(data,patron)
    data2 = scrapertools.getLocationHeaderFromResponse(data2)
    logger.info("pelisalacarta.channels.tremendaseries data2="+data2)

    itemlist = servertools.find_video_items(data=data2)
    
    return itemlist    
Пример #9
0
def play(item):
    logger.info("pelisalacarta.channels.tremendaseries play")
    print item.url

    data = scrapertools.cache_page(item.url)
    url_base = 'http://tremendaseries.com/saliendo'
    patron = url_base + '(.*?)">'
    data2 = url_base + scrapertools.find_single_match(data, patron)
    data2 = scrapertools.getLocationHeaderFromResponse(data2)
    logger.info("pelisalacarta.channels.tremendaseries data2=" + data2)

    itemlist = servertools.find_video_items(data=data2)

    return itemlist
def findvideos(item):
    """List playable video items for an animeforce entry (see also the
    other copy of this function in this file).

    Bug fixed: ``headers`` is a module-level list that the previous
    version appended Referer entries to on every invocation, leaking
    stale headers into later calls. This version builds a per-call header
    list instead; single-call behaviour is unchanged.
    """
    logger.info("streamondemand.animeforce findvideos")

    itemlist = []

    if item.extra:
        data = scrapertools.cache_page(item.url, headers=headers)

        blocco = scrapertools.get_match(data, r'%s(.*?)</tr>' % item.extra)
        scrapedurl = scrapertools.find_single_match(blocco, r'<a href="([^"]+)"[^>]+>')
        url = scrapedurl
    else:
        url = item.url

    # Unwrap URL shorteners first.
    if 'adf.ly' in url:
        url = adfly.get_long_url(url)
    elif 'bit.ly' in url:
        url = scrapertools.getLocationHeaderFromResponse(url)

    if 'animeforce' in url:
        # Per-call copy: never mutate the shared module-level list.
        request_headers = list(headers) + [['Referer', item.url]]
        data = scrapertools.cache_page(url, headers=request_headers)
        itemlist.extend(servertools.find_video_items(data=data))

        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__

        # Drop query parameters and scrape the direct mp4 <source> links.
        url = url.split('&')[0]
        data = scrapertools.cache_page(url, headers=request_headers)
        patron = """<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
        matches = re.compile(patron, re.DOTALL).findall(data)
        # The mp4 fetch needs the final page as Referer.
        request_headers.append(['Referer', url])
        for video in matches:
            itemlist.append(Item(channel=__channel__, action="play", title=item.title, url=video + '|' + urllib.urlencode(dict(request_headers)), folder=False))
    else:
        itemlist.extend(servertools.find_video_items(data=url))

        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__

    return itemlist
Пример #11
0
def decode_hidden(text, number):
    """Decode openload's obfuscated text into a direct stream URL.

    Applies a ROT47 remap (``chr(33 + ((code + 14) % 94))``) to every
    character, offsets the last character by ``number``, then resolves the
    resulting /stream/ URL through its redirect Location header.
    """
    text = scrapertools.decodeHtmlentities(text)
    text = text.replace("&gt9", ">").replace("&quot9", '"').replace("&lt9", '<').replace("&amp9", '&')
    # ROT47-style shift over the printable ASCII range.
    shifted = "".join(chr(33 + ((ord(c) + 14) % 94)) for c in text)
    # The final character carries an extra numeric offset.
    text_decode = shifted[:-1] + chr(ord(shifted[-1]) + int(number))
    videourl = "https://openload.co/stream/{0}?mime=true".format(text_decode)
    videourl = scrapertools.getLocationHeaderFromResponse(videourl)
    return videourl.replace("https", "http").replace("?mime=true", "")
Пример #12
0
def decode_hidden(text, number):
    """ROT47-decode openload's hidden text and follow the stream redirect
    to obtain the final (http, no ?mime=true) video URL."""
    cleaned = scrapertools.decodeHtmlentities(text)
    # Undo the site's mangled entity encoding ("&gt9" etc.).
    for garbled, plain in (("&gt9", ">"), ("&quot9", '"'), ("&lt9", '<'), ("&amp9", '&')):
        cleaned = cleaned.replace(garbled, plain)
    decoded_chars = [chr(33 + ((ord(ch) + 14) % 94)) for ch in cleaned]
    decoded = "".join(decoded_chars)
    # The last character is additionally offset by the supplied number.
    text_decode = decoded[0:-1] + chr(ord(decoded[-1]) + int(number))
    videourl = "https://openload.co/stream/{0}?mime=true".format(text_decode)
    videourl = scrapertools.getLocationHeaderFromResponse(videourl)
    videourl = videourl.replace("https", "http").replace("?mime=true", "")
    return videourl
Пример #13
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Return [[label, url]] video entries for a youwatch page."""
    logger.info("pelisalacarta.servers.youwatch get_video_url(page_url='%s')" % page_url)

    # Chain: redirect -> page -> iframe -> player page holding the file URL.
    target = scrapertools.getLocationHeaderFromResponse(page_url)
    html = scrapertools.cache_page(target)
    iframe = scrapertools.find_single_match(html, '<iframe src="([^"]+)"')
    player_html = scrapertools.cache_page(iframe)

    file_url = scrapertools.get_match(player_html, '{file:"([^"]+)"')
    ext = scrapertools.get_filename_from_url(file_url)[-4:]
    # The iframe URL must travel as Referer when playing the file.
    video_urls = [[ext + " [youwatch]", file_url + "|Referer=" + iframe]]

    for entry in video_urls:
        logger.info("pelisalacarta.servers.youwatch %s - %s" % (entry[0], entry[1]))

    return video_urls
Пример #14
0
def play(item):
    """Build the playable item; expands bit.ly links and detects the server.

    Bug fixed: when ``servertools.findvideos`` found nothing, the previous
    version still referenced the never-assigned ``url``/``server`` locals
    outside the guard and raised NameError. The clone is now appended only
    when a video was actually found.
    """
    logger.info()
    itemlist = []
    if item.extra == "torrent":
        itemlist.append(item.clone())
    else:
        # Expand the bit.ly shortener via its redirect Location header.
        if item.url.startswith("http://bit.ly/"):
            item.url = scrapertools.getLocationHeaderFromResponse(item.url)
        video_list = servertools.findvideos(item.url)
        if video_list:
            url = video_list[0][1]
            server = video_list[0][2]
            itemlist.append(item.clone(server=server, url=url))

    return itemlist
Пример #15
0
def play(item):
    """Build the playable item for seriecanal; bit.ly links are expanded
    before server detection, torrents pass through unchanged.

    Bug fixed: ``url``/``server`` were referenced outside the
    ``if video_list:`` guard, raising NameError whenever no video was
    detected; the append now happens only on success.
    """
    logger.info("pelisalacarta.channels.seriecanal play")
    itemlist = []

    if item.extra == "torrent":
        itemlist.append(item.clone())
    else:
        # Expand the bit.ly shortener via its redirect Location header.
        if item.url.startswith("http://bit.ly/"):
            item.url = scrapertools.getLocationHeaderFromResponse(item.url)
        video_list = servertools.findvideos(item.url)
        if video_list:
            url = video_list[0][1]
            server = video_list[0][2]
            itemlist.append(item.clone(server=server, url=url))

    return itemlist
Пример #16
0
def get_link_api(page_url):
    """Resolve an openload embed URL through the official ticket API and
    return the direct (http) download URL, or "" on failure."""
    from core import jsontools
    file_id = scrapertools.find_single_match(page_url, 'embed/([0-9a-zA-Z-_]+)')
    login = "******"
    key = "AQFO3QJQ"
    # Step 1: request a download ticket for the file.
    raw = scrapertools.downloadpageWithoutCookies("https://api.openload.co/1/file/dlticket?file=%s&login=%s&key=%s" % (file_id, login, key))
    response = jsontools.load_json(raw)
    if response["status"] != 200:
        return ""
    # Step 2: redeem the ticket for the actual link.
    ticket = response["result"]["ticket"]
    raw = scrapertools.downloadpageWithoutCookies("https://api.openload.co/1/file/dl?file=%s&ticket=%s" % (file_id, ticket))
    response = jsontools.load_json(raw)
    extension = "." + scrapertools.find_single_match(response["result"]["content_type"], '/(\w+)')
    videourl = response['result']['url'] + '?mime=true'
    # Follow the redirect and normalise the scheme / query string.
    videourl = scrapertools.getLocationHeaderFromResponse(videourl)
    return videourl.replace("https", "http").replace("?mime=true", "")
Пример #17
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    """Return [label, url] video entries for a speedvideo page."""
    logger.info("url=" + page_url)

    html = httptools.downloadpage(page_url).data

    # The page exposes the file behind a 'linkfile' JS variable whose
    # value is itself a redirect to the real media URL.
    link = scrapertools.find_single_match(html, 'linkfile ="([^"]+)"')
    media_url = scrapertools.getLocationHeaderFromResponse(link)

    # Label carries the file extension, e.g. ".mp4 [speedvideo]".
    label = "." + media_url.rsplit('.', 1)[1] + ' [speedvideo]'
    return [[label, media_url]]
Пример #18
0
def play(item):
    """Build the playable item for a publico.es video.

    Detail URLs look like http://video.publico.es/videos/9/51046/1/recent.
    The numeric code feeds http://video.publico.es/videos/v_video/<code>,
    which answers 302 Found with the final .flv URL in its "Location"
    header, e.g. Location: http://mm.publico.es/files/flvs/51046.49118.flv
    """
    logger.info("[publicotv.py] play")

    # Extract the video code from the detail URL.
    code_pattern = 'http\:\/\/video.publico.es\/videos\/[^\/]+/([^\/]+)/'
    codes = re.compile(code_pattern, re.DOTALL).findall(item.url)
    if DEBUG:
        scrapertools.printMatches(codes)

    locator = 'http://video.publico.es/videos/v_video/' + codes[0]
    logger.info("url=" + locator)

    # The media URL travels in the locator's Location header.
    final_url = scrapertools.getLocationHeaderFromResponse(locator)

    return [Item(channel=CHANNELNAME,
                 title=item.title,
                 server="directo",
                 action="play",
                 url=final_url,
                 thumbnail=item.thumbnail,
                 folder=False)]
Пример #19
0
def play(item):
    """Produce the playable item for seriecanal; torrent links pass through
    unchanged, bit.ly links are expanded first."""
    logger.info("pelisalacarta.channels.seriecanal play")
    videolist = []
    if item.extra == "torrent":
        playable = Item(channel=__channel__,
                        action="play",
                        server="torrent",
                        title=item.title,
                        url=item.url)
    else:
        # Expand the bit.ly shortener via its redirect Location header.
        if item.url.startswith("http://bit.ly/"):
            item.url = scrapertools.getLocationHeaderFromResponse(item.url)
        playable = Item(channel=__channel__,
                        action="play",
                        server=item.extra,
                        title=item.title,
                        url=item.url)

    return [playable]
Пример #20
0
def play(item):
    """Resolve a seriesyonkis episode page into a playable item.

    Finds the "Reproducir ahora" (or "Descargar ahora") link, follows up
    to two redirects looking for a known video server, and on XBMC falls
    back to solving the site's reCAPTCHA before retrying recursively.

    NOTE: legacy Python 2 code (``<>`` operator, ``exec`` statement).
    """
    logger.info("[seriesyonkis.py] play")
    itemlist = []

    # Download the playback page for this episode and server.
    #<a href="/s/y/597157/0/s/1244" target="_blank">Reproducir ahora</a>
    logger.info("[seriesyonkis.py] play url=" + item.url)
    data = scrapertools.cache_page(item.url)
    patron = '<a href="([^"]+)" target="_blank">\s*Reproducir ahora\s*</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) == 0:
        # Fall back to the download link when the play link is absent.
        patron = '<a href="([^"]+)" target="_blank">\s*Descargar ahora\s*</a>'
        matches = re.compile(patron, re.DOTALL).findall(data)

    if len(matches) == 0:
        logger.info(
            "[seriesyonkis.py] play ERROR, no encuentro el enlace 'Reproducir ahora' o 'Descargar ahora'"
        )
        return []

    playurl = urlparse.urljoin(item.url, matches[0])
    logger.info("[seriesyonkis.py] play url=" + playurl)

    try:
        # First redirect hop: the intermediate page normally points at the
        # actual hosting server.
        location = scrapertools.getLocationHeaderFromResponse(playurl)
        logger.info("[seriesyonkis.py] play location=" + location)

        if location <> "":
            logger.info("[seriesyonkis.py] Busca videos conocidos en la url")
            videos = servertools.findvideos(location)

            if len(videos) == 0:
                # Second redirect hop; if that still yields nothing, scan
                # the downloaded page body itself for known video URLs.
                location = scrapertools.getLocationHeaderFromResponse(location)
                logger.info("[seriesyonkis.py] play location=" + location)

                if location <> "":
                    logger.info(
                        "[seriesyonkis.py] Busca videos conocidos en la url")
                    videos = servertools.findvideos(location)

                    if len(videos) == 0:
                        logger.info(
                            "[seriesyonkis.py] play downloading location")
                        data = scrapertools.cache_page(location)
                        logger.info(
                            "------------------------------------------------------------"
                        )
                        #logger.info(data)
                        logger.info(
                            "------------------------------------------------------------"
                        )
                        videos = servertools.findvideos(data)
                        logger.info(str(videos))
                        logger.info(
                            "------------------------------------------------------------"
                        )
        else:
            logger.info("[seriesyonkis.py] play location vacía")
            videos = []

        if (len(videos) > 0):
            # First detected video wins: (label, url, server).
            url = videos[0][1]
            server = videos[0][2]
            itemlist.append(
                Item(channel=item.channel,
                     action="play",
                     title=item.title,
                     fulltitle=item.fulltitle,
                     url=url,
                     thumbnail=item.thumbnail,
                     plot=item.plot,
                     server=server,
                     extra=item.extra,
                     folder=False))
        else:
            # No video found: check whether the page is gated by a captcha.
            data = scrapertools.cache_page(playurl)
            patron = '<ul class="form-login">(.*?)</ul'
            matches = re.compile(patron, re.S).findall(data)
            if (len(matches) > 0):
                if "xbmc" in config.get_platform():
                    data = matches[0]
                    # Look for the reCAPTCHA public key.
                    patron = 'src="http://www.google.com/recaptcha/api/noscript\?k=([^"]+)"'
                    pkeys = re.compile(patron, re.S).findall(data)
                    if (len(pkeys) > 0):
                        pkey = pkeys[0]
                        # Look for the challenge id.
                        data = scrapertools.cache_page(
                            "http://www.google.com/recaptcha/api/challenge?k="
                            + pkey)
                        patron = "challenge.*?'([^']+)'"
                        challenges = re.compile(patron, re.S).findall(data)
                        if (len(challenges) > 0):
                            challenge = challenges[0]
                            image = "http://www.google.com/recaptcha/api/image?c=" + challenge

                            # CAPTCHA: ask the user to type the image text.
                            exec "import pelisalacarta.captcha as plugin"
                            tbd = plugin.Keyboard("", "", image)
                            tbd.doModal()
                            confirmed = tbd.isConfirmed()
                            if (confirmed):
                                tecleado = tbd.getText()
                                logger.info("tecleado=" + tecleado)
                                sendcaptcha(playurl, challenge, tecleado)
                            del tbd
                            # tbd no longer exists past this point.
                            if (confirmed and tecleado != ""):
                                # Retry the whole resolution recursively.
                                itemlist = play(item)
                else:
                    itemlist.append(
                        Item(channel=item.channel,
                             action="error",
                             title="El sitio web te requiere un captcha"))

    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
    logger.info("len(itemlist)=%s" % len(itemlist))
    return itemlist
Пример #21
0
def findvideos(item):
    """List download (P2P/DD), online-streaming and other-season links for
    a seriecanal page.

    Bug fixed: the download section did ``itemlist.append(list)``, nesting
    a whole list returned by ``servertools.find_video_items`` inside
    ``itemlist``; the attribute-setting loop over the full ``itemlist``
    then hit that list object (no ``.channel`` attribute) and also
    re-titled every previously appended entry. Detected video items are
    now tagged locally and merged with ``extend``.
    """
    logger.info("pelisalacarta.channels.seriecanal findvideos")
    itemlist = []

    data = scrapertools.cachePage(item.url)
    data = scrapertools.decodeHtmlentities(data)

    # --- Download section (P2P / direct download) ---
    data_download = scrapertools.get_match(data, '<th>Enlaces de Descarga mediante P2P o DD</th>(.*?)</table>')
    patron  = '<p class="item_name"><a href="([^"]+)".*?">([^"]+)</a>'
    patron += '[^=]+.*?<a.*?">([^"]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data_download)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedepi, scrapedname in matches:
        scrapedtitle = " - Episodio "+scrapedepi+" - "+scrapedname
        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"]")
        if scrapedurl.find("magnet") != -1:
            itemlist.append( Item(channel=__channel__, action="play" , title="[Torrent]" + scrapedtitle, url=scrapedurl, extra="torrent"))
        else:
            # Detect known video servers in this link, tag only the newly
            # found items, then merge them into the result.
            found_items = servertools.find_video_items(data=scrapedurl)
            for videoitem in found_items:
                videoitem.channel = __channel__
                videoitem.action = "play"
                videoitem.folder = False
                videoitem.title = "["+videoitem.server+"]" + scrapedtitle
            itemlist.extend(found_items)

    # --- Online streaming section ---
    data_online = scrapertools.get_match(data, '<th>Enlaces de Visionado Online</th>(.*?)</table>')
    patron  = '<a href="([^"]+)\\n.*?src="([^"]+)".*?'
    patron += 'title="Enlace de Visionado Online">([^"]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data_online)
    scrapertools.printMatches(matches)
    videolist = []
    for scrapedurl, scrapedthumb, scrapedtitle in matches:
        # Skip trailer links (identified by the YouTube thumbnail).
        if scrapedthumb != "images/series/youtube.png":
            # Expand the bit.ly shortener via its redirect Location header.
            url = scrapertools.getLocationHeaderFromResponse(scrapedurl)
            scrapedtitle = scrapertools.htmlclean(scrapedtitle)
            videolist = servertools.find_video_items(data=url)
            for videoitem in videolist:
                videoitem.channel=__channel__
                videoitem.action="play"
                videoitem.folder=False
                videoitem.title = "["+videoitem.server+"]" + " - " + scrapedtitle
                itemlist.append( Item(channel=__channel__, action="play" , extra=videoitem.server, title=videoitem.title, url=videoitem.url))

    # --- Other seasons (only when the "no more seasons" marker is absent) ---
    data_temp = scrapertools.get_match(data, '<div class="panel panel-success">(.*?)</table>')
    data_temp = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data_temp)
    data_notemp = scrapertools.find_single_match(data_temp, '<td colspan="7"(.*?)</table>')
    if len(data_notemp) == 0:
        patron  = '<tr><td><p class="item_name"><a href="([^"]+)".*?'
        patron += '<p class="text-success"><strong>([^"]+)</strong>'
        matches = re.compile(patron,re.DOTALL).findall(data_temp)
        scrapertools.printMatches(matches)
        for scrapedurl, scrapedtitle in matches:
            url = urlparse.urljoin(URL_BASE, scrapedurl)
            itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle , url=url , folder=True))

    return itemlist
def play(item):
    """Resolve a seriesyonkis episode page into a playable item.

    Same flow as the reformatted copy of this function elsewhere in the
    file: locate the "Reproducir ahora"/"Descargar ahora" link, follow up
    to two redirects looking for a known video server, and on XBMC fall
    back to solving the reCAPTCHA before retrying recursively.

    NOTE: legacy Python 2 code (``<>`` operator, ``exec`` statement).
    """
    logger.info("[seriesyonkis.py] play")
    itemlist = []
    
    # Download the playback page for this episode and server.
    #<a href="/s/y/597157/0/s/1244" target="_blank">Reproducir ahora</a>
    logger.info("[seriesyonkis.py] play url="+item.url)
    data = scrapertools.cache_page(item.url)
    patron = '<a href="([^"]+)" target="_blank">\s*Reproducir ahora\s*</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)==0:
        # Fall back to the download link when the play link is absent.
        patron = '<a href="([^"]+)" target="_blank">\s*Descargar ahora\s*</a>'
        matches = re.compile(patron,re.DOTALL).findall(data)
    
    if len(matches)==0:
        logger.info("[seriesyonkis.py] play ERROR, no encuentro el enlace 'Reproducir ahora' o 'Descargar ahora'")
        return []
    
    playurl = urlparse.urljoin(item.url,matches[0])
    logger.info("[seriesyonkis.py] play url="+playurl)

    try:
        # First redirect hop towards the hosting server.
        location = scrapertools.getLocationHeaderFromResponse(playurl)
        logger.info("[seriesyonkis.py] play location="+location)

        if location<>"":
            logger.info("[seriesyonkis.py] Busca videos conocidos en la url")
            videos = servertools.findvideos(location)
            
            if len(videos)==0:
                # Second hop; as a last resort scan the downloaded page
                # body itself for known video URLs.
                location = scrapertools.getLocationHeaderFromResponse(location)
                logger.info("[seriesyonkis.py] play location="+location)

                if location<>"":
                    logger.info("[seriesyonkis.py] Busca videos conocidos en la url")
                    videos = servertools.findvideos(location)
                    
                    if len(videos)==0:
                        logger.info("[seriesyonkis.py] play downloading location")
                        data = scrapertools.cache_page(location)
                        logger.info("------------------------------------------------------------")
                        #logger.info(data)
                        logger.info("------------------------------------------------------------")
                        videos = servertools.findvideos(data) 
                        logger.info(str(videos))
                        logger.info("------------------------------------------------------------")
        else:
            logger.info("[seriesyonkis.py] play location vacía")
            videos=[]

        if(len(videos)>0): 
            # First detected video wins: (label, url, server).
            url = videos[0][1]
            server=videos[0][2]                   
            itemlist.append( Item(channel=item.channel, action="play" , title=item.title, fulltitle=item.fulltitle , url=url, thumbnail=item.thumbnail, plot=item.plot, server=server, extra=item.extra, folder=False))
        else:
            # No video found: check whether the page is gated by a captcha.
            data = scrapertools.cache_page(playurl)
            patron='<ul class="form-login">(.*?)</ul'
            matches = re.compile(patron, re.S).findall(data)
            if(len(matches)>0):
                if "xbmc" in config.get_platform():
                    data = matches[0]
                    # Look for the reCAPTCHA public key.
                    patron='src="http://www.google.com/recaptcha/api/noscript\?k=([^"]+)"'
                    pkeys = re.compile(patron, re.S).findall(data)
                    if(len(pkeys)>0):
                        pkey=pkeys[0]
                        # Look for the challenge id.
                        data = scrapertools.cache_page("http://www.google.com/recaptcha/api/challenge?k="+pkey)
                        patron="challenge.*?'([^']+)'"
                        challenges = re.compile(patron, re.S).findall(data)
                        if(len(challenges)>0):
                            challenge = challenges[0]
                            image = "http://www.google.com/recaptcha/api/image?c="+challenge
                            
                            # CAPTCHA: ask the user to type the image text.
                            exec "import pelisalacarta.captcha as plugin"
                            tbd = plugin.Keyboard("","",image)
                            tbd.doModal()
                            confirmed = tbd.isConfirmed()
                            if (confirmed):
                                tecleado = tbd.getText()
                                logger.info("tecleado="+tecleado)
                                sendcaptcha(playurl,challenge,tecleado)
                            del tbd 
                            # tbd no longer exists past this point.
                            if(confirmed and tecleado != ""):
                                # Retry the whole resolution recursively.
                                itemlist = play(item)
                else:
                    itemlist.append( Item(channel=item.channel, action="error", title="El sitio web te requiere un captcha") )

    except:
        import sys
        for line in sys.exc_info():
            logger.error( "%s" % line )
    logger.info("len(itemlist)=%s" % len(itemlist))
    return itemlist