示例#1
0
def findvideos(item):
    """List playable items for one estrenosli.org listing.

    Downloads the listing page, follows each flagged ("banderita") link and
    lets servertools identify the hosted videos.  'multi-episodie' entries
    resolve every chapter link individually.  For movies, a trailer-search
    entry is prepended and a videolibrary entry appended.
    """
    logger.info()
    import os
    itemlist = []
    # Flag CSS class -> human-readable language tag.
    IDIOMAS = {"banderita1": "Español", "banderita2": "VOSE", "banderita3": "Latino"}

    url = "http://estrenosli.org/ver-online-" + item.url

    data = httptools.downloadpage(url).data
    # Collapse newlines/tabs/double spaces and &nbsp; entities, matching the
    # cleanup used everywhere else for this site.  The old pattern removed
    # *every* single space, which mangled tags such as '<div class="content">'
    # and made the patterns below never match.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = '<div class="content"><a href="([^"]+).*?'
    patron += '<div class="content_mini"><span class="([^"]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, banderita in matches:
        # Language suffix for the title, e.g. " [VOSE]"; empty when unknown.
        idioma = ""
        if banderita in IDIOMAS:
            idioma = " [%s]" % IDIOMAS[banderita]

        data = httptools.downloadpage(url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

        if item.extra == 'multi-episodie':
            # One entry per chapter; each chapter URL is resolved on its own.
            patron = '<div class="linksDescarga"><span class="titulo">Video Online:([^<]+).*?<a href="([^"]+)'
            matches = re.compile(patron, re.DOTALL).findall(data)
            for capitulo, url in matches:
                s = servertools.findvideos(url, skip=True)
                if s:
                    itemlist.append(item.clone(url=s[0][1], action="play", folder=False, server=s[0][2],
                                               title="Ver %s en %s%s" % (
                                                   capitulo.strip(), s[0][2].capitalize(), idioma),
                                               thumbnail2=item.thumbnail,
                                               thumbnail=config.get_thumb("server_" + s[0][2] + ".png")))
        else:
            for s in servertools.findvideos(data):
                itemlist.append(item.clone(url=s[1], action="play", folder=False, server=s[2],
                                           title="Ver en %s%s" % (s[2].capitalize(), idioma),
                                           thumbnail2=item.thumbnail,
                                           thumbnail=os.path.join(config.get_runtime_path(), "resources", "media",
                                                                  "servers", "server_" + s[2] + ".png")))

    # Insert "search trailer" and "add to videolibrary" items for movies.
    if itemlist and item.extra == "movie":
        if item.contentQuality:
            title = "%s [%s]" % (item.contentTitle, item.contentQuality)
        else:
            title = item.contentTitle

        itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer",
                                      text_color=color3, title=title, viewmode="list"))

        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
                                 action="add_pelicula_to_library", url=item.url, text_color="green",
                                 contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))

    return itemlist
示例#2
0
def findvideos(item):
    """Return playable video items for one estrenosli.org listing.

    Fetches the listing page, walks every language-flag link and asks
    servertools to identify the hosted videos.  'multi-episodie' entries are
    resolved chapter by chapter.  For movies, a trailer-search entry is
    prepended and a library entry appended.
    """
    logger.info()
    itemlist = []
    list_opciones = []
    # Flag CSS class -> language label shown in the title suffix.
    IDIOMAS = {"banderita1": "Español", "banderita2": "VOSE", "banderita3": "Latino"}

    page = httptools.downloadpage("http://estrenosli.org/ver-online-" + item.url).data
    page = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", page)

    patron = ('<div class="content"><a href="([^"]+).*?'
              '<div class="content_mini"><span class="([^"]+)')
    for url, banderita in re.compile(patron, re.DOTALL).findall(page):
        idioma = " [%s]" % IDIOMAS[banderita] if banderita in IDIOMAS else ""

        data = httptools.downloadpage(url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

        if item.extra == 'multi-episodie':
            patron_cap = '<div class="linksDescarga"><span class="titulo">Video Online:([^<]+).*?<a href="([^"]+)'
            for capitulo, link in re.compile(patron_cap, re.DOTALL).findall(data):
                s = servertools.findvideos(link, skip=True)
                if not s:
                    continue
                itemlist.append(item.clone(
                    url=s[0][1], action="play", folder=False, server=s[0][2],
                    title="Ver %s en %s%s" % (capitulo.strip(), s[0][2].capitalize(), idioma),
                    thumbnail2=item.thumbnail,
                    thumbnail="http://media.tvalacarta.info/servers/server_" + s[0][2] + ".png"))
        else:
            for s in servertools.findvideos(data):
                itemlist.append(item.clone(
                    url=s[1], action="play", folder=False, server=s[2],
                    title="Ver en %s%s" % (s[2].capitalize(), idioma),
                    thumbnail2=item.thumbnail,
                    thumbnail="http://media.tvalacarta.info/servers/server_" + s[2] + ".png"))

    # Movies additionally get a trailer-search entry and a library entry.
    if itemlist and item.extra == "movie":
        title = item.contentTitle
        if item.contentQuality:
            title = "%s [%s]" % (item.contentTitle, item.contentQuality)

        itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer",
                                      text_color=color3, title=title, viewmode="list"))

        if config.get_library_support():
            itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca",
                                 action="add_pelicula_to_library", url=item.url, text_color="green",
                                 contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))

    return itemlist
def detail(item):
    """Resolve a detail page into playable items, one per video found."""
    logger.info("[Descarregadirecta.py] detail")

    # Download the page (cached) and scan it for known video hosters.
    data = scrapertools.cachePage(item.url)

    itemlist = []
    for video in servertools.findvideos(data):
        server = video[2]
        itemlist.append(Item(channel=item.channel,
                             action="play",
                             title=item.title + " [" + server + "]",
                             url=video[1],
                             thumbnail=item.thumbnail,
                             plot=item.plot,
                             server=server,
                             folder=False))

    return itemlist
示例#4
0
def play(item):
    """Build trailer entries: one item per video detected in the page.

    Note the emitted items keep item.url as their URL (the original page),
    not the individual video URL.
    """
    logger.info("pelisalacarta.bricocine findvideos")

    itemlist = servertools.find_video_items(data=item.url)
    page = scrapertools.cache_page(item.url)

    for video in servertools.findvideos(page):
        itemlist.append(Item(
            channel=item.channel,
            action="play",
            server=video[2],
            title="Trailer - " + scrapertools.unescape(video[0]),
            url=item.url,
            thumbnail=item.thumbnail,
            plot=item.plot,
            fulltitle=item.title,
            fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg",
            folder=False))

    return itemlist
def findvideos(item):
    """Scan the page at item.url and build one playable entry per video.

    Fix: the `item is None` guard must run BEFORE item.url is dereferenced;
    previously it came after the download, so it could never take effect.
    """
    logger.info("[guaridavalencianista.py] findvideos")

    if item is None:
        item = Item()

    data = scrapertools.cachePage(item.url)

    # Busca los enlaces a los videos
    listavideos = servertools.findvideos(data)

    itemlist = []
    for video in listavideos:
        scrapedtitle = video[0].strip() + " - " + item.title.strip()
        scrapedurl = video[1]
        server = video[2]

        itemlist.append(
            Item(
                channel=item.channel,
                title=scrapedtitle,
                action="play",
                server=server,
                url=scrapedurl,
                thumbnail=item.thumbnail,
                show=item.show,
                plot=item.plot,
                folder=False,
            )
        )

    return itemlist
示例#6
0
def detail(item):
    """Turn a detail page into a list of playable items."""
    logger.info("[Descarregadirecta.py] detail")

    # Fetch the page and let servertools discover any hosted videos in it.
    page = scrapertools.cachePage(item.url)
    videos = servertools.findvideos(page)

    itemlist = []
    for video in videos:
        hoster = video[2]
        entry = Item(channel=item.channel,
                     action="play",
                     title=item.title + " [" + hoster + "]",
                     url=video[1],
                     thumbnail=item.thumbnail,
                     plot=item.plot,
                     server=hoster,
                     folder=False)
        itemlist.append(entry)

    return itemlist
示例#7
0
def play(item):
    """Emit trailer entries for every video found in the page at item.url.

    Each entry keeps item.url (the page), not the individual video URL.
    """
    logger.info()

    itemlist = servertools.find_video_items(data=item.url)
    page = scrapertools.cache_page(item.url)
    videos = servertools.findvideos(page)

    for video in videos:
        name = scrapertools.unescape(video[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 server=video[2],
                 title="Trailer - " + name,
                 url=item.url,
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 fulltitle=item.title,
                 fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg",
                 folder=False))

    return itemlist
示例#8
0
def list_single_site(queue, item):
    """Search one channel for videos and push the resulting list to *queue*.

    Tries the channel's own findvideos() first; on any failure it falls back
    to downloading item.url and letting servertools detect hosted videos.
    Intended to run in a worker thread/process — results go out via queue.
    """
    logger.info(" list_single_site")
    channelitemlist = []
    try:
        # logger.info(item.channel + " start channel search " + time.strftime("%Y-%m-%d %H:%M:%S"))
        # Dynamically resolve channels.<name> and delegate to its findvideos().
        module_to_call = getattr(__import__("channels"), item.channel)
        channelitemlist = module_to_call.findvideos(item)
        queue.put(channelitemlist)
        # logger.info(item.channel + " end channel search " + time.strftime("%Y-%m-%d %H:%M:%S"))
    except:
        # NOTE(review): bare except — ANY failure (not only a missing
        # findvideos) routes into the generic servertools fallback below.
        try:
            # logger.info(item.channel + " start servertools search " + time.strftime("%Y-%m-%d %H:%M:%S"))
            # logger.info("no findvideos defined in channel functions, calling servertools.findvideos to find links")
            servertools_itemlist = []
            headers = [['Referer', item.channel]]
            data = httptools.downloadpage(item.url, headers=headers).data
            # servertools result tuples: [0]=server name, [1]=url, [3]=thumbnail.
            list_servertools = servertools.findvideos(data)
            for item_servertools in list_servertools:
                servertools_itemlist.append(
                    Item(channel=item.channel,
                         action="play",
                         fulltitle=item.title,
                         server=item_servertools[0],
                         thumbnail=item_servertools[3],
                         title=item.title,
                         url=item_servertools[1]))
            queue.put(servertools_itemlist)
            # logger.info(item.channel + " end servertools search " + time.strftime("%Y-%m-%d %H:%M:%S"))
        except Exception, e:
            logger.error('exception in list_single_site: ' + str(e))
def findvideos(item):
    """List playable and torrent links from a TPlayer-style page.

    Scans the page for player tabs (title + iframe URL pairs), resolves
    goo.gl shorteners without downloading the body, and also collects any
    torrent table rows at the bottom of the page.
    """
    logger.info()

    itemlist = []
    language = ''
    quality = ''
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                  httptools.downloadpage(item.url).data)
    data = re.sub(r"&quot;", '"', data)
    data = re.sub(r"&lt;", '<', data)

    titles = re.compile('data-TPlayerNv="Opt\d+">.*? <span>(.*?)</span></li>',
                        re.DOTALL).findall(data)
    urls = re.compile('id="Opt\d+"><iframe[^>]+src="([^"]+)"',
                      re.DOTALL).findall(data)

    # Titles and iframes are matched positionally, so the counts must agree.
    if len(titles) == len(urls):
        for i in range(0, len(titles)):
            if i > 0:
                logger.debug('titles: %s' % titles[i].strip())
                # Tab label is "Language - Quality".  Guard the unpack:
                # labels without the separator used to raise ValueError
                # (and more than one separator also crashed the 2-way split).
                if ' - ' in titles[i]:
                    language, quality = titles[i].split(' - ', 1)
                title = titles[i].strip()
            else:
                title = titles[0]

            if "goo.gl" in urls[i]:
                # Shortener: read the Location header only, no body download.
                urls[i] = httptools.downloadpage(urls[i], follow_redirects=False, only_headers=True)\
                    .headers.get("location", "")
            videourl = servertools.findvideos(urls[i])
            if len(videourl) > 0:
                server = videourl[0][0].capitalize()
                title = '%s %s' % (server, title)
                itemlist.append(
                    Item(channel=item.channel,
                         action="play",
                         title=title,
                         url=videourl[0][1],
                         server=server,
                         thumbnail=videourl[0][3],
                         fulltitle=item.title,
                         language=language,
                         quality=quality))

    # Torrent rows: href, text, language and quality columns.
    pattern = '<a[^>]+href="([^"]+)"[^<]+</a></td><td><span><img[^>]+>(.*?)</span></td><td><span><img[^>]+>(.*?)' \
              '</span></td><td><span>(.*?)</span>'
    torrents = re.compile(pattern, re.DOTALL).findall(data)

    # Renamed loop variable so it no longer shadows `quality` above.
    for url, text, lang, torrent_quality in torrents:
        title = "%s %s - %s" % (text, lang, torrent_quality)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=title,
                 url=url,
                 server="torrent",
                 fulltitle=item.title,
                 thumbnail=get_thumb("channels_torrent.png")))

    return itemlist
示例#10
0
def play(item):
    """Resolve the final hoster link behind a verseriesynovelas redirect page.

    Fix: `data` is now initialized before the try block — if the download
    raised, the code below crashed with NameError instead of degrading.
    """
    logger.info("pelisalacarta.channels.verseriesynovelas play")
    itemlist = []

    data = ""
    try:
        data = scrapertools.downloadpage(item.url, headers=CHANNEL_HEADERS)
    except:
        pass

    url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
    if not url_redirect:
        # Some responses come back gzip-compressed; decompress and retry.
        try:
            import StringIO
            compressedstream = StringIO.StringIO(data)
            import gzip
            gzipper = gzip.GzipFile(fileobj=compressedstream)
            data = gzipper.read()
            gzipper.close()
            url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
        except:
            pass

    # Follow the redirect and let servertools identify the hoster URL.
    location = scrapertools.get_header_from_response(url_redirect, headers=CHANNEL_HEADERS[:2], header_to_get="location")
    enlaces = servertools.findvideos(data=location)
    if len(enlaces) > 0:
        itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))

    return itemlist
示例#11
0
def findvideos(item):
    """List the playable links behind each 'video_button' entry.

    Fix: the loop counter was initialized but never incremented, so the
    `if i > 0` re-download never ran and every entry was resolved against
    the first page's HTML.  enumerate() now drives the counter.
    """
    logger.info("deportesalacarta.channels.socceryou findvideos")
    itemlist = []
    data = scrapertools.downloadpage(item.url)

    videos = scrapertools.find_single_match(
        data, "<div class='video_button'>(.*?)</div>")
    matches = scrapertools.find_multiple_matches(videos,
                                                 "href='([^']+)'>([^<]+)</a>")
    # The first button points at the already-downloaded page; later buttons
    # each have their own page that must be fetched before scanning.
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        if not scrapedurl.startswith("http"):
            scrapedurl = host_soccer + scrapedurl
        scrapedtitle = scrapedtitle.strip()
        if i > 0:
            data = scrapertools.downloadpage(scrapedurl)
        videos_list = servertools.findvideos(data)
        if not videos_list:
            continue
        videos_list = videos_list[0]
        scrapedtitle += " [%s]" % videos_list[2]
        itemlist.append(
            item.clone(title=scrapedtitle,
                       action="play",
                       url=videos_list[1],
                       server=videos_list[2]))

    return itemlist
示例#12
0
def enlaces(item):
    """Expand the hidden links stored in item.extra into playable items.

    Folder links keep the server-reported title; plain links are numbered
    "<title> - Enlace N" counting down.  The result is sorted by title.
    """
    logger.info("pelisalacarta.channels.descargasmix enlaces")
    itemlist = []
    data = scrapertools.downloadpage(item.url)

    urls = mostrar_enlaces(item.extra)
    numero = len(urls)
    for enlace in urls:
        for link in servertools.findvideos(data=enlace):
            if "/folder/" in enlace:
                titulo = link[0]
            else:
                titulo = item.title.split("-")[0] + " - Enlace " + str(numero)
                numero -= 1
            itemlist.append(item.clone(action="play",
                                       server=link[2],
                                       title=titulo,
                                       url=link[1]))

    itemlist.sort(key=lambda entry: entry.title)
    return itemlist
示例#13
0
def findvideos(item):
    """Scan the page at item.url and build one playable entry per video.

    Fix: the `item is None` guard must run BEFORE item.url is dereferenced;
    previously it came after the download, so it could never take effect.
    """
    logger.info()

    if item is None:
        item = Item()

    data = scrapertools.cachePage(item.url)

    # Busca los enlaces a los videos
    listavideos = servertools.findvideos(data)

    itemlist = []
    for video in listavideos:
        scrapedtitle = video[0].strip() + " - " + item.title.strip()
        scrapedurl = video[1]
        server = video[2]

        itemlist.append(
            Item(channel=item.channel,
                 title=scrapedtitle,
                 action="play",
                 server=server,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 plot=item.plot,
                 folder=False))

    return itemlist
示例#14
0
def play(item):
    """Resolve the real video URL behind a peliculasaudiolatino page."""
    logger.info("channels.peliculasaudiolatino play")
    itemlist = []

    # First hop: the listing page embeds a HOST/show/ frame.
    page = httptools.downloadpage(item.url).data
    frame_url = scrapertools.find_single_match(page,
                                               'src="(' + HOST + '/show/[^"]+)"')
    frame = httptools.downloadpage(
        frame_url,
        headers=[['User-Agent', 'Mozilla/5.0'],
                 ['Accept-Encoding', 'gzip, deflate'], ['Referer', HOST],
                 ['Connection', 'keep-alive']]).data

    # Second hop: the frame holds an IFRAME pointing at the hoster.
    videoUrl = scrapertools.find_single_match(frame, '<IFRAME SRC="([^"]+)"')
    goo = scrapertools.find_single_match(videoUrl, '://([^/]+)/')
    if goo == 'goo.gl':
        # Shortener: follow the redirect via the Location header only.
        videoUrl = httptools.downloadpage(videoUrl,
                                          follow_redirects=False,
                                          only_headers=True).headers["location"]
        server = scrapertools.find_single_match(videoUrl, '://([^/]+)/')
    #logger.info("videoUrl = "+videoUrl)
    enlaces = servertools.findvideos(videoUrl)
    if enlaces:
        thumbnail = servertools.guess_server_thumbnail(videoUrl)
        # Añade al listado de XBMC
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=item.title,
                 fulltitle=item.fulltitle,
                 url=enlaces[0][1],
                 server=enlaces[0][2],
                 thumbnail=thumbnail,
                 folder=False))

    return itemlist
示例#15
0
def play(item):
    """Resolve item.url with its declared server, falling back to a full scan."""
    logger.info()

    itemlist = []

    # Try the server the item already declares ...
    devuelve = servertools.findvideosbyserver(item.url, item.server)
    if not devuelve:
        # ... and only if that finds nothing, scan every known server.
        devuelve = servertools.findvideos(item.url, skip=True)

    if devuelve:
        # logger.debug(devuelve)
        itemlist.append(Item(channel=item.channel,
                             title=item.contentTitle,
                             action="play",
                             server=devuelve[0][2],
                             url=devuelve[0][1],
                             thumbnail=item.thumbnail,
                             folder=False))

    return itemlist
示例#16
0
def play(item):
    """List every video servertools can find in the page at item.url."""
    logger.info()
    itemlist = []

    page = scrapertools.cache_page(item.url)
    #logger.info("data="+page)

    for video in servertools.findvideos(page):
        scrapedtitle = item.title + video[0]
        videourl = video[1]
        server = video[2]
        logger.debug("title=[" + scrapedtitle + "], url=[" + videourl + "]")

        # Añade al listado de XBMC
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=scrapedtitle,
                 fulltitle=item.fulltitle,
                 url=videourl,
                 server=server,
                 folder=False))

    return itemlist
示例#17
0
def play(item):
    """Resolve a descargasmix link into its playable URL(s).

    Three paths: enlacesmix.com frames are unwrapped and resolved; server
    "directo" streams pick the best available quality; anything else is
    passed through unchanged.

    Fix: the enlacesmix branch indexed `findvideos(...)[0]` before checking
    emptiness, raising IndexError when no video was found (the old length
    check then inspected the tuple, not the list).
    """
    logger.info("pelisalacarta.channels.descargasmix play")
    global DEFAULT_HEADERS
    itemlist = []
    if "enlacesmix.com" in item.url:
        DEFAULT_HEADERS.append(["Referer", item.extra])
        if not item.url.startswith("http:"):
            item.url = "http:" + item.url
        data = scrapertools.downloadpage(item.url, headers=DEFAULT_HEADERS)
        item.url = scrapertools.find_single_match(data, 'iframe src="([^"]+)"')

        enlaces = servertools.findvideos(data=item.url)
        if enlaces:
            itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
    elif item.server == "directo":
        DEFAULT_HEADERS.append(["Referer", item.extra])
        data = scrapertools.downloadpage(item.url, headers=DEFAULT_HEADERS)
        subtitulo = scrapertools.find_single_match(data, "var subtitulo='([^']+)'")
        DEFAULT_HEADERS[1][1] = item.url
        # Try qualities best-first and keep the first one that resolves.
        calidades = ["1080p", "720p", "480p", "360p"]
        for calidad in calidades:
            url_redirect = scrapertools.find_single_match(data, "{file:'([^']+)',label:'"+calidad+"'")
            if url_redirect:
                url_video = scrapertools.get_header_from_response(url_redirect, header_to_get="location", headers=DEFAULT_HEADERS)
                if url_video:
                    # Commas break the player URL handling; escape them.
                    url_video = url_video.replace(",", "%2C")
                    itemlist.append(item.clone(url=url_video, subtitle=subtitulo))
                    break
    else:
        itemlist.append(item.clone())

    return itemlist
示例#18
0
def play(item):
    """Resolve the final hoster link behind a verseriesynovelas redirect page.

    Fix: `data` is now initialized before the try block — if the download
    raised, the code below crashed with NameError instead of degrading.
    """
    logger.info("pelisalacarta.channels.verseriesynovelas play")
    itemlist = []

    data = ""
    try:
        data = scrapertools.downloadpage(item.url, headers=CHANNEL_HEADERS)
    except:
        pass

    url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
    if not url_redirect:
        # Some responses come back gzip-compressed; decompress and retry.
        try:
            import StringIO
            compressedstream = StringIO.StringIO(data)
            import gzip
            gzipper = gzip.GzipFile(fileobj=compressedstream)
            data = gzipper.read()
            gzipper.close()
            url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
        except:
            pass

    # Follow the redirect and let servertools identify the hoster URL.
    location = scrapertools.get_header_from_response(url_redirect, headers=CHANNEL_HEADERS[:2], header_to_get="location")
    enlaces = servertools.findvideos(data=location)
    if len(enlaces) > 0:
        itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))

    return itemlist
示例#19
0
def findvideos(item):
    """Extract a Blogger post body and list every video found inside it."""
    logger.info()
    itemlist = []

    # Keep only the entry content between the post-body and post-footer divs.
    page = scrapertools.cachePage(item.url)
    page = scrapertools.get_match(
        page,
        "<div class='post-body entry-content'(.*?)<div class='post-footer'>")

    # Busca los enlaces a los videos
    for video in servertools.findvideos(page):
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 server=video[2],
                 title=scrapertools.unescape(video[0]),
                 url=video[1],
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 fulltitle=item.title,
                 folder=False))

    return itemlist
def play(item):
    """Translate peliculasaudiolatino proxy URLs into real hoster URLs and
    list the playable videos found in the result."""
    logger.info("channels.peliculasaudiolatino play")
    itemlist = []

    data2 = item.url

    # javascript: links hide the real URL inside window.open('...').
    if data2.startswith("javascript"):
        item.url = scrapertools.find_single_match(data2, "window.open\('([^']+)'")
        data2 = scrapertools.cache_page(item.url)

    logger.info("data2=" + data2)

    # Site-local proxy prefix -> hoster URL it wraps.  Applied in order,
    # exactly equivalent to the original chain of str.replace calls.
    reemplazos = [
        ("http://peliculasaudiolatino.com/show/vidbux.php?url=", "http://www.vidbux.com/"),
        ("http://peliculasaudiolatino.com/show/vidxden.php?url=", "http://www.vidxden.com/"),
        ("http://peliculasaudiolatino.com/v/pl/play.php?url=", "http://www.putlocker.com/embed/"),
        ("http://peliculasaudiolatino.com/v/mv/play.php?url=", "http://www.modovideo.com/frame.php?v="),
        ("http://peliculasaudiolatino.com/v/ss/play.php?url=", "http://www.sockshare.com/embed/"),
        ("http://peliculasaudiolatino.com/v/vb/play.php?url=", "http://vidbull.com/"),
        ("http://peliculasaudiolatino.com/v/vk/play.php?url=", "http://vk.com/video_ext.php?oid="),
        ("http://peliculasaudiolatino.com/v/ttv/play.php?url=", "http://www.tumi.tv/"),
        ("http://peliculasaudiolatino.com/show/sockshare.php?url=", "http://www.sockshare.com/embed/"),
        ("http://peliculasaudiolatino.com/show/moevide.php?url=", "http://moevideo.net/?page=video&uid="),
        ("http://peliculasaudiolatino.com/show/novamov.php?url=", "http://www.novamov.com/video/"),
        ("http://peliculasaudiolatino.com/show/movshare.php?url=", "http://www.movshare.net/video/"),
        ("http://peliculasaudiolatino.com/show/divxstage.php?url=", "http://www.divxstage.net/video/"),
        ("http://peliculasaudiolatino.com/show/tumi.php?url=", "http://www.tumi.tv/"),
        ("http://peliculasaudiolatino.com/show/playerto.php?url=", "http://played.to/"),
        ("http://peliculasaudiolatino.com/show/videoweed.php?url=", "http://www.videoweed.es/file/"),
        ("http://peliculasaudiolatino.com/show/netu.php?url=", "http://netu.tv/watch_video.php?v="),
        ("http://peliculasaudiolatino.com/show/powvideo.php?url=", "http://powvideo.net/"),
        ("http://peliculasaudiolatino.com/show/streamin.php?url=", "http://streamin.to/"),
        ("http://peliculasaudiolatino.com/show/vidspot.php?url=", "http://vidspot.net/"),
        ("http://peliculasaudiolatino.com/show/allmy.php?url=", "http://allmyvideos.net/"),
        ('http://peliculasaudiolatino.com/show/r"></iframe>url=', "http://realvid.net/"),
        ("http://peliculasaudiolatino.com/show/roc.php?url=", "http://rocvideo.net/"),
        ("http://peliculasaudiolatino.com/show/vide.php?url=", "http://thevideo.me/"),
        ("http://peliculasaudiolatino.com/show/vidto.php?url=", "http://vidto.me/"),
        ("http://peliculasaudiolatino.com/show/vodlocker.php?url=", "http://vodlocker.com/"),
        ("http://peliculasaudiolatino.com/show/videomega.php?url=", "http://videomega.tv/?ref="),
        ("http://peliculasaudiolatino.com/show/gamo.php?url=", "http://gamovideo.com/"),
        ("%26", "&"),
    ]
    for viejo, nuevo in reemplazos:
        data2 = data2.replace(viejo, nuevo)
    logger.info("data2=" + data2)

    for video in servertools.findvideos(data2):
        scrapedtitle = item.title + video[0]
        videourl = video[1]
        server = video[2]
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + videourl + "]")

        # Añade al listado de XBMC
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle,
                             fulltitle=item.fulltitle, url=videourl, server=server, folder=False))

    return itemlist
示例#21
0
def play(item):
    """Fill in item.url / item.server from the first video found in item.url."""
    logger.info()
    from core import servertools

    encontrados = servertools.findvideos(item.url, True)
    if encontrados:
        item.url = encontrados[0][1]
        item.server = encontrados[0][2]

    return [item]
def play(item):
    """Follow the site's redirect link and resolve the final hoster URL."""
    logger.info("pelisalacarta.channels.verseriesynovelas play")
    itemlist = []

    # enlaces.php?op= pages redirect through enlace.php?u=.
    item.url = item.url.replace("enlaces.php?op=", "enlace.php?u=")
    location = scrapertools.get_header_from_response(item.url,
                                                     headers=CHANNEL_HEADERS,
                                                     header_to_get="location")
    enlaces = servertools.findvideos(data=location)
    if enlaces:
        itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))

    return itemlist
示例#23
0
def _parse_gplus(html):
    """Extract a playable source URL from a Google+ embed page."""
    sources = []
    found = re.search('<c-wiz.+?track:impression,click".*?jsdata\s*=\s*".*?(http[^"]+)"', html, re.DOTALL)
    if found is None:
        return sources
    # Unescape ampersands and drop any trailing ';...' suffix.
    source = found.group(1).replace('&amp;', '&').split(';')[0]
    resolved = servertools.findvideos(data=source, skip=True)
    if resolved:
        resolved, ok, msg = servertools.resolve_video_urls_for_playing(url=resolved[0][1], server=resolved[0][2])
        if ok:
            sources.append(['Unknown Quality', resolved[0][1]])
    return sources
示例#24
0
def play(item):
    """Rewrite the redirect URL, follow it, and emit the resolved link."""
    logger.info("pelisalacarta.channels.verseriesynovelas play")
    itemlist = []

    # The op= listing URL redirects through the u= resolver endpoint.
    item.url = item.url.replace("enlaces.php?op=", "enlace.php?u=")
    location = scrapertools.get_header_from_response(
        item.url, headers=CHANNEL_HEADERS, header_to_get="location")

    enlaces = servertools.findvideos(data=location)
    if len(enlaces) > 0:
        itemlist.append(item.clone(action="play",
                                   server=enlaces[0][2],
                                   url=enlaces[0][1]))

    return itemlist
def _parse_gplus(html):
    """Pull the embedded video URL out of a Google+ page and resolve it."""
    sources = []
    pat = '<c-wiz.+?track:impression,click".*?jsdata\s*=\s*".*?(http[^"]+)"'
    m = re.search(pat, html, re.DOTALL)
    if m:
        # Unescape '&amp;' and keep only the part before the first ';'.
        raw = m.group(1).replace('&amp;', '&')
        source = raw.split(';')[0]
        resolved = servertools.findvideos(data=source, skip=True)
        if resolved:
            resolved, ok, msg = servertools.resolve_video_urls_for_playing(
                url=resolved[0][1], server=resolved[0][2])
            if ok:
                sources.append(['Unknown Quality', resolved[0][1]])
    return sources
示例#26
0
def epienlaces(item):
    """Build the download/online link list for one descargasmix episode.

    item.extra carries the episode delimiter used to locate its link block
    in the page.  Magnet links are grouped first under an extra
    "Enlaces Torrent" header; everything else is resolved per server.
    """
    logger.info("pelisalacarta.channels.descargasmix epienlaces")
    itemlist = []
    item.text_color = color3

    data = scrapertools.downloadpage(item.url)
    data = data.replace("\n", "").replace("\t", "")

    # Link block: strip any [Color...] BBCode around the delimiter, then cut
    # out the fragment of the page belonging to this episode.
    delimitador = item.extra.strip()
    delimitador = re.sub(r'(?i)(\[(?:/|)Color.*?\])', '', delimitador)
    patron = '<div class="cap">'+delimitador+'(.*?)(?:<div class="polo"|</li>)'
    bloque = scrapertools.find_single_match(data, patron)

    patron = '<div class="episode-server">.*?href="([^"]+)"' \
             '.*?data-server="([^"]+)"' \
             '.*?<div class="caliycola">(.*?)</div>'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    itemlist.append(item.clone(action="", title="Enlaces de Descarga/Online", text_color=color1))
    for scrapedurl, scrapedserver, scrapedcalidad in matches:
        # Normalize the site's server aliases to pelisalacarta server ids.
        if scrapedserver == "ul":
            scrapedserver = "uploadedto"
        if scrapedserver == "streamin":
            scrapedserver = "streaminto"
        titulo = "    " + scrapedserver.capitalize() + " [" + scrapedcalidad + "]"
        # Download links
        if scrapedserver == "magnet":
            # Magnet links are inserted at the front, above the header item.
            itemlist.insert(0, item.clone(action="play", title=titulo, server="torrent", url=scrapedurl))
        else:
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    # Importing servers.<name> verifies the server connector exists.
                    servers_module = __import__("servers."+scrapedserver)
                    if "enlacesmix.com" in scrapedurl:
                        itemlist.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
                                                   extra=item.url))
                    else:
                        enlaces = servertools.findvideos(data=scrapedurl)
                        if len(enlaces) > 0:
                            titulo = "    " + enlaces[0][2].capitalize() + "  [" + scrapedcalidad + "]"
                            itemlist.append(item.clone(action="play", server=enlaces[0][2], title=titulo,
                                                       url=enlaces[0][1]))
                except:
                    # NOTE(review): bare except silently drops unresolvable links.
                    pass

    # If any magnet link ended up first, label that group with its own header.
    if itemlist[0].server == "torrent":
        itemlist.insert(0, item.clone(action="", title="Enlaces Torrent", text_color=color1))

    return itemlist
示例#27
0
def epienlaces(item):
    """Build the list of download/online links for one episode.

    item.extra carries the episode label used to locate its link block in the
    page at item.url.  Magnet links are promoted to the front of the list and,
    if any exist, get their own "Enlaces Torrent" header.
    """
    logger.info("pelisalacarta.channels.descargasmix epienlaces")
    itemlist = []
    item.text_color = color3

    data = scrapertools.downloadpage(item.url)
    data = data.replace("\n", "").replace("\t", "")

    # Links block: isolate the fragment of the page belonging to this episode.
    delimitador = item.extra.strip()
    # Strip any [Color ...] markup from the label before embedding it in the regex.
    delimitador = re.sub(r'(?i)(\[(?:/|)Color.*?\])', '', delimitador)
    patron = '<div class="cap">'+delimitador+'(.*?)(?:<div class="polo"|</li>)'
    bloque = scrapertools.find_single_match(data, patron)

    patron = '<div class="episode-server">.*?href="([^"]+)"' \
             '.*?data-server="([^"]+)"' \
             '.*?<div class="caliycola">(.*?)</div>'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    itemlist.append(item.clone(action="", title="Enlaces de Descarga/Online", text_color=color1))
    for scrapedurl, scrapedserver, scrapedcalidad in matches:
        # Normalise site-specific server aliases to plugin server ids.
        if scrapedserver == "ul":
            scrapedserver = "uploadedto"
        if scrapedserver == "streamin":
            scrapedserver = "streaminto"
        titulo = "    " + scrapedserver.capitalize() + " [" + scrapedcalidad + "]"
        # Download links: magnets are inserted at index 0, ahead of the header.
        if scrapedserver == "magnet":
            itemlist.insert(0, item.clone(action="play", title=titulo, server="torrent", url=scrapedurl))
        else:
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    # Probe that a connector module exists for this server;
                    # any failure (ImportError included) silently skips the link.
                    servers_module = __import__("servers."+scrapedserver)
                    if "enlacesmix.com" in scrapedurl:
                        # enlacesmix wrappers are resolved later at play time;
                        # keep the page url in extra as the needed Referer.
                        itemlist.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
                                                   extra=item.url))
                    else:
                        enlaces = servertools.findvideos(data=scrapedurl)
                        if len(enlaces) > 0:
                            titulo = "    " + enlaces[0][2].capitalize() + "  [" + scrapedcalidad + "]"
                            itemlist.append(item.clone(action="play", server=enlaces[0][2], title=titulo,
                                                       url=enlaces[0][1]))
                except:
                    pass

    # If a magnet ended up first, add the torrent section header above it.
    if itemlist[0].server == "torrent":
        itemlist.insert(0, item.clone(action="", title="Enlaces Torrent", text_color=color1))

    return itemlist
示例#28
0
def play(item):
    """Resolve the player page at item.url into a single playable entry.

    Downloads the page (item.extra is the POST payload), extracts the embedded
    iframe URL and asks servertools for a supported server.  Returns a
    one-element itemlist, or an empty list when no server recognises the URL.

    Fix: the original body mixed tab and space indentation, which raises
    TabError on Python 3 and is fragile on Python 2.
    """
    logger.info()
    itemlist = []

    player = httptools.downloadpage(item.url, item.extra).data
    video = scrapertools.find_single_match(player, '<iframe class="embed-responsive-item" src="([^"]+)"')
    enlaces = servertools.findvideos(video)
    if enlaces:
        # enlaces[0] is (title, url, server)
        thumbnail = servertools.guess_server_thumbnail(video)
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle,
                             url=enlaces[0][1], server=enlaces[0][2], thumbnail=thumbnail, folder=False))

    return itemlist
示例#29
0
def play(item):
    """Resolve the sipeliculas ajax player response into a playable entry.

    Fix: the original body mixed tab and space indentation, which raises
    TabError on Python 3 and is fragile on Python 2.
    """
    logger.info("pelisalacarta.channels.sipeliculas play")
    itemlist = []

    video = httptools.downloadpage(host+'/ajax.public.php', 'acc=ver_opc&f='+item.extra).data
    logger.info("video="+video)
    enlaces = servertools.findvideos(video)
    if enlaces:
        logger.info("server="+enlaces[0][2])
        thumbnail = servertools.guess_server_thumbnail(video)
        # Add to the XBMC listing
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle,
                             url=enlaces[0][1], server=enlaces[0][2], thumbnail=thumbnail, folder=False))

    return itemlist
示例#30
0
def play(item):
    """Resolve a cinefox redirect into a playable entry.

    Fix: the original did ``servertools.findvideos(url)[0]`` unguarded, which
    raises IndexError whenever no server matches; now returns an empty list
    in that case.
    """
    logger.info("pelisalacarta.channels.cinefox play")
    itemlist = []

    headers["Referer"] = item.url
    post = "id=%s" % item.extra
    data = scrapertools.downloadpage("http://www.cinefox.cc/goto/", post=post, headers=headers.items())

    url = scrapertools.find_single_match(data, 'document.location\s*=\s*"([^"]+)"')
    # Rewrite known mirror/embed URLs into their directly-resolvable form.
    url = url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
    url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
    enlaces = servertools.findvideos(url)
    if enlaces:
        itemlist.append(item.clone(url=enlaces[0][1], server=enlaces[0][2]))

    return itemlist
示例#31
0
def play(item):
    """Resolve enlacesmix.com wrapper pages to the real video link.

    URLs outside enlacesmix.com are already playable and pass through unchanged.

    Fix: the original indexed ``findvideos(...)[0]`` before checking emptiness
    (IndexError when no server matches) and then tested ``len()`` on the
    result tuple, which is always truthy.
    """
    logger.info("pelisalacarta.channels.descargasmix play")
    itemlist = []
    if "enlacesmix.com" in item.url:
        # The wrapper page requires the originating page as Referer.
        DEFAULT_HEADERS.append(["Referer", item.extra])
        data = scrapertools.downloadpage(item.url, headers=DEFAULT_HEADERS)
        item.url = scrapertools.find_single_match(data, 'iframe src="([^"]+)"')

        enlaces = servertools.findvideos(data=item.url)
        if len(enlaces) > 0:
            itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
    else:
        itemlist.append(item.clone())

    return itemlist
示例#32
0
def play(item):
    """Resolve enlacesmix.com wrapper pages to the real video link.

    Non-enlacesmix URLs are already playable and pass through unchanged.

    Fix: the original indexed ``findvideos(...)[0]`` before checking emptiness
    (IndexError when no server matches) and then tested ``len()`` on the
    result tuple, which is always truthy.
    """
    logger.info("pelisalacarta.channels.descargasmix play")
    itemlist = []
    if "enlacesmix.com" in item.url:
        # The wrapper page requires the originating page as Referer.
        DEFAULT_HEADERS.append(["Referer", item.extra])
        data = scrapertools.downloadpage(item.url, headers=DEFAULT_HEADERS)
        item.url = scrapertools.find_single_match(data, 'iframe src="([^"]+)"')

        enlaces = servertools.findvideos(data=item.url)
        if len(enlaces) > 0:
            itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
    else:
        itemlist.append(item.clone())

    return itemlist
示例#33
0
def play(item):
    """Resolve item.url to a concrete server link; optionally mark it watched."""
    logger.info()
    from core import servertools

    found = servertools.findvideos(item.url, True)
    if found:
        first = found[0]
        item.url = first[1]
        item.server = first[2]

    # Optionally record the item as watched, depending on channel settings.
    if config.get_setting("mark_play", "playmax"):
        if item.contentType == "movie":
            mark_item = item.clone(title="marcar como vista")
        else:
            mark_item = item.clone(title="Capítulo", epi_id=item.cid)
        marcar(mark_item)

    return [item]
示例#34
0
def play(item):
    """Normalise known embed URLs and resolve the first matching video link."""
    logger.info()
    itemlist = []

    # Rewrite mirror/embed URLs into their directly-resolvable form.
    url = item.url.replace("http://miracine.tv/n/?etu=",
                           "http://hqq.tv/player/embed_player.php?vid=")
    url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")

    # Prefer the server already attached to the item; otherwise autodetect.
    if item.server:
        links = servertools.findvideosbyserver(url, item.server)
    else:
        links = servertools.findvideos(url)

    if links:
        first = links[0]
        itemlist.append(item.clone(url=first[1], server=first[2]))
    return itemlist
示例#35
0
def play(item):
    """Fetch the ajax player page and build a playable Item from the first link."""
    logger.info()
    itemlist = []

    video = httptools.downloadpage(host + '/ajax.public.php', 'acc=ver_opc&f=' + item.extra).data
    logger.info("video=" + video)

    links = servertools.findvideos(video)
    if links:
        first = links[0]
        logger.info("server=" + first[2])
        thumbnail = servertools.guess_server_thumbnail(video)
        # Add the resolved entry to the XBMC listing.
        itemlist.append(Item(channel=item.channel, action="play", title=item.title,
                             fulltitle=item.fulltitle, url=first[1], server=first[2],
                             thumbnail=thumbnail, folder=False))

    return itemlist
示例#36
0
文件: lacajita.py 项目: yonvima/addon
def play(item):
    """Extract the real video URL from the page and resolve it to a server."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(data, 'window.open\("([^"]+)"')

    # First try the server already attached to the item; if that yields
    # nothing, fall back to full autodetection.
    links = servertools.findvideosbyserver(url, item.server)
    if links:
        itemlist.append(item.clone(action="play", url=links[0][1]))
    else:
        links = servertools.findvideos(url, True)
        if links:
            itemlist.append(item.clone(action="play", server=links[0][2], url=links[0][1]))

    return itemlist
示例#37
0
def play(item):
    """Resolve item.url into a playable entry; torrents pass through as-is.

    Fix: the original appended ``item.clone(server=server, url=url)``
    unconditionally, raising NameError when ``servertools.findvideos``
    returned nothing; the append now only happens when a link was found.
    """
    logger.info()
    itemlist = []
    if item.extra == "torrent":
        itemlist.append(item.clone())
    else:
        # Resolve bit.ly short links to their real destination first.
        if item.url.startswith("http://bit.ly/"):
            item.url = scrapertools.getLocationHeaderFromResponse(item.url)
        video_list = servertools.findvideos(item.url)
        if video_list:
            itemlist.append(item.clone(server=video_list[0][2], url=video_list[0][1]))

    return itemlist
示例#38
0
文件: cinefox.py 项目: fcammed/addon
def play(item):
    """Resolve a cinefox goto-redirect into a playable entry.

    Fix: the original indexed ``[0]`` on the findvideos result unguarded,
    raising IndexError whenever no server matched the URL; it now returns an
    empty itemlist in that case.
    """
    logger.info()
    itemlist = []
    if item.extra != "" and "google" not in item.url:
        post = "id=%s" % item.extra
        data = httptools.downloadpage(host + "/goto/", post=post, add_referer=True).data
        item.url = scrapertools.find_single_match(data, 'document.location\s*=\s*"([^"]+)"')

    # Rewrite known mirror/embed URLs into their directly-resolvable form.
    url = item.url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
    url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
    if item.server:
        enlaces = servertools.findvideosbyserver(url, item.server)
    else:
        enlaces = servertools.findvideos(url)
    if enlaces:
        itemlist.append(item.clone(url=enlaces[0][1], server=enlaces[0][2]))
    return itemlist
示例#39
0
def play(item):
    """Resolve item.url into a playable video item.

    If the page offers no recognised video links it may be gated behind a
    Google reCAPTCHA; in that case (XBMC platforms only) the captcha image is
    shown to the user and, once answered, play() retries itself recursively.
    """
    logger.info()
    itemlist = []
    data = scrapertools.cache_page(item.url)
    videos = servertools.findvideos(data)  

    if(len(videos)>0): 
        url = videos[0][1]
        server=videos[0][2]                   
        itemlist.append( Item(channel=item.channel, action="play" , title=item.title, fulltitle=item.fulltitle , url=url, thumbnail=item.thumbnail, plot=item.plot, server=server, extra=item.extra, folder=False))
    else:
        # No direct links found: look for the captcha-protected login form.
        patron='<ul class="form-login">(.*?)</ul'
        matches = re.compile(patron, re.S).findall(data)
        if(len(matches)>0):
            if "xbmc" in config.get_platform():
                data = matches[0]
                # Look for the reCAPTCHA public key
                patron='src="http://www.google.com/recaptcha/api/noscript\?k=([^"]+)"'
                pkeys = re.compile(patron, re.S).findall(data)
                if(len(pkeys)>0):
                    pkey=pkeys[0]
                    # Fetch the challenge id for that public key
                    data = scrapertools.cache_page("http://www.google.com/recaptcha/api/challenge?k="+pkey)
                    patron="challenge.*?'([^']+)'"
                    challenges = re.compile(patron, re.S).findall(data)
                    if(len(challenges)>0):
                        challenge = challenges[0]
                        image = "http://www.google.com/recaptcha/api/image?c="+challenge
                        
                        # CAPTCHA: show the image and collect the user's answer
                        # (Python 2 exec-statement import of the platform dialog).
                        exec "import platformcode.captcha as plugin"
                        tbd = plugin.Keyboard("","",image)
                        tbd.doModal()
                        confirmed = tbd.isConfirmed()
                        if (confirmed):
                            tecleado = tbd.getText()
                            logger.info("tecleado="+tecleado)
                            # NOTE(review): ``playurl`` is not defined in this
                            # function -- reaching this line raises NameError;
                            # presumably item.url was intended. Confirm upstream.
                            sendcaptcha(playurl,challenge,tecleado)
                        del tbd 
                        # tbd no longer exists past this point
                        if(confirmed and tecleado != ""):
                            itemlist = play(item)
            else:
                itemlist.append( Item(channel=item.channel, action="error", title="El sitio web te requiere un captcha") )

    logger.info("len(itemlist)=%s" % len(itemlist))
    return itemlist
示例#40
0
def play(item):
    """Resolve item.url into a playable video item (seriesyonkis variant).

    When the page has no recognised video links it may instead present a
    Google reCAPTCHA gate; on XBMC the captcha image is displayed and, after
    the user answers, play() calls itself again to retry.
    """
    logger.info("[seriesyonkis.py] play")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    videos = servertools.findvideos(data)  

    if(len(videos)>0): 
        url = videos[0][1]
        server=videos[0][2]                   
        itemlist.append( Item(channel=item.channel, action="play" , title=item.title, fulltitle=item.fulltitle , url=url, thumbnail=item.thumbnail, plot=item.plot, server=server, extra=item.extra, folder=False))
    else:
        # No direct links found: look for the captcha-protected login form.
        patron='<ul class="form-login">(.*?)</ul'
        matches = re.compile(patron, re.S).findall(data)
        if(len(matches)>0):
            if "xbmc" in config.get_platform():
                data = matches[0]
                # Look for the reCAPTCHA public key
                patron='src="http://www.google.com/recaptcha/api/noscript\?k=([^"]+)"'
                pkeys = re.compile(patron, re.S).findall(data)
                if(len(pkeys)>0):
                    pkey=pkeys[0]
                    # Fetch the challenge id for that public key
                    data = scrapertools.cache_page("http://www.google.com/recaptcha/api/challenge?k="+pkey)
                    patron="challenge.*?'([^']+)'"
                    challenges = re.compile(patron, re.S).findall(data)
                    if(len(challenges)>0):
                        challenge = challenges[0]
                        image = "http://www.google.com/recaptcha/api/image?c="+challenge
                        
                        # CAPTCHA: show the image and collect the user's answer
                        # (Python 2 exec-statement import of the platform dialog).
                        exec "import platformcode.captcha as plugin"
                        tbd = plugin.Keyboard("","",image)
                        tbd.doModal()
                        confirmed = tbd.isConfirmed()
                        if (confirmed):
                            tecleado = tbd.getText()
                            logger.info("tecleado="+tecleado)
                            # NOTE(review): ``playurl`` is undefined here -- this
                            # call raises NameError if reached; item.url was
                            # probably intended. Confirm before relying on it.
                            sendcaptcha(playurl,challenge,tecleado)
                        del tbd 
                        # tbd no longer exists past this point
                        if(confirmed and tecleado != ""):
                            itemlist = play(item)
            else:
                itemlist.append( Item(channel=item.channel, action="error", title="El sitio web te requiere un captcha") )

    logger.info("len(itemlist)=%s" % len(itemlist))
    return itemlist
示例#41
0
def play(item):
    """Resolve item.url into a playable entry; torrents pass through as-is.

    Fix: the original appended ``item.clone(server=server, url=url)`` outside
    the ``if video_list`` guard, raising NameError when no video was found;
    the append now happens only when a link exists.
    """
    logger.info("pelisalacarta.channels.seriecanal play")
    itemlist = []

    if item.extra == "torrent":
        itemlist.append(item.clone())
    else:
        # Resolve bit.ly short links to their real destination first.
        if item.url.startswith("http://bit.ly/"):
            item.url = scrapertools.getLocationHeaderFromResponse(item.url)
        video_list = servertools.findvideos(item.url)
        if video_list:
            itemlist.append(item.clone(server=video_list[0][2], url=video_list[0][1]))

    return itemlist
示例#42
0
def play(item):
    """Resolve a cinefox goto-redirect into a playable entry.

    Fix: the original did ``servertools.findvideos(url)[0]`` unguarded, which
    raises IndexError whenever no server matches; now returns an empty list
    in that case.
    """
    logger.info("pelisalacarta.channels.cinefox play")
    itemlist = []

    if item.extra != "":
        headers["Referer"] = item.url
        post = "id=%s" % item.extra
        data = scrapertools.downloadpage("http://www.cinefox.tv/goto/", post=post, headers=headers.items())

        item.url = scrapertools.find_single_match(data, 'document.location\s*=\s*"([^"]+)"')

    # Rewrite known mirror/embed URLs into their directly-resolvable form.
    url = item.url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
    url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
    enlaces = servertools.findvideos(url)
    if enlaces:
        itemlist.append(item.clone(url=enlaces[0][1], server=enlaces[0][2]))

    return itemlist
示例#43
0
def play(item):
    """Emit one playable Item for every video link found in the page."""
    logger.info("pelisalacarta.channels.reyanime play")
    itemlist = []

    data = scrapertools.cache_page(item.url)

    for video in servertools.findvideos(data):
        scrapedtitle = item.title + video[0]
        videourl = video[1]
        server = video[2]
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"]")
        # Add the entry to the XBMC listing.
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle,
                             fulltitle=item.fulltitle, url=videourl, server=server, folder=False))

    return itemlist
示例#44
0
def play(item):
    """Resolve item.url to a playable link.

    item.url may carry a "###<id>;<type>" suffix; it is stripped off and
    reported to the site's /a/status endpoint before the link is resolved.

    Fix: renamed the locals ``id`` and ``type``, which shadowed the builtins.
    """
    if "###" in item.url:
        target_id = item.url.split("###")[1].split(";")[0]
        target_type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]
        post = "target_id=%s&target_type=%s&target_status=1" % (target_id, target_type)
        # Fire-and-forget status notification; the response body is unused.
        httptools.downloadpage(host+"/a/status", post=post).data

    # Try the known server first, then fall back to autodetection.
    devuelve = servertools.findvideosbyserver(item.url, item.server)
    if devuelve:
        item.url = devuelve[0][1]
    else:
        devuelve = servertools.findvideos(item.url, True)
        if devuelve:
            item.url = devuelve[0][1]
            item.server = devuelve[0][2]

    return [item]
示例#45
0
def play(item):
    """Resolve item.url to a playable link and fill display metadata.

    item.url may carry a "###<id>;<type>" suffix; it is stripped off and
    reported to the site's /a/status endpoint before the link is resolved.

    Fix: renamed the locals ``id`` and ``type``, which shadowed the builtins.
    """
    if "###" in item.url:
        target_id = item.url.split("###")[1].split(";")[0]
        target_type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]
        post = "target_id=%s&target_type=%s&target_status=1" % (target_id, target_type)
        # Fire-and-forget status notification; the response body is unused.
        httptools.downloadpage(host + "/a/status", post=post).data

    # Try the known server first, then fall back to autodetection.
    devuelve = servertools.findvideosbyserver(item.url, item.server)
    if devuelve:
        item.url = devuelve[0][1]
    else:
        devuelve = servertools.findvideos(item.url, True)
        if devuelve:
            item.url = devuelve[0][1]
            item.server = devuelve[0][2]
    item.thumbnail = item.contentThumbnail
    item.fulltitle = item.contentTitle
    return [item]
示例#46
0
def play(item):
    """Resolve item.url to a playable link and fill display metadata.

    item.url may carry a "###<id>;<type>" suffix; it is stripped off and
    reported to the site's /a/status endpoint before the link is resolved.

    Fixes: renamed the locals ``id``/``type`` that shadowed builtins, and
    replaced the no-op ``item.contentTitle = item.contentTitle`` with the
    ``item.fulltitle`` assignment used by the sibling implementations.
    """
    if "###" in item.url:
        target_id = item.url.split("###")[1].split(";")[0]
        target_type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]
        post = "target_id=%s&target_type=%s&target_status=1" % (target_id, target_type)
        # Fire-and-forget status notification; the response is unused.
        agrupa_datos(urlparse.urljoin(host, "/a/status"), post=post)

    # Try the known server first, then fall back to autodetection.
    devuelve = servertools.findvideosbyserver(item.url, item.server)
    if devuelve:
        item.url = devuelve[0][1]
    else:
        devuelve = servertools.findvideos(item.url, True)
        if devuelve:
            item.url = devuelve[0][1]
            item.server = devuelve[0][2]
    item.thumbnail = item.contentThumbnail
    item.fulltitle = item.contentTitle
    return [item]
示例#47
0
文件: support.py 项目: iz8mbw/addon
def server(item,
           data='',
           itemlist=None,
           headers='',
           AutoPlay=True,
           CheckLinks=True,
           down_load=True):
    """Collect, resolve and verify video links for *item*.

    Scans *data* (downloaded from item.url when neither *data* nor *itemlist*
    is supplied) for video items, resolves unrecognised links through the
    unshortener + servertools, decorates titles, and hands the verified list
    to ``controls`` for AutoPlay/CheckLinks/download handling.

    Fix: the original declared ``itemlist=[]`` — a mutable default argument
    shared across calls; replaced by a ``None`` sentinel (behaviour for all
    existing callers is unchanged).
    """
    if itemlist is None:
        itemlist = []

    if not data and not itemlist:
        data = httptools.downloadpage(item.url,
                                      headers=headers,
                                      ignore_response_code=True).data

    itemList = servertools.find_video_items(data=str(data))
    itemlist = itemlist + itemList

    verifiedItemlist = []
    for videoitem in itemlist:
        if not videoitem.server:
            # Unknown server: unshorten the URL and try to identify it.
            videoitem.url = unshortenit.unshorten(videoitem.url)[0]
            findS = servertools.findvideos(videoitem.url)
            if findS:
                findS = findS[0]
            else:
                log(videoitem, 'Non supportato')
                continue
            videoitem.server = findS[2]
            videoitem.title = findS[0]
            videoitem.url = findS[1]
        # Normalise the parent title, then compose the entry title with
        # optional per-link title/quality suffixes.
        item.title = item.contentTitle.strip(
        ) if item.contentType == 'movie' or (config.get_localized_string(30161)
                                             in item.title) else item.title
        videoitem.title = item.title + (typo(
            videoitem.title, '_ color kod []') if videoitem.title else "") + (
                typo(videoitem.quality, '_ color kod []')
                if videoitem.quality else "")
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
        verifiedItemlist.append(videoitem)

    return controls(verifiedItemlist, item, AutoPlay, CheckLinks, down_load)
示例#48
0
def enlaces(item):
    """Expand the hidden links in item.extra into playable items, sorted by title."""
    logger.info()
    itemlist = []

    sources = mostrar_enlaces(item.extra)
    counter = len(sources)
    for source in sources:
        found = servertools.findvideos(data=source)
        if not found:
            continue
        for link in found:
            if "/folder/" in source:
                titulo = link[0]
            else:
                # Plain links are numbered in descending order.
                titulo = "%s - Enlace %s" % (item.title.split("-")[0], str(counter))
                counter -= 1
            itemlist.append(item.clone(action="play", server=link[2], title=titulo, url=link[1]))

    return sorted(itemlist, key=lambda entry: entry.title)
示例#49
0
def play(item):
    """Resolve a cinefox goto-redirect into a playable entry.

    Fix: the original indexed ``[0]`` on the findvideos result unguarded,
    raising IndexError whenever no server matched; now returns an empty list
    in that case.
    """
    logger.info()
    itemlist = []

    if item.extra != "":
        post = "id=%s" % item.extra
        data = httptools.downloadpage("http://www.cinefox.tv/goto/", post=post, add_referer=True).data

        item.url = scrapertools.find_single_match(data, 'document.location\s*=\s*"([^"]+)"')

    # Rewrite known mirror/embed URLs into their directly-resolvable form.
    url = item.url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
    url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
    if item.server:
        enlaces = servertools.findvideosbyserver(url, item.server)
    else:
        enlaces = servertools.findvideos(url)
    if enlaces:
        itemlist.append(item.clone(url=enlaces[0][1], server=enlaces[0][2]))

    return itemlist
示例#50
0
def findvideos(item):
    """Scan the post body at item.url and emit one playable Item per video link."""
    logger.info("[discoverymx.py] findvideos")
    itemlist = []

    # Download the page and keep only the post body section.
    page = scrapertools.cachePage(item.url)
    body = scrapertools.get_match(page, "<div class='post-body entry-content'(.*?)<div class='post-footer'>")

    for video in servertools.findvideos(body):
        videotitle = scrapertools.unescape(video[0])
        itemlist.append(Item(channel=item.channel, action="play", server=video[2],
                             title=videotitle, url=video[1], thumbnail=item.thumbnail,
                             plot=item.plot, fulltitle=item.title, folder=False))

    return itemlist
def play(item):
    """Follow the site's show/ iframe chain to the real video and make it playable.

    Fixes: the original body mixed tab and space indentation (TabError on
    Python 3), and bound an unused ``server`` local in the goo.gl branch.
    """
    logger.info("channels.peliculasaudiolatino play")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, 'src="('+HOST+'/show/[^"]+)"')
    data = httptools.downloadpage(data, headers=[['User-Agent', 'Mozilla/5.0'], ['Accept-Encoding', 'gzip, deflate'], ['Referer', HOST], ['Connection', 'keep-alive']]).data
    videoUrl = scrapertools.find_single_match(data, '<IFRAME SRC="([^"]+)"')
    goo = scrapertools.find_single_match(videoUrl, '://([^/]+)/')
    if goo == 'goo.gl':
        # Resolve the goo.gl shortener by reading the redirect Location header.
        videoUrl = httptools.downloadpage(videoUrl, follow_redirects=False, only_headers=True).headers["location"]
    enlaces = servertools.findvideos(videoUrl)
    if enlaces:
        thumbnail = servertools.guess_server_thumbnail(videoUrl)
        # Add to the XBMC listing
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle,
                             url=enlaces[0][1], server=enlaces[0][2], thumbnail=thumbnail, folder=False))

    return itemlist
示例#52
0
def enlaces(item):
    """Expand the hidden links in item.extra into playable items, sorted by title.

    Fix: the sort-key lambda parameter was named ``item``, shadowing the
    function's own ``item`` argument.
    """
    logger.info("pelisalacarta.channels.descargasmix enlaces")
    itemlist = []
    # NOTE(review): this download result is never used; presumably kept for
    # its session/cookie side effects -- confirm before removing.
    data = scrapertools.downloadpage(item.url)

    urls = mostrar_enlaces(item.extra)
    numero = len(urls)
    for enlace in urls:
        enlaces = servertools.findvideos(data=enlace)
        if len(enlaces) > 0:
            for link in enlaces:
                if "/folder/" in enlace:
                    titulo = link[0]
                else:
                    # Plain links are numbered in descending order.
                    titulo = item.title.split("-")[0]+" - Enlace "+str(numero)
                    numero -= 1
                itemlist.append(item.clone(action="play", server=link[2], title=titulo, url=link[1]))

    itemlist.sort(key=lambda it: it.title)
    return itemlist
示例#53
0
def enlaces(item):
    """Decode the obfuscated dm(c.a('...')) links on the page into playable items.

    Fix: the sort-key lambda parameter was named ``item``, shadowing the
    function's own ``item`` argument.
    """
    logger.info("pelisalacarta.channels.descargasmix enlaces")
    itemlist = []
    data = scrapertools.downloadpage(item.url)

    # Links block: the run of dm(c.a('...')) calls that follows item.extra.
    patron = "(dm\(c.a\('"+item.extra.replace("+", "\+")+"'.*?)</div>"
    data_enlaces = scrapertools.find_single_match(data, patron)
    patron = 'dm\(c.a\(\'([^\']+)\''
    matches = scrapertools.find_multiple_matches(data_enlaces, patron)
    numero = len(matches)
    for code in matches:
        # dm() deobfuscates the encoded link.
        enlace = dm(code)
        enlaces = servertools.findvideos(data=enlace)
        if len(enlaces) > 0:
            for link in enlaces:
                if "/folder/" in enlace:
                    titulo = link[0]
                else:
                    # Plain links are numbered in descending order.
                    titulo = item.title.split("-")[0]+" - Enlace "+str(numero)
                    numero -= 1
                itemlist.append(item.clone(action="play", server=link[2], title=titulo, url=link[1]))
    itemlist.sort(key=lambda it: it.title)
    return itemlist
def play(item):
    """Follow the show/ iframe page and emit one playable Item per video found."""
    logger.info("channels.peliculasaudiolatino play")
    itemlist = []

    data = scrapertools.cachePage(item.url)
    logger.info("data="+data)

    url = scrapertools.find_single_match(data,'src="(http://peliculasaudiolatino.com/show/[^"]+)"')
    logger.info("url="+url)

    data2 = scrapertools.cachePage(url)
    logger.info("data2="+data2)

    for video in servertools.findvideos(data2):
        scrapedtitle = item.title + video[0]
        videourl = video[1]
        server = video[2]
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"]")
        # Add the entry to the XBMC listing.
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle,
                             fulltitle=item.fulltitle, url=videourl, server=server, folder=False))

    return itemlist
def buscartrailer(item, trailers=[]):
    """Search trailers for *item* from several sources.

    Builds an itemlist containing: the default trailer from infoLabels (if
    any), TMDB/YouTube trailers, and manual-search entries (YouTube,
    Filmaffinity, Abandomoviez, Jayhap).  When invoked from the contextual
    menu a selection dialog is shown instead of returning the list.

    NOTE(review): ``trailers=[]`` is a mutable default argument; it is only
    read here so it is harmless, but a ``None`` sentinel would be safer.
    """
    logger.info("streamondemand.channels.trailertools buscartrailer")

    # Action dispatch when executed from the contextual menu
    if item.action == "manual_search" and item.contextual:
        itemlist = manual_search(item)
        item.contentTitle = itemlist[0].contentTitle
    elif 'search' in item.action and item.contextual:
        itemlist = globals()[item.action](item)
    else:
        # Remove the "search trailer" entry from the contextual menu to avoid redundancy
        if type(item.context) is str and "buscar_trailer" in item.context:
            item.context = item.context.replace("buscar_trailer", "")
        elif type(item.context) is list and "buscar_trailer" in item.context:
            item.context.remove("buscar_trailer")
        
        item.text_color = ""

        itemlist = []
        # Determine the title to search: explicit contentTitle, keyboard input,
        # or the fulltitle with [B]/[I]/[COLOR] markup stripped.
        if item.contentTitle != "":
            item.contentTitle = item.contentTitle.strip()
        elif keyboard:
            fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip())
            item.contentTitle = platformtools.dialog_input(default=fulltitle, heading="Scrivi il titolo da cercare")
            if item.contentTitle is None:
                item.contentTitle = fulltitle
            else:
                item.contentTitle = item.contentTitle.strip()
        else:
            fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip())
            item.contentTitle = fulltitle
        
        item.year = item.infoLabels['year']

        logger.info("streamondemand.channels.trailertools Búsqueda: %s" % item.contentTitle)
        logger.info("streamondemand.channels.trailertools Año: %s" % item.year)
        # Default trailer taken straight from infoLabels, resolved to a server
        if item.infoLabels['trailer'] and not trailers:
            url = item.infoLabels['trailer']
            if "youtube" in url:
                url = url.replace("embed/", "watch?v=")
            titulo, url, server = servertools.findvideos(url)[0]
            title = "Trailer por defecto  [" + server + "]"
            itemlist.append(item.clone(title=title, url=url, server=server, action="play"))
        # TMDB lookup type: series vs movie
        if item.show or item.infoLabels['tvshowtitle'] or item.contentType != "movie":
            tipo = "tv"
        else:
            tipo = "movie"
        try:
            if not trailers:
                itemlist.extend(tmdb_trailers(item, tipo))
            else:
                # Pre-fetched trailer dicts passed in by the caller
                for trailer in trailers:
                    title = trailer['name'] + " [" + trailer['size'] + "p] (" + trailer['language'].replace("en", "ING")\
                            .replace("es", "ESP")+")  [tmdb/youtube]"
                    itemlist.append(item.clone(action="play", title=title, url=trailer['url'], server="youtube"))
        except:
            import traceback
            logger.error(traceback.format_exc())
            
        if item.contextual:
            title = "[COLOR green]%s[/COLOR]"
        else:
            title = "%s"
        itemlist.append(item.clone(title=title % "Cerca su YouTube", action="youtube_search",
                                   text_color="green"))
        itemlist.append(item.clone(title=title % "Cerca su Filmaffinity",
                                   action="filmaffinity_search", text_color="green"))
        # For TV shows the Abandomoviez search option is omitted
        if not item.show and not item.infoLabels['tvshowtitle']:
            itemlist.append(item.clone(title=title % "Cerca su Abandomoviez",
                                       action="abandomoviez_search", text_color="green"))
        itemlist.append(item.clone(title=title % "Cerca su Jayhap (Youtube, Vimeo & Dailymotion)",
                                   action="jayhap_search", text_color="green"))

    # Contextual invocation: show a selection window instead of returning
    if item.contextual:
        global window_select, result
        select = Select("DialogSelect.xml", config.get_runtime_path(), item=item, itemlist=itemlist, caption="Stai cercando: "+item.contentTitle)
        window_select.append(select)
        select.doModal()

        if item.windowed:
            return result, window_select
    else:
        return itemlist
示例#56
0
def findvideos(item):
    """Build the list of playable video links for a multicineonline item.

    Downloads the page, extracts every "play-c" player block (each tab is
    paired with a language label), resolves the embedded sources through
    servertools.findvideos(), and finally processes the alternative
    "opción N, <idioma>" layout once over the whole page.

    Returns a list of Item objects with action="play" (or a single notice
    item when no server block is found).
    """
    logger.info("pelisalacarta.multicineonline findvideos")
    itemlist = []

    # Download the page and collapse newlines/tabs/double spaces so the
    # regexes below can match across what were originally multiple lines.
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = '<div class="play-c">(.*?)<div class="datos">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if not matches:
        itemlist.append(Item(channel=item.channel, title="[COLOR orange][B]Sin servidores para Pelisalacarta...[/B][/COLOR]", thumbnail="http://s6.postimg.org/55zljwr4h/sinnoisethumb.png", fanart="http://s6.postimg.org/avfu47xap/sinnoisefan.jpg", folder=False))

    for bloque_enlaces_idiomas in matches:
        # The embed src may use double OR single quotes; one pattern covers
        # both (the original code duplicated the whole loop per quote style).
        patronenlaces = '<div id="play-(.*?)".*?src=["\']([^"\']+)["\']'
        matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(bloque_enlaces_idiomas)
        patronidiomas = '<a href="#play-(.*?)">([^<]+)'
        matchesidiomas = re.compile(patronidiomas, re.DOTALL).findall(bloque_enlaces_idiomas)

        for numero, scrapedurl in matchesenlaces:
            # Pair each player tab with its language entry by tab number.
            for numero2, idiomas in matchesidiomas:
                if numero != numero2:
                    continue
                for video in servertools.findvideos(scrapedurl):
                    videotitle = scrapertools.unescape(video[0])
                    url = video[1]
                    server = video[2]
                    title = "[COLOR gold]Ver en--[/COLOR]" + "[COLOR skyblue]" + videotitle + "[/COLOR]"
                    # "sinopsis" items carry the poster in item.thumbnail.
                    if "sinopsis" in item.extra:
                        item.extra = item.thumbnail
                    itemlist.append(Item(channel=item.channel, action="play", server=server, title=title, url=url, thumbnail=item.extra, fulltitle=item.title, fanart=item.show, folder=False))

    # Alternative layout: "<em>opción N, idioma</em>" blocks. Processed ONCE
    # over the whole page — the original ran this inside the loop above,
    # duplicating every result once per play-c block.
    patron = '<em>opción \d+, ([^<]+)</em>.*?'
    patron += '<div class="contenedor_tab">(.*?)<div style="clear:both;">'
    for idioma, datosEnlaces in re.compile(patron, re.DOTALL).findall(data):
        for video in servertools.findvideos(datosEnlaces):
            videotitle = scrapertools.unescape(video[0]) + "-" + idioma
            url = video[1]
            server = video[2]
            title = "[COLOR gold]Ver en--[/COLOR]" + "[COLOR skyblue]" + videotitle + "[/COLOR]"
            if "sinopsis" in item.extra:
                item.extra = item.thumbnail
            itemlist.append(Item(channel=item.channel, action="play", server=server, title=title, url=url, thumbnail=item.extra, fulltitle=item.title, fanart=item.show, folder=False))

    return itemlist
def buscartrailer(item):
    """Entry point for the trailer search menu.

    Depending on item.action either dispatches to one of the specialised
    search helpers, or builds the default list: the trailer declared in
    infoLabels (if any), TMDB trailers, and the manual search options.
    In contextual mode the list is presented as a selection dialog and
    nothing is returned; otherwise the itemlist is returned.
    """
    logger.info("fusionse.channels.trailertools buscartrailer")

    # Remove the "Buscar Trailer" entry from the context menu to avoid
    # offering the very option we are already executing.
    item.context = item.context.replace("5", "")
    item.text_color = ""
    # If the contextual flag is missing, we were not launched from the
    # context menu.
    if item.contextual == "":
        item.contextual = False

    itemlist = []
    # Work out the search title: explicit contentTitle, keyboard input,
    # or fulltitle as a last resort.
    if item.contentTitle != "":
        item.contentTitle = item.contentTitle.strip()
    elif keyboard:
        item.contentTitle = platformtools.dialog_input(heading="Introduce el título a buscar")
        if item.contentTitle is None:
            # Dialog cancelled: fall back to the full title.
            item.contentTitle = item.fulltitle.strip()
        else:
            item.contentTitle = item.contentTitle.strip()
    else:
        item.contentTitle = item.fulltitle.strip()

    item.year = item.infoLabels['year'] if "year" in item.infoLabels else ""
    logger.info("fusionse.channels.trailertools Búsqueda: %s" % item.contentTitle)
    logger.info("fusionse.channels.trailertools Año: %s" % item.year)

    # Dispatch table for re-entry from one of the search menu entries.
    if item.action == "manual_search":
        itemlist = manual_search(item)
        item.contentTitle = itemlist[0].contentTitle
    elif item.action == "youtube_search":
        itemlist = youtube_search(item)
    elif item.action == "filmaffinity_search":
        itemlist = filmaffinity_search(item)
    elif item.action == "abandomoviez_search":
        itemlist = abandomoviez_search(item)
    elif item.action == "jayhap_search":
        itemlist = jayhap_search(item)
    else:
        # Trailer URL supplied by the scraper, if present.
        if "trailer" in item.infoLabels and item.infoLabels['trailer'] != "":
            url = item.infoLabels['trailer']
            if "youtube" in url:
                url = url.replace("embed/", "watch?v=")
            titulo, url, server = servertools.findvideos(url)[0]
            title = "Trailer por defecto  [" + server + "]"
            itemlist.append(item.clone(title=title, url=url, server=server, action="play"))
        # "tipo" instead of "type" so the builtin is not shadowed.
        if item.show != "" or ("tvshowtitle" in item.infoLabels and item.infoLabels['tvshowtitle'] != ""):
            tipo = "tv"
        else:
            tipo = "movie"
        try:
            itemlist.extend(tmdb_trailers(item, tipo))
        except:
            import traceback
            logger.error(traceback.format_exc())

        title = "[COLOR green]%s[/COLOR]" if item.contextual else "%s"
        itemlist.append(item.clone(title=title % "Búsqueda en Youtube", action="youtube_search",
                                   text_color="green"))
        itemlist.append(item.clone(title=title % "Búsqueda en Filmaffinity",
                                   action="filmaffinity_search", text_color="green"))
        # For TV shows the Abandomoviez search is not offered.
        if item.show == "" and ("tvshowtitle" not in item.infoLabels or item.infoLabels['tvshowtitle'] == ""):
            itemlist.append(item.clone(title=title % "Búsqueda en Abandomoviez",
                                       action="abandomoviez_search", text_color="green"))
        itemlist.append(item.clone(title=title % "Búsqueda en Jayhap (Youtube, Vimeo & Dailymotion)",
                                   action="jayhap_search", text_color="green"))

    if item.contextual:
        # Contextual mode: show a selection dialog instead of returning.
        opciones = []
        if itemlist:
            for video_url in itemlist:
                opciones.append(video_url.title)

            seleccion = platformtools.dialog_select("Buscando: " + item.contentTitle, opciones)
            logger.info("seleccion=%d" % seleccion)

            if seleccion < 0:
                return
            # Log the chosen option only AFTER the bounds check: with the
            # dialog cancelled (-1) the original logged opciones[-1], i.e.
            # the last entry, which was misleading.
            logger.info("seleccion=%s" % opciones[seleccion])
            item = itemlist[seleccion]
            if "search" in item.action:
                # Re-enter with the chosen search action.
                buscartrailer(item)
            else:
                if item.action == "play":
                    from platformcode import xbmctools
                    xbmctools.play_video(item)
                return
    else:
        return itemlist
示例#58
0
def findvideos(item):
    """List video links (torrent, online and per-server download mirrors)
    for a descargasmix item.

    Also fills item.plot and, when a year is scraped, tries to enrich
    infoLabels through TMDB. Episode items (item.extra set) are delegated
    to epienlaces().
    """
    logger.info("pelisalacarta.channels.descargasmix findvideos")
    if item.extra != "":
        return epienlaces(item)
    itemlist = []
    item.text_color = color3
    data = scrapertools.downloadpage(item.url)

    item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year:
        # Best effort: a TMDB failure must never break the link listing.
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    # Torrent pattern
    matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
    for scrapedurl in matches:
        title = "[Torrent] "
        title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
        itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl, text_color="green"))

    # Online pattern
    data_online = scrapertools.find_single_match(data, 'Enlaces para ver online(.*?)<div class="section-box related-'
                                                       'posts">')
    if data_online:
        patron = 'dm\(c.a\(\'([^\']+)\''
        for code in scrapertools.find_multiple_matches(data_online, patron):
            # dm() de-obfuscates the encoded link before resolving it
            # -- presumably a site-specific decoder; see its definition.
            enlace = dm(code)
            enlaces = servertools.findvideos(data=enlace)
            if enlaces:
                title = "Ver vídeo en " + enlaces[0][2]
                itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1]))

    # Download pattern
    data_descarga = scrapertools.find_single_match(data, 'Enlaces de descarga(.*?)<script>')
    patron = '<div class="fondoenlaces".*?id=".*?_([^"]+)".*?textContent=nice=dm\(c.a\(\'([^\']+)\''
    matches = scrapertools.find_multiple_matches(data_descarga, patron)
    for scrapedserver, scrapedurl in matches:
        # "ul"/"uploaded" are aliases for the uploadedto server module.
        if scrapedserver in ("ul", "uploaded"):
            scrapedserver = "uploadedto"
        titulo = scrapedserver.capitalize()
        if titulo == "Magnet":
            continue
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(scrapedserver)
        if mostrar_server:
            try:
                # Probe that a server module exists; ImportError skips it.
                __import__("servers." + scrapedserver)
                # Count the number of links for this server.
                patron = "(dm\(c.a\('" + scrapedurl.replace("+", "\+") + "'.*?)</div>"
                data_enlaces = scrapertools.find_single_match(data_descarga, patron)
                patron = 'dm\(c.a\(\'([^\']+)\''
                matches_enlaces = scrapertools.find_multiple_matches(data_enlaces, patron)
                numero = str(len(matches_enlaces))
                if item.category != "Cine":
                    itemlist.append(item.clone(action="enlaces", title=titulo + " - Nº enlaces:" + numero,
                                               extra=scrapedurl))
            except:
                pass

    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
    if item.category != "Cine" and config.get_library_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la biblioteca", action="add_pelicula_to_library",
                             extra="findvideos", infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
                             text_color="green"))

    return itemlist
示例#59
0
def findvideos(item):
    """List video links for a descargasmix item (newer page format).

    Fills item.plot, optionally enriches infoLabels via TMDB, then scrapes
    three sections: torrent links (old format), "Ver online" blocks with a
    header item carrying size/extra info, an inline Googlevideo player, and
    per-server download blocks. Episode/path items go to epienlaces().
    """
    logger.info()
    # Episode items (or items carrying a filesystem path) use epienlaces().
    if (item.extra and item.extra != "findvideos") or item.path:
        return epienlaces(item)

    itemlist = []
    item.text_color = color3

    data = get_data(item.url)
    item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year:
        # Best effort: a TMDB failure must never break the link listing.
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    old_format = False
    # Torrent pattern, old page format
    if "Enlaces de descarga</div>" in data:
        old_format = True
        matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
        for scrapedurl in matches:
            # Strip everything before the magnet URI and the "&b=4" suffix.
            scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
            scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
            title = "[Torrent] "
            title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
            itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
                                       text_color="green"))

    # Online links pattern
    data_online = scrapertools.find_single_match(data, 'Ver online</div>(.*?)<div class="section-box related-posts">')
    if data_online:
        title = "Enlaces Online"
        # Language CSS classes on the block decide the label suffix.
        if '"l-latino2"' in data_online:
            title += " [LAT]"
        elif '"l-esp2"' in data_online:
            title += " [ESP]"
        elif '"l-vose2"' in data_online:
            title += " [VOSE]"

        patron = 'make_links.*?,[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for i, code in enumerate(matches):
            # mostrar_enlaces() presumably decodes the obfuscated link list
            # into plain URLs -- TODO confirm against its definition.
            enlace = mostrar_enlaces(code)
            enlaces = servertools.findvideos(data=enlace[0])
            if enlaces and "peliculas.nu" not in enlaces:
                if i == 0:
                    # First match only: emit a non-playable header item with
                    # size and tooltip info for the whole online section.
                    extra_info = scrapertools.find_single_match(data_online, '<span class="tooltiptext">(.*?)</span>')
                    size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()

                    if size:
                        title += " [%s]" % size
                    new_item = item.clone(title=title, action="", text_color=color1)
                    if extra_info:
                        extra_info = scrapertools.htmlclean(extra_info)
                        new_item.infoLabels["plot"] = extra_info
                        new_item.title += " +INFO"
                    itemlist.append(new_item)

                title = "   Ver vídeo en " + enlaces[0][2]
                itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1]))
    # Inline Googlevideo player: the src URL is %-encoded with "@" used as
    # the escape character.
    scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'")
    if scriptg:
        gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
        url = scrapertools.find_single_match(gvideo, 'src="([^"]+)"')
        if url:
            itemlist.append(item.clone(action="play", server="directo", url=url, extra=item.url,
                                       title="   Ver vídeo en Googlevideo (Máxima calidad)"))

    # Download links pattern: (block title)(block body) up to the mirrors or
    # related-posts div, covering both old and new markup variants.
    patron = '<div class="(?:floatLeft |)double(?:nuevo|)">(.*?)</div>(.*?)' \
             '(?:<div(?: id="mirrors"|) class="(?:contentModuleSmall |)mirrors">|<div class="section-box related-' \
             'posts">)'
    bloques_descarga = scrapertools.find_multiple_matches(data, patron)
    for title_bloque, bloque in bloques_descarga:
        # The online section was already handled above.
        if title_bloque == "Ver online":
            continue
        if '"l-latino2"' in bloque:
            title_bloque += " [LAT]"
        elif '"l-esp2"' in bloque:
            title_bloque += " [ESP]"
        elif '"l-vose2"' in bloque:
            title_bloque += " [VOSE]"

        extra_info = scrapertools.find_single_match(bloque, '<span class="tooltiptext">(.*?)</span>')
        size = scrapertools.find_single_match(bloque, '(?i)TAMAÑO:\s*(.*?)<').strip()

        if size:
            title_bloque += " [%s]" % size
        # Non-playable header item for this download block.
        new_item = item.clone(title=title_bloque, action="", text_color=color1)
        if extra_info:
            extra_info = scrapertools.htmlclean(extra_info)
            new_item.infoLabels["plot"] = extra_info
            new_item.title += " +INFO"
        itemlist.append(new_item)

        if '<div class="subiendo">' in bloque:
            itemlist.append(item.clone(title="   Los enlaces se están subiendo", action=""))
            continue
        patron = 'class="separate.*? ([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedserver, scrapedurl in matches:
            # "ul"/"uploaded" are aliases for the uploadedto server module.
            if (scrapedserver == "ul") | (scrapedserver == "uploaded"):
                scrapedserver = "uploadedto"
            titulo = unicode(scrapedserver, "utf-8").capitalize().encode("utf-8")
            # Magnet entries: skipped in old format (already listed above),
            # emitted as a torrent item otherwise.
            if titulo == "Magnet" and old_format:
                continue
            elif titulo == "Magnet" and not old_format:
                title = "   Enlace Torrent"
                scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
                scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
                itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
                                           text_color="green"))
                continue
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    # Probe that a server module exists; ImportError skips it.
                    servers_module = __import__("servers." + scrapedserver)
                    # Extract the number of links
                    urls = mostrar_enlaces(scrapedurl)
                    numero = str(len(urls))
                    titulo = "   %s - Nº enlaces: %s" % (titulo, numero)
                    itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl, server=scrapedserver))
                except:
                    pass

    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
    if item.extra != "findvideos" and config.get_library_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la biblioteca", action="add_pelicula_to_library",
                             extra="findvideos", url=item.url, infoLabels={'title': item.fulltitle},
                             fulltitle=item.fulltitle, text_color="green"))

    return itemlist