# Code example #1 (score: 0)
# File: cinefox.py — project: neno1978/pelisalacarta
def get_enlaces(item, url, type):
    """Scrape the sources table at *url* and return playable link items.

    item: channel Item cloned for every entry.
    url:  page containing the "available-source" table.
    type: label shown on the header row (e.g. "Online", "Descarga").
    Returns the item list; when only the header was added, a
    "no links available" placeholder entry is appended.
    """
    itemlist = []
    itemlist.append(item.clone(action="", title="Enlaces %s" % type, text_color=color1))

    headers["Referer"] = item.url
    data = scrapertools.downloadpage(url, headers=headers.items())

    # Raw strings: '\s' inside a plain literal is an invalid escape sequence
    # (DeprecationWarning/SyntaxWarning on modern Python). Pattern unchanged.
    patron = r'<div class="available-source".*?data-url="([^"]+)".*?class="language.*?title="([^"]+)"' \
             r'.*?class="source-name.*?>\s*([^<]+)<.*?<span class="quality-text">([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    if matches:
        for scrapedurl, idioma, server, calidad in matches:
            # Normalize scraped names to the internal server ids.
            if server == "streamin": server = "streaminto"
            if server == "waaw" or server == "miracine": server = "netutv"
            if server == "ul": server = "uploadedto"
            if server == "videogk":
                # videogk embeds are really VK videos; rewrite the URL too.
                server = "vk"
                scrapedurl = scrapedurl.replace("http://videogk.com/", "https://vk.com/video_ext.php")
            if servertools.is_server_enabled(server):
                scrapedtitle = "    Ver en " + server.capitalize() + " [" + idioma + "/" + calidad + "]"
                itemlist.append(item.clone(action="play", url=scrapedurl, title=scrapedtitle, text_color=color2,
                                           extra=""))

    if len(itemlist) == 1:
        itemlist.append(item.clone(title="   No hay enlaces disponibles", action="", text_color=color2))

    return itemlist
# Code example #2 (score: 0)
def get_enlaces(item, url, type):
    """Scrape playable links for an item.

    For type == "Online" the item's own page is additionally parsed for
    embedded Google Video sources; afterwards the generic sources table
    at *url* is scraped for external hosters.
    Returns the item list; a "no links available" placeholder is
    appended when nothing was found.
    """
    itemlist = []

    data = httptools.downloadpage(url, add_referer=True).data

    if type == "Online":
        gg = httptools.downloadpage(item.url, add_referer=True).data
        bloque = scrapertools.find_single_match(gg,
                                                'class="tab".*?button show')
        patron = 'a href="#([^"]+)'
        patron += '.*?language-ES-medium ([^"]+)'
        patron += '.*?</i>([^<]+)'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedopcion, scrapedlanguage, scrapedcalidad in matches:
            google_url = scrapertools.find_single_match(
                bloque, 'id="%s.*?src="([^"]+)' % scrapedopcion)
            # BUG FIX: "language" was only assigned inside the if-chain, so an
            # unrecognized tag raised NameError on the first iteration and
            # silently reused the previous iteration's value afterwards.
            language = ""
            if "medium-es" in scrapedlanguage: language = "CAST"
            if "medium-en" in scrapedlanguage: language = "VO"
            if "medium-vs" in scrapedlanguage: language = "VOSE"
            if "medium-la" in scrapedlanguage: language = "LAT"
            titulo = " [%s/%s]" % (language, scrapedcalidad.strip())
            itemlist.append(
                item.clone(action="play",
                           url=google_url,
                           title="    Ver en Gvideo" + titulo,
                           text_color=color2,
                           extra="",
                           server="gvideo",
                           language=language,
                           quality=scrapedcalidad.strip()))

    # Raw strings: '\s' in a plain literal is an invalid escape on modern Python.
    patron = r'<div class="available-source".*?data-url="([^"]+)".*?class="language.*?title="([^"]+)"' \
             r'.*?class="source-name.*?>\s*([^<]+)<.*?<span class="quality-text">([^<]+)<'

    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, idioma, server, calidad in matches:
        # Normalize scraped names to the internal server ids.
        if server == "streamin": server = "streaminto"
        if server == "waaw" or server == "miracine": server = "netutv"
        if server == "ul": server = "uploadedto"
        if server == "player": server = "vimpleru"
        if servertools.is_server_enabled(server):
            scrapedtitle = "    Ver en " + server.capitalize(
            ) + " [" + idioma + "/" + calidad + "]"
            # .get() keeps an unmapped language tag from raising KeyError; the
            # raw scraped tag is passed through instead.
            itemlist.append(
                item.clone(action="play",
                           url=scrapedurl,
                           title=scrapedtitle,
                           text_color=color2,
                           extra="",
                           server=server,
                           language=IDIOMAS.get(idioma, idioma)))

    if len(itemlist) == 1:
        itemlist.append(
            item.clone(title="   No hay enlaces disponibles",
                       action="",
                       text_color=color2))

    return itemlist
# Code example #3 (score: 0)
def get_enlaces(item, url, type):
    """Return playable link items scraped from the sources table at *url*.

    A header entry labelled with *type* always comes first; if no source
    survives the server-enabled check, a placeholder entry is added.
    """
    # Scraped names -> internal server ids.
    alias = {
        "streamin": "streaminto",
        "waaw": "netutv",
        "miracine": "netutv",
        "ul": "uploadedto",
        "player": "vimpleru",
    }

    itemlist = [item.clone(action="", title="Enlaces %s" % type, text_color=color1)]

    data = httptools.downloadpage(url, add_referer=True).data
    patron = (r'<div class="available-source".*?data-url="([^"]+)".*?class="language.*?title="([^"]+)"'
              r'.*?class="source-name.*?>\s*([^<]+)<.*?<span class="quality-text">([^<]+)<')

    for scrapedurl, idioma, server, calidad in scrapertools.find_multiple_matches(data, patron):
        server = alias.get(server, server)
        if not servertools.is_server_enabled(server):
            continue
        scrapedtitle = "    Ver en %s [%s/%s]" % (server.capitalize(), idioma, calidad)
        itemlist.append(item.clone(action="play",
                                   url=scrapedurl,
                                   title=scrapedtitle,
                                   text_color=color2,
                                   extra="",
                                   server=server))

    if len(itemlist) == 1:
        itemlist.append(item.clone(title="   No hay enlaces disponibles",
                                   action="",
                                   text_color=color2))

    return itemlist
# Code example #4 (score: 0)
def findvideostv(item):
    """List online and download links for every episode of a season.

    Scrapes item.url for "movie-online-list" (streaming) and
    "movie-downloadlink-list" (download) spans whose season attribute
    matches item.infoLabels['season'], and returns one playable Item per
    link, sorted by episode number then title.
    """
    logger.info("pelisalacarta.channels.allpeliculas findvideostv")
    itemlist = []

    # Rellena diccionarios idioma y calidad
    idiomas_videos, calidad_videos = dict_videos()

    data = scrapertools.downloadpage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    # str() guards against infoLabels['season'] being an int (raw
    # concatenation raised TypeError); other forks of this channel already
    # wrap it — this makes the examples consistent.
    season = str(item.infoLabels['season'])

    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode=' \
             '"([^"]+)" season="' + season + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, episode, language, url in matches:
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except Exception:
            # Unknown numeric id or missing server module: guess from the URL.
            server = servertools.get_server_from_url(url)

        if server != "directo":
            # NOTE(review): idioma / calidad_videos.get(quality) may be None
            # for unknown ids, which would raise on concatenation — confirm
            # the dicts are exhaustive upstream.
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = "Episodio "+episode+" ["
            titulo += server.capitalize()+"]   ["+idioma+"] ("+calidad_videos.get(quality)+")"
            item.infoLabels['episode'] = episode

            itemlist.append(item.clone(action="play", title=titulo, url=url))

    # Enlace Descarga
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode=' \
             '"([^"]+)" season="' + season + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, episode, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except Exception:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            # Premium servers can be hidden by user configuration.
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(server)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "Episodio "+episode+" "
                titulo += server.capitalize()+"   ["+idioma+"] ("+calidad_videos.get(quality)+")"
                item.infoLabels['episode'] = episode
                itemlist.append(item.clone(action="play", title=titulo, url=url))

    # Order by episode number, then title.
    itemlist.sort(key=lambda item: (int(item.infoLabels['episode']), item.title))
    try:
        from core import tmdb
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except Exception:
        # Best effort: missing tmdb support must not break the listing.
        pass

    return itemlist
# Code example #5 (score: 0)
# File: allpeliculas.py — project: kenodos/pelisalacarta
def findvideostv(item):
    """List online and download links for one specific episode.

    Unlike the per-season variant, the episode and season numbers are
    inlined into the regex, so the capture groups are only: quality id,
    server id, language id, link URL.
    """
    logger.info("pelisalacarta.channels.allpeliculas findvideostv")
    itemlist = []

    # Rellena diccionarios idioma y calidad
    idiomas_videos, calidad_videos = dict_videos()

    data = scrapertools.downloadpage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="'+str(item.infoLabels['episode']) +'" season="' + \
             str(item.infoLabels['season']) + '" id_lang="([^"]+)".*?online-link="([^"]+)"'

    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, language, url in matches:
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except Exception:
            # Unknown numeric id or missing server module: guess from the URL.
            server = servertools.get_server_from_url(url)

        if server != "directo":
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = server.capitalize()+" ["+idioma+"] ("+calidad_videos.get(quality)+")"

            itemlist.append(item.clone(action="play", title=titulo, url=url, contentType="episode"))

    # Enlace Descarga
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="'+str(item.infoLabels['episode']) +'" season="'+str(item.infoLabels['season']) + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    # BUG FIX: the pattern has four capture groups, but this loop used to
    # unpack five names (a stray "episode"), so every match raised
    # ValueError and download links were never listed.
    for quality, servidor_num, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except Exception:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            # Premium servers can be hidden by user configuration.
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(server)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = server.capitalize()+" ["+idioma+"] ("+calidad_videos.get(quality)+")"
                itemlist.append(item.clone(action="play", title=titulo, url=url))

    itemlist.sort(key=lambda item: (int(item.infoLabels['episode']), item.title))
    try:
        from core import tmdb
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except Exception:
        # Best effort: missing tmdb support must not break the listing.
        pass

    return itemlist
# Code example #6 (score: 0)
# File: vixto.py — project: neno1978/pelisalacarta
def bloque_enlaces(data, filtro_idioma, dict_idiomas, tipo, item):
    """Extract mirror links from the table that follows *tipo* in *data*.

    filtro_idioma == 3 (or item.filtro) disables language filtering;
    otherwise only links whose mapped language equals filtro_idioma are
    kept and the filtered-out languages are offered via an extra entry.
    The result is sorted by server, language or site order according to
    the "orderlinks" channel setting.
    """
    logger.info("pelisalacarta.channels.vixto bloque_enlaces")

    lista_enlaces = list()
    bloque = scrapertools.find_single_match(data, tipo + '(.*?)</table>')
    # Raw strings: '\s' in a plain literal is an invalid escape on modern Python.
    patron = r'<td class="sape">\s*<i class="idioma-([^"]+)".*?href="([^"]+)".*?</p>.*?<td>([^<]+)</td>' \
             r'.*?<td class="desaparecer">(.*?)</td>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    filtrados = []  # languages hidden by the active filter
    for language, scrapedurl, calidad, orden in matches:
        language = language.strip()
        # BUG FIX: dots are now escaped — the old pattern's bare '.' matched
        # any character, so hosts like "wwwx..." could yield a wrong token.
        server = scrapertools.find_single_match(scrapedurl, r'http(?:s|)://(?:www\.|)(\w+)\.')
        # Normalize scraped names to the internal server ids.
        if server == "ul":
            server = "uploadedto"
        if server == "streamin":
            server = "streaminto"
        if server == "waaw":
            server = "netutv"
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(server)
        if mostrar_server:
            try:
                # ImportError (unsupported server) or KeyError (unknown
                # language in dict_idiomas) silently skips the link.
                servers_module = __import__("servers." + server)
                title = "   Mirror en " + server + " (" + language + ") (Calidad " + calidad.strip() + ")"
                if filtro_idioma == 3 or item.filtro:
                    lista_enlaces.append(item.clone(title=title, action="play", server=server, text_color=color2,
                                                    url=scrapedurl, idioma=language, orden=orden))
                else:
                    idioma = dict_idiomas[language]
                    if idioma == filtro_idioma:
                        lista_enlaces.append(item.clone(title=title, text_color=color2, action="play",
                                                        url=scrapedurl, server=server, idioma=language, orden=orden))
                    else:
                        if language not in filtrados:
                            filtrados.append(language)
            except Exception:
                pass

    # 0: by server name, 1: by language, otherwise by the site's own order.
    order = config.get_setting("orderlinks", item.channel)
    if order == 0:
        lista_enlaces.sort(key=lambda item: item.server)
    elif order == 1:
        lista_enlaces.sort(key=lambda item: item.idioma)
    else:
        lista_enlaces.sort(key=lambda item: item.orden, reverse=True)

    if filtro_idioma != 3:
        if len(filtrados) > 0:
            title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
            lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
                                            filtro=True))

    return lista_enlaces
# Code example #7 (score: 0)
def findvideos(item):
    """Build the mirror list for a movie page.

    Scrapes item.url for an optional torrent redirect plus the table of
    hoster mirrors, returning streaming ("Ver en ...") entries before
    download entries.
    """
    logger.info()
    itemlist = []

    # Descarga la pagina
    data = httptools.downloadpage(item.url).data
    item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
    item.plot = scrapertools.htmlclean(item.plot).strip()
    item.contentPlot = item.plot
    # Torrent URL is redirected through tumejorserie/tumejorjuego; fall back
    # to a same-host location.href when that pattern is absent.
    al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
    if al_url_fa == "":
        al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"%s(.*?)" ' % host)
    if al_url_fa != "":
        al_url_fa = host + al_url_fa
        logger.info("torrent=" + al_url_fa)
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title,
                 url=al_url_fa, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
                 parentContent=item))

    # box2 = server name, box3 = language, box4 = quality, box5 = link,
    # box6 = comments ("Ver en ..." marks streaming mirrors).
    patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)'
    patron += '<\/div[^<]+<div class="box4">([^<]+)<\/div[^<]+<div class="box5"><a href=(.*?) rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)<'

    matches = re.compile(patron, re.DOTALL).findall(data)

    itemlist_ver = []        # streaming mirrors, listed first
    itemlist_descargar = []  # download mirrors, listed after

    for servername, idioma, calidad, scrapedurl, comentarios in matches:
        # NOTE: the title keeps the server name exactly as scraped; the rename
        # on the next line only affects the server-enabled check below.
        title = "Mirror en " + servername + " (" + calidad + ")" + " (" + idioma + ")"
        servername = servername.replace("uploaded", "uploadedto").replace("1fichier", "onefichier")
        if comentarios.strip() != "":
            title = title + " (" + comentarios.strip() + ")"
        url = urlparse.urljoin(item.url, scrapedurl)
        mostrar_server = servertools.is_server_enabled(servername)
        if mostrar_server:
            # Thumbnail is guessed from the display title, not the server id.
            thumbnail = servertools.guess_server_thumbnail(title)
            plot = ""
            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
            action = "play"
            # Multi-part links are unpacked by extract_url instead of played.
            if "partes" in title:
                action = "extract_url"
            new_item = Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url,
                            thumbnail=thumbnail, plot=plot, parentContent=item, server = servername, quality=calidad)
            if comentarios.startswith("Ver en"):
                itemlist_ver.append(new_item)
            else:
                itemlist_descargar.append(new_item)

    itemlist.extend(itemlist_ver)
    itemlist.extend(itemlist_descargar)

    return itemlist
# Code example #8 (score: 0)
# File: lacajita.py — project: vguardiola/addon
def findvideos(item):
    """Return playable mirror items scraped from a movie page.

    Skips servers without a matching module or disabled by the user, maps
    flag images to language labels, and appends an "add to video library"
    entry when supported.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = ('<div class="grid_content2 sno">.*?src="([^"]+)".*?href="([^"]+)".*?src=\'(.*?)(?:.png|.jpg)\''
              '.*?<span>.*?<span>(.*?)</span>.*?<span>(.*?)</span>')
    for idioma, url, servidor, calidad, detalle in scrapertools.find_multiple_matches(data, patron):
        url = host + url
        # Server id is the last path segment of its icon URL.
        servidor = servidor.rsplit("/", 1)[1]
        servidor = servidor.replace("uploaded", "uploadedto").replace(
            "streamin.to", "streaminto")
        if "streamix" in servidor:
            servidor = "streamixcloud"
        # Unsupported or disabled servers are skipped entirely.
        try:
            servers_module = __import__("servers." + servidor)
            if not servertools.is_server_enabled(servidor):
                continue
        except:
            continue

        # Flag image -> language label.
        if "es.png" in idioma:
            idioma = "ESP"
        elif "la.png" in idioma:
            idioma = "LAT"
        elif "vos.png" in idioma:
            idioma = "VOSE"

        title = "%s - %s - %s" % (servidor, idioma, calidad)
        if detalle:
            title += " (%s)" % detalle

        itemlist.append(item.clone(action="play",
                                   url=url,
                                   title=title,
                                   server=servidor,
                                   text_color=color3,
                                   language=idioma,
                                   quality=calidad))

    if item.extra != "findvideos" and config.get_videolibrary_support():
        itemlist.append(item.clone(title="Añadir película a la videoteca",
                                   action="add_pelicula_to_library",
                                   extra="findvideos",
                                   text_color="green"))

    return itemlist
# Code example #9 (score: 0)
def epienlaces(item):
    """List online/download links for one episode of a show.

    The episode block is located via the (color-tag-stripped) item.extra
    delimiter; magnet links are promoted to the top under their own
    "Enlaces Torrent" header.
    """
    logger.info("pelisalacarta.channels.descargasmix epienlaces")
    item.text_color = color3

    data = scrapertools.downloadpage(item.url).replace("\n", "").replace("\t", "")

    # Bloque de enlaces: strip [Color ...] BBCode tags from the delimiter.
    delimitador = re.sub(r'(?i)(\[(?:/|)Color.*?\])', '', item.extra.strip())
    bloque = scrapertools.find_single_match(
        data, '<div class="cap">' + delimitador + '(.*?)(?:<div class="polo"|</li>)')

    patron = ('<div class="episode-server">.*?href="([^"]+)"'
              '.*?data-server="([^"]+)"'
              '.*?<div class="caliycola">(.*?)</div>')

    itemlist = [item.clone(action="", title="Enlaces de Descarga/Online", text_color=color1)]
    alias = {"ul": "uploadedto", "streamin": "streaminto"}
    for scrapedurl, scrapedserver, scrapedcalidad in scrapertools.find_multiple_matches(bloque, patron):
        scrapedserver = alias.get(scrapedserver, scrapedserver)
        titulo = "    %s [%s]" % (scrapedserver.capitalize(), scrapedcalidad)
        if scrapedserver == "magnet":
            # Torrent links go to the front of the list.
            itemlist.insert(0, item.clone(action="play", title=titulo, server="torrent", url=scrapedurl))
            continue
        if config.get_setting("hidepremium") == "true" and not servertools.is_server_enabled(scrapedserver):
            continue
        try:
            servers_module = __import__("servers." + scrapedserver)
            if "enlacesmix.com" in scrapedurl:
                itemlist.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
                                           extra=item.url))
            else:
                # Resolve the real hoster link behind the intermediate URL.
                enlaces = servertools.findvideos(data=scrapedurl)
                if enlaces:
                    titulo = "    %s  [%s]" % (enlaces[0][2].capitalize(), scrapedcalidad)
                    itemlist.append(item.clone(action="play", server=enlaces[0][2], title=titulo,
                                               url=enlaces[0][1]))
        except:
            pass

    if itemlist[0].server == "torrent":
        itemlist.insert(0, item.clone(action="", title="Enlaces Torrent", text_color=color1))

    return itemlist
# Code example #10 (score: 0)
def epienlaces(item):
    """List online/download links for one episode of a show.

    The episode block is found via the item.extra delimiter (with
    [Color ...] BBCode tags stripped); magnet links are promoted to the
    top of the list under their own "Enlaces Torrent" header.
    """
    logger.info("pelisalacarta.channels.descargasmix epienlaces")
    itemlist = []
    item.text_color = color3
    
    data = scrapertools.downloadpage(item.url)
    data = data.replace("\n", "").replace("\t", "")

    #Bloque de enlaces
    delimitador = item.extra.strip()
    # Strip [Color ...] / [/Color] BBCode tags so the delimiter matches raw HTML.
    delimitador = re.sub(r'(?i)(\[(?:/|)Color.*?\])', '', delimitador)
    patron = '<div class="cap">'+delimitador+'(.*?)(?:<div class="polo"|</li>)'
    bloque = scrapertools.find_single_match(data, patron)
     
    patron = '<div class="episode-server">.*?href="([^"]+)"' \
             '.*?data-server="([^"]+)"' \
             '.*?<div class="caliycola">(.*?)</div>'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    itemlist.append(item.clone(action="", title="Enlaces de Descarga/Online", text_color=color1))
    for scrapedurl, scrapedserver, scrapedcalidad in matches:
        # Normalize scraped names to the internal server ids.
        if scrapedserver == "ul":
            scrapedserver = "uploadedto"
        if scrapedserver == "streamin":
            scrapedserver = "streaminto"
        titulo = "    " + scrapedserver.capitalize() + " [" + scrapedcalidad + "]"
        #Enlaces descarga
        if scrapedserver == "magnet":
            # Torrent links are promoted to the front of the list.
            itemlist.insert(0, item.clone(action="play", title=titulo, server="torrent", url=scrapedurl))
        else:
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    # ImportError here (unsupported server) skips the link.
                    servers_module = __import__("servers."+scrapedserver)
                    if "enlacesmix.com" in scrapedurl:
                        itemlist.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
                                                   extra=item.url))
                    else:
                        # Resolve the real hoster link behind the intermediate URL.
                        enlaces = servertools.findvideos(data=scrapedurl)
                        if len(enlaces) > 0:
                            titulo = "    " + enlaces[0][2].capitalize() + "  [" + scrapedcalidad + "]"
                            itemlist.append(item.clone(action="play", server=enlaces[0][2], title=titulo,
                                                       url=enlaces[0][1]))
                except:
                    pass

    # If a magnet ended up first, label the torrent section.
    if itemlist[0].server == "torrent":
        itemlist.insert(0, item.clone(action="", title="Enlaces Torrent", text_color=color1))

    return itemlist
# Code example #11 (score: 0)
def findvideos(item):
    """Return playable items for the sources encoded in the page's _sa/_sl vars.

    The page language is taken from the _sl title (or an Idioma: span as
    fallback); each "selop" button is resolved to a URL via
    resuelve_golink.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # ~ var _sl = ['din', '30', '2da6172e33862f27e3f02da449a46131d9294ad75e80ed20', 'Cosas de críos [Castellano]', 'Dinosaurios'];
    _sa = re.findall('var _sa = (true|false);', data, flags=re.DOTALL)[0]
    _sl = re.findall(r"var _sl = \['([^']*)',\s*'([^']*)',\s*'([^']*)',\s*'([^']*)'",
                     data, flags=re.DOTALL)[0]
    if not _sa or not _sl:
        return itemlist

    # Language from the normalized title, with the Idioma: span as fallback.
    aux = _sl[3].lower().replace('(', '[').replace(')', ']')
    if '[castellano]' in aux:
        lang = 'Esp'
    elif '[ingles]' in aux:
        lang = 'Eng'
    else:
        lang = scrapertools.find_single_match(data, '<span>Idioma:\s*</span>([^<]+)')
        if 'Latino' in lang:
            lang = 'Lat'

    botones = re.findall('<button class="selop" sl="([^"]+)">([^<]+)</button>',
                         data, flags=re.DOTALL)
    for num, nombre in botones:
        url = resuelve_golink(int(num), _sa, _sl)
        server = servertools.corregir_servidor(nombre)
        if server and not servertools.is_server_available(server):
            server = ''  # indeterminado
        elif server and not servertools.is_server_enabled(server):
            continue  # descartar desactivados

        itemlist.append(Item(channel=item.channel,
                             action='play',
                             server=server,
                             language=lang,
                             title='',
                             url=url,
                             other=nombre if not server else ''))

    return itemlist
# Code example #12 (score: 0)
# File: oranline.py — project: jurrKodi/pelisalacarta
def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
    """Collect mirror entries from the links table with HTML id *type*.

    filtro_idioma == 4 (or item.filtro / extra == "findvideos") disables
    language filtering; otherwise only links whose mapped language equals
    filtro_idioma are listed, and the hidden languages are offered through
    a "show filtered links" entry at the end.
    """
    logger.info("pelisalacarta.channels.oranline bloque_enlaces")

    lista_enlaces = []
    bloque = scrapertools.find_single_match(data, '<div id="' + type + '">(.*?)</table>')
    patron = 'tr>[^<]*<td>.*?href="([^"]+)".*?<span>([^<]+)</span>' \
             '.*?<td>([^<]+)</td>.*?<td>([^<]+)</td>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    filtrados = []  # languages hidden by the active filter
    for scrapedurl, server, language, calidad in matches:
        language = language.strip()
        server = server.lower()
        # Normalize scraped names to the internal server ids.
        if server == "ul":
            server = "uploadedto"
        if server == "streamin":
            server = "streaminto"
        if server == "waaw":
            server = "netutv"
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(server)
        if mostrar_server:
            try:
                # ImportError (unsupported server) and KeyError (unknown
                # language in dict_idiomas) both silently skip the link.
                servers_module = __import__("servers." + server)
                title = "   Mirror en " + server + " (" + language + ") (Calidad " + calidad.strip() + ")"
                if filtro_idioma == 4 or item.filtro or item.extra == "findvideos":
                    lista_enlaces.append(item.clone(title=title, action="play", server=server, text_color=color2,
                                                    url=scrapedurl, idioma=language))
                else:
                    idioma = dict_idiomas[language]
                    if idioma == filtro_idioma:
                        lista_enlaces.append(item.clone(title=title, text_color=color2, action="play",
                                                        url=scrapedurl, server=server))
                    else:
                        if language not in filtrados: filtrados.append(language)
            except:
                pass

    if filtro_idioma != 4:
        if len(filtrados) > 0:
            title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
            lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
                                            filtro=True))

    return lista_enlaces
# Code example #13 (score: 0)
# File: descargasmix.py — project: CYBERxNUKE/xbmc-addon
def epienlaces(item):
    """List online/download links for one episode (descargasmix variant).

    Locates the episode block via the item.extra delimiter, collects
    hoster links (reversed so the page's last links come first) and
    promotes magnet links to the top under an "Enlaces Torrent" header.
    """
    logger.info()
    itemlist = []
    item.text_color = color3

    data = get_data(item.url)
    data = data.replace("\n", "").replace("\t", "")
    # Bloque de enlaces
    patron = '<div class="polo".*?>%s(.*?)(?:<div class="polo"|</li>)' % item.extra.strip()
    bloque = scrapertools.find_single_match(data, patron)

    patron = '<div class="episode-server">.*?data-sourcelk="([^"]+)"' \
             '.*?data-server="([^"]+)"' \
             '.*?<div class="caliycola">(.*?)</div>'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    itemlist.append(item.clone(action="", title="Enlaces Online/Descarga", text_color=color1))
    lista_enlaces = []
    for scrapedurl, scrapedserver, scrapedcalidad in matches:
        # Normalize scraped names to the internal server ids.
        if scrapedserver == "ul":
            scrapedserver = "uploadedto"
        if scrapedserver == "streamin":
            scrapedserver = "streaminto"
        # unicode()/encode() round-trip capitalizes accented names (Python 2).
        titulo = "    %s [%s]" % (unicode(scrapedserver, "utf-8").capitalize().encode("utf-8"), scrapedcalidad)
        # Enlaces descarga
        if scrapedserver == "magnet":
            # Torrent links are promoted to the front of the list.
            itemlist.insert(0, item.clone(action="play", title=titulo, server="torrent", url=scrapedurl, extra=item.url))
        else:
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    # ImportError here (unsupported server) skips the link.
                    servers_module = __import__("servers." + scrapedserver)
                    lista_enlaces.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
                                                    extra=item.url))
                except:
                    pass
    # Reverse so the page's last-listed links appear first.
    lista_enlaces.reverse()
    itemlist.extend(lista_enlaces)

    if itemlist[0].server == "torrent":
        itemlist.insert(0, item.clone(action="", title="Enlaces Torrent", text_color=color1))

    return itemlist
# Code example #14 (score: 0)
# File: descargasmix.py — project: tamamma/addon
def episode_links(item):
    """List online/download links for one episode (descargasmix variant).

    Magnet links go first under their own "Enlaces Torrent" header; the
    remaining hoster links are reversed so the page's last links appear
    first.
    """
    logger.info()
    item.text_color = color3

    data = get_data(item.url).replace("\n", "").replace("\t", "")

    # Bloque de enlaces: delimited by the episode label in item.extra.
    bloque = scrapertools.find_single_match(
        data, '<div class="polo".*?>%s(.*?)(?:<div class="polo"|</li>)' % item.extra.strip())

    patron = ('<div class="episode-server">.*?data-sourcelk="([^"]+)"'
              '.*?data-server="([^"]+)"'
              '.*?<div class="caliycola">(.*?)</div>')

    itemlist = [item.clone(action="", title="Enlaces Online/Descarga", text_color=color1)]
    alias = {"ul": "uploadedto", "streamin": "streaminto"}
    lista_enlaces = []
    for scrapedurl, scrapedserver, scrapedcalidad in scrapertools.find_multiple_matches(bloque, patron):
        scrapedserver = alias.get(scrapedserver, scrapedserver)
        # unicode()/encode() round-trip capitalizes accented names (Python 2).
        titulo = "    %s [%s]" % (unicode(scrapedserver, "utf-8").capitalize().encode("utf-8"), scrapedcalidad)
        if scrapedserver == "magnet":
            # Torrent links go to the front of the list.
            itemlist.insert(0,
                            item.clone(action="play", title=titulo, server="torrent", url=scrapedurl, extra=item.url))
        elif servertools.is_server_enabled(scrapedserver):
            try:
                # servers_module = __import__("servers." + scrapedserver)
                lista_enlaces.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
                                                extra=item.url))
            except:
                pass
    # Reverse so the page's last-listed links appear first.
    lista_enlaces.reverse()
    itemlist.extend(lista_enlaces)

    if itemlist[0].server == "torrent":
        itemlist.insert(0, item.clone(action="", title="Enlaces Torrent", text_color=color1))

    return itemlist
# Code example #15 (score: 0)
# File: hdfull.py — project: jurrKodi/pelisalacarta
def findvideos(item):
    """List playable/downloadable links for an hdfull title.

    The incoming item.url may carry "###<id>;<type>" metadata appended
    by the listing actions; it is split off before the page is fetched.
    """
    logger.info("pelisalacarta.channels.hdfull findvideos")

    itemlist=[]

    ## Load per-account watch status (favorites/seen) for all titles
    status = jsontools.load_json(scrapertools.cache_page(host+'/a/status/all'))

    url_targets = item.url

    ## Videos: split off the "###<id>;<type>" suffix if present
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]

    # NOTE(review): when "###" is absent, `id` and `type` silently fall
    # through to the Python builtins of the same name, so `type == "2"`
    # is False and the url concatenation below raises (swallowed by the
    # bare except) — confirm all callers append the "###" suffix.
    if type == "2" and account and item.category != "Cine":
        title = bbcode_kodi2html(" ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )")
        if "Favorito" in item.title:
            title = bbcode_kodi2html(" ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )")
        if config.get_library_support():
            title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )")
            itemlist.append( Item( channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False ) )

            title_label = bbcode_kodi2html(" ( [COLOR green][B]Tráiler[/B][/COLOR] )")

            itemlist.append( Item( channel=item.channel, action="trailer", title=title_label, fulltitle=title_label, url=url_targets, thumbnail=item.thumbnail, show=item.show ) )

        itemlist.append( Item( channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=True ) )

    data = agrupa_datos( scrapertools.cache_page(item.url) )

    # One "embed-selector" block per link: language, server, quality,
    # url and option ("Ver"/"Descargar") are captured in that order.
    patron  = '<div class="embed-selector"[^<]+'
    patron += '<h5 class="left"[^<]+'
    patron += '<span[^<]+<b class="key">\s*Idioma.\s*</b>([^<]+)</span[^<]+'
    patron += '<span[^<]+<b class="key">\s*Servidor.\s*</b><b[^>]+>([^<]+)</b[^<]+</span[^<]+'
    patron += '<span[^<]+<b class="key">\s*Calidad.\s*</b>([^<]+)</span[^<]+</h5.*?'
    patron += '<a href="(http[^"]+)".*?'
    patron += '</i>([^<]+)</a>'

    matches = re.compile(patron,re.DOTALL).findall(data)

    for idioma,servername,calidad,url,opcion in matches:
        opcion = opcion.strip()
        if opcion != "Descargar":
            opcion = "Ver"
        title = opcion+": "+servername.strip()+" ("+calidad.strip()+")"+" ("+idioma.strip()+")"
        title = scrapertools.htmlclean(title)
        # Check the connector exists and hide premium-only servers when
        # the "hidepremium" setting is enabled.
        servername = servername.lower().split(".")[0]

        # Normalize the site's server names to connector module names.
        if servername == "streamin": servername = "streaminto"
        if servername== "waaw": servername = "netutv"
        if servername == "ul": servername = "uploadedto"
        mostrar_server = True
        if config.get_setting("hidepremium")=="true":
            mostrar_server= servertools.is_server_enabled (servername)
        if mostrar_server:
            try:
                # ImportError here (missing connector) skips the link.
                servers_module = __import__("servers."+servername)
                thumbnail = item.thumbnail
                plot = item.title+"\n\n"+scrapertools.find_single_match(data,'<meta property="og:description" content="([^"]+)"')
                plot = scrapertools.htmlclean(plot)
                fanart = scrapertools.find_single_match(data,'<div style="background-image.url. ([^\s]+)')

                url+= "###" + id + ";" + type

                itemlist.append( Item( channel=item.channel, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail, plot=plot, fanart=fanart, show=item.show, folder=True ) )
            except:
                pass

    ## type 2 = movie
    if type == "2" and item.category != "Cine":
        ## STRM entries for every available server link; if the movie's
        ## STRM file does not exist, shows ">> Añadir a la biblioteca..."
        try: itemlist.extend( file_cine_library(item,url_targets) )
        except: pass

    return itemlist
コード例 #16
0
ファイル: cultmoviez.py プロジェクト: enursha101/xbmc-addon
def findvideos(item):
    """List playable links for a cultmoviez title or episode.

    For episodes item.extra is "<serie>|<id>" and the link page is
    resolved through the site's wp plugin; otherwise item.url is fetched
    directly. Server and video ids are decoded from the query string of
    the player iframe.
    """
    logger.info("pelisalacarta.cultmoviez findvideos")
    if item.fanart == "": item.fanart = fanart
    itemlist = []
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:22.0) Gecko/20100101 Firefox/22.0',
        'Accept-Encoding': 'none',
        'Host': 'www.cultmoviez.info'
    }
    try:
        # Episode path: item.extra = "<serie>|<id>"
        serie = item.extra.split("|")[0]
        id = item.extra.split("|")[1]

        url_wp_plugin = urlparse.urljoin(host, wp_plugin)
        data = agrupa_datos(
            scrapertools.cache_page(url_wp_plugin,
                                    post="serie=" + serie +
                                    "&episodios=1&id=" + id))

        # Sample of the page markup being parsed:
        #<div style='float:left; margin-right:10px; margin-bottom:10px;'><img src='https://lh4.googleusercontent.com/-_uQM5fI03ZE/UhrwpxoqEqI/AAAAAAAAIlA/pMF4wCIgNW8/s171/rsz_american-horror-story-season-1-new-promotional-poster-american-horror-story-24824740-1125-1500.jpg' width='132' height='175'/></div><p><div><strong><u>1.01 Pilot</u></strong></div></p><div><strong>Sinopsis:</strong> Ahora podrás ver American Horror Story: Murder House 1.01 Pilot online subtitulada
        #                                            Primer capítulo de La primera temporada de American Horror Story la serie creada por Ryan Murphy y Brad Falchuk
        #
        #                                            Un terapeuta y su familia se mudan de la ciudad para escaparse de sus problemas del pasado, pero rápidamente descubren que su nueva casa viene con su propio<p><div><a href='http://www.cultmoviez.info/12907'><img src='http://www.cultmoviez.info/wp-content/uploads/2013/10/ver-capitulo.png'/></a></div></p></div><div style='clear:both;'></div>

        url_for_servers_data = scrapertools.get_match(data,
                                                      "<a href='([^']+)'>")
        data = agrupa_datos(scrapertools.cache_page(url_for_servers_data))
    except:
        # Movie path (or any failure above): fetch item.url directly.
        data = agrupa_datos(scrapertools.cache_page(item.url, headers=headers))

    # Strip the "hd" key variants so HD and non-HD forms parse the same.
    data = re.sub(r"hd=", "=", data)
    data = data.replace("?&", "?")

    # Samples of the player iframes being parsed:
    #<iframe width="650" height="360" scrolling="no" src="http://www.cultmoviez.info/newplayer/play.php?uphd=jxr5zqbl5tdt&bshd=ge8cd4xp&fkhd=5v4vb9em/CS01E01.mp4.html&billhd=ojgo8mwi1dvz&moohd=070i7sxmckbq&plahd=3rm7pwhruyk4&upahd=1n0yqd53swtg&vbhd=ugezmymo75bg&vk1hd=oid=191530510|id=167180035|hash=57a118c8723792e6|hd%3D2&id=00C01&sub=,ES&sub_pre=ES" frameborder="0" allowfullscreen></iframe>

    # <iframe width="650" height="360" scrolling="no" src="http://www.cultmoviez.info/newplayer/play.php?bs=aeosek34&fk=t729bc9t/CultG240.mp4.html&up=k4n47ii5mgg7&vb=1wlt1mjdh5hx&dp=k8vs5y6j8&moo=p3b3vrlb421b&pla=xq5o2b930e7f&upa=22k5u2ivnts9&vk1=oid=251747296|id=169564765|hash=4947cca79d1da180|hd%3D2&v=2.0.2" frameborder="0" allowfullscreen></iframe>

    #<iframe width="650" height="360" scrolling="no" src="http://www.cultmoviez.info/newplayer/play.php?&bs=ricxefnc&fk=gamnlwjx/American.Horror.Story.S01E02.DVDRip.XviD-DEMAND.mp4.html&up=zjqtcmeio58c&id=001AHS2&sub=,ES&sub_pre=ES" frameborder="0" allowfullscreen></iframe>

    try:
        search_data_for_servers = scrapertools.get_match(
            data, "<iframe[^\?]+\?(.*?)&id=(.*?)&")
    except:
        search_data_for_servers = scrapertools.get_match(
            data, "<iframe[^\?]+\?(.*?)&v=(.*?)&")

    # Id for the subtitle file
    id = search_data_for_servers[1] + "_ES"

    # Split the query string into [server_id, video_id] pairs.
    servers_data_list = []
    for serverdata in search_data_for_servers[0].split("&"):
        server_id = scrapertools.get_match(serverdata, "(^\w+)=")
        video_id = scrapertools.get_match(serverdata, "^\w+=(.*?$)")
        servers_data_list.append([server_id, video_id])

    for server_id, video_id in servers_data_list:
        # NOTE(review): if the very first entry were "oid", `server`
        # would be read below before assignment — this relies on "oid"
        # never coming first in the query string; confirm.
        if server_id != "oid": server = server_label(server_id)
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(server)
        if mostrar_server:
            try:
                if server != "uptostream":
                    servers_module = __import__("servers." + server)
                video_link = server_link(server_id) % (video_id.replace(
                    "|", "&"))
                # Check that the direct link is not dead
                if server == "directo":
                    post = "fv=20&url=" + video_link + "&sou=pic"
                    data = scrapertools.cache_page(
                        "http://www.cultmoviez.info/playercult/pk/pk/plugins/player_p2.php",
                        post=post)
                    if data == "": continue
                title = item.title + " [" + server + "]"
                itemlist.append(
                    Item(channel=item.channel,
                         title=title,
                         url=video_link,
                         action="play",
                         thumbnail=item.thumbnail,
                         fanart=item.fanart,
                         plot=item.plot,
                         extra=id))
            except:
                pass

    return itemlist
コード例 #17
0
def findvideos(item):
    """Build the full link list (torrent / online / download) for a title."""
    logger.info()
    # Episode items and library paths are handled by epienlaces().
    if (item.extra and item.extra != "findvideos") or item.path:
        return epienlaces(item)

    itemlist = []
    item.text_color = color3

    data = get_data(item.url)
    item.plot = scrapertools.find_single_match(
        data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(
        data,
        '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year:
        # Enrich metadata from TMDB; best-effort, failures are ignored.
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    old_format = False
    # Torrent pattern (old page layout)
    if "Enlaces de descarga</div>" in data:
        old_format = True
        matches = scrapertools.find_multiple_matches(
            data, 'class="separate3 magnet".*?href="([^"]+)"')
        for scrapedurl in matches:
            scrapedurl = scrapertools.find_single_match(
                scrapedurl, '(magnet.*)')
            scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
            title = "[Torrent] "
            title += urllib.unquote(
                scrapertools.find_single_match(scrapedurl,
                                               'dn=(.*?)(?i)WWW.DescargasMix'))
            itemlist.append(
                item.clone(action="play",
                           server="torrent",
                           title=title,
                           url=scrapedurl,
                           text_color="green"))

    # Online links pattern
    data_online = scrapertools.find_single_match(
        data, 'Ver online</div>(.*?)<div class="section-box related-posts">')
    if data_online:
        title = "Enlaces Online"
        if '"l-latino2"' in data_online:
            title += " [LAT]"
        elif '"l-esp2"' in data_online:
            title += " [ESP]"
        elif '"l-vose2"' in data_online:
            title += " [VOSE]"

        patron = 'make_links.*?,[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for i, code in enumerate(matches):
            enlace = mostrar_enlaces(code)
            enlaces = servertools.findvideos(data=enlace[0])
            # NOTE(review): `enlaces` is a list of tuples, so this string
            # membership test can never match a tuple — the intended
            # filter was probably against the found url; confirm intent.
            if enlaces and "peliculas.nu" not in enlaces:
                if i == 0:
                    # First link only: emit the section header with
                    # size / tooltip info scraped from the block.
                    extra_info = scrapertools.find_single_match(
                        data_online, '<span class="tooltiptext">(.*?)</span>')
                    size = scrapertools.find_single_match(
                        data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()

                    if size:
                        title += " [%s]" % size
                    new_item = item.clone(title=title,
                                          action="",
                                          text_color=color1)
                    if extra_info:
                        extra_info = scrapertools.htmlclean(extra_info)
                        new_item.infoLabels["plot"] = extra_info
                        new_item.title += " +INFO"
                    itemlist.append(new_item)

                title = "   Ver vídeo en " + enlaces[0][2]
                itemlist.append(
                    item.clone(action="play",
                               server=enlaces[0][2],
                               title=title,
                               url=enlaces[0][1]))
    # Embedded Googlevideo link, obfuscated with '@' in place of '%'.
    scriptg = scrapertools.find_single_match(
        data, "<script type='text/javascript'>str='([^']+)'")
    if scriptg:
        gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
        url = scrapertools.find_single_match(gvideo, 'src="([^"]+)"')
        if url:
            itemlist.append(
                item.clone(
                    action="play",
                    server="directo",
                    url=url,
                    extra=item.url,
                    title="   Ver vídeo en Googlevideo (Máxima calidad)"))

    # Download blocks pattern
    patron = '<div class="(?:floatLeft |)double(?:nuevo|)">(.*?)</div>(.*?)' \
             '(?:<div(?: id="mirrors"|) class="(?:contentModuleSmall |)mirrors">|<div class="section-box related-' \
             'posts">)'
    bloques_descarga = scrapertools.find_multiple_matches(data, patron)
    for title_bloque, bloque in bloques_descarga:
        if title_bloque == "Ver online":
            continue
        if '"l-latino2"' in bloque:
            title_bloque += " [LAT]"
        elif '"l-esp2"' in bloque:
            title_bloque += " [ESP]"
        elif '"l-vose2"' in bloque:
            title_bloque += " [VOSE]"

        extra_info = scrapertools.find_single_match(
            bloque, '<span class="tooltiptext">(.*?)</span>')
        size = scrapertools.find_single_match(bloque,
                                              '(?i)TAMAÑO:\s*(.*?)<').strip()

        if size:
            title_bloque += " [%s]" % size
        new_item = item.clone(title=title_bloque, action="", text_color=color1)
        if extra_info:
            extra_info = scrapertools.htmlclean(extra_info)
            new_item.infoLabels["plot"] = extra_info
            new_item.title += " +INFO"
        itemlist.append(new_item)

        if '<div class="subiendo">' in bloque:
            itemlist.append(
                item.clone(title="   Los enlaces se están subiendo",
                           action=""))
            continue
        patron = 'class="separate.*? ([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedserver, scrapedurl in matches:
            # Normalize the site's server id to the connector name.
            if (scrapedserver == "ul") | (scrapedserver == "uploaded"):
                scrapedserver = "uploadedto"
            titulo = unicode(scrapedserver,
                             "utf-8").capitalize().encode("utf-8")
            if titulo == "Magnet" and old_format:
                continue
            elif titulo == "Magnet" and not old_format:
                title = "   Enlace Torrent"
                scrapedurl = scrapertools.find_single_match(
                    scrapedurl, '(magnet.*)')
                scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '',
                                                   scrapedurl))
                itemlist.append(
                    item.clone(action="play",
                               server="torrent",
                               title=title,
                               url=scrapedurl,
                               text_color="green"))
                continue
            if servertools.is_server_enabled(scrapedserver):
                try:
                    servers_module = __import__("servers." + scrapedserver)
                    # Extract the number of links for this server.
                    urls = mostrar_enlaces(scrapedurl)
                    numero = str(len(urls))
                    titulo = "   %s - Nº enlaces: %s" % (titulo, numero)
                    itemlist.append(
                        item.clone(action="enlaces",
                                   title=titulo,
                                   extra=scrapedurl,
                                   server=scrapedserver))
                except:
                    pass

    itemlist.append(
        item.clone(channel="trailertools",
                   title="Buscar Tráiler",
                   action="buscartrailer",
                   context="",
                   text_color="magenta"))
    if item.extra != "findvideos" and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir a la videoteca",
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 url=item.url,
                 infoLabels={'title': item.fulltitle},
                 fulltitle=item.fulltitle,
                 text_color="green"))

    return itemlist
コード例 #18
0
def findvideos(item):
    """List links (torrent / online / download) for a descargasmix title.

    Episode items (item.extra set) are delegated to epienlaces().
    """
    logger.info("pelisalacarta.channels.descargasmix findvideos")
    if item.extra != "":
        return epienlaces(item)
    itemlist = []
    item.text_color = color3
    data = scrapertools.downloadpage(item.url)

    item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year != "":
        # Enrich metadata from TMDB; best-effort, failures are ignored.
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    # Torrent pattern
    matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
    for scrapedurl in matches:
        title = "[Torrent] "
        title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
        itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl, text_color="green"))

    # Online links pattern
    data_online = scrapertools.find_single_match(data, 'Enlaces para ver online(.*?)<div class="section-box related-'
                                                       'posts">')
    if len(data_online) > 0:
        patron = 'dm\(c.a\(\'([^\']+)\''
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for code in matches:
            enlace = dm(code)
            enlaces = servertools.findvideos(data=enlace)
            if len(enlaces) > 0:
                title = "Ver vídeo en " + enlaces[0][2]
                itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1]))

    # Download links pattern
    data_descarga = scrapertools.find_single_match(data, 'Enlaces de descarga(.*?)<script>')
    patron = '<div class="fondoenlaces".*?id=".*?_([^"]+)".*?textContent=nice=dm\(c.a\(\'([^\']+)\''
    matches = scrapertools.find_multiple_matches(data_descarga, patron)
    for scrapedserver, scrapedurl in matches:
        # Normalize the site's server id to the connector module name.
        # (fix: the original used bitwise `|` on booleans instead of `or`)
        if scrapedserver in ("ul", "uploaded"):
            scrapedserver = "uploadedto"
        titulo = scrapedserver.capitalize()
        if titulo == "Magnet":
            continue
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(scrapedserver)
        if mostrar_server:
            try:
                # ImportError here (missing connector) skips the server.
                servers_module = __import__("servers."+scrapedserver)
                # Extract the number of links in this server's block.
                patron = "(dm\(c.a\('"+scrapedurl.replace("+", "\+")+"'.*?)</div>"
                data_enlaces = scrapertools.find_single_match(data_descarga, patron)
                patron = 'dm\(c.a\(\'([^\']+)\''
                matches_enlaces = scrapertools.find_multiple_matches(data_enlaces, patron)
                numero = str(len(matches_enlaces))
                if item.category != "Cine":
                    itemlist.append(item.clone(action="enlaces", title=titulo+" - Nº enlaces:"+numero,
                                               extra=scrapedurl))
            except:
                pass

    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
    if item.category != "Cine" and config.get_library_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la biblioteca", action="add_pelicula_to_library",
                             extra="findvideos", infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
                             text_color="green"))

    return itemlist
コード例 #19
0
ファイル: cultmoviez.py プロジェクト: CYBERxNUKE/xbmc-addon
def findvideos(item):
    """List playable links for a cultmoviez title or episode.

    For episodes item.extra is "<serie>|<id>" and the link page is
    resolved through the site's wp plugin; otherwise item.url is fetched
    directly. Server and video ids are decoded from the query string of
    the player iframe.
    """
    logger.info("pelisalacarta.cultmoviez findvideos")
    if item.fanart == "": item.fanart = fanart
    itemlist=[]
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:22.0) Gecko/20100101 Firefox/22.0',
               'Accept-Encoding': 'none',
               'Host':'www.cultmoviez.info'}
    try:
        # Episode path: item.extra = "<serie>|<id>"
        serie = item.extra.split("|")[0]
        id = item.extra.split("|")[1]

        url_wp_plugin = urlparse.urljoin(host,wp_plugin)
        data = agrupa_datos( scrapertools.cache_page(url_wp_plugin,post="serie="+serie+"&episodios=1&id="+id) )

        # Sample of the page markup being parsed:
        #<div style='float:left; margin-right:10px; margin-bottom:10px;'><img src='https://lh4.googleusercontent.com/-_uQM5fI03ZE/UhrwpxoqEqI/AAAAAAAAIlA/pMF4wCIgNW8/s171/rsz_american-horror-story-season-1-new-promotional-poster-american-horror-story-24824740-1125-1500.jpg' width='132' height='175'/></div><p><div><strong><u>1.01 Pilot</u></strong></div></p><div><strong>Sinopsis:</strong> Ahora podrás ver American Horror Story: Murder House 1.01 Pilot online subtitulada
        #                                            Primer capítulo de La primera temporada de American Horror Story la serie creada por Ryan Murphy y Brad Falchuk
        #
        #                                            Un terapeuta y su familia se mudan de la ciudad para escaparse de sus problemas del pasado, pero rápidamente descubren que su nueva casa viene con su propio<p><div><a href='http://www.cultmoviez.info/12907'><img src='http://www.cultmoviez.info/wp-content/uploads/2013/10/ver-capitulo.png'/></a></div></p></div><div style='clear:both;'></div>

        url_for_servers_data = scrapertools.get_match(data,"<a href='([^']+)'>")
        data = agrupa_datos( scrapertools.cache_page(url_for_servers_data) )
    except:
        # Movie path (or any failure above): fetch item.url directly.
        data = agrupa_datos( scrapertools.cache_page(item.url, headers=headers) )

    # Strip the "hd" key variants so HD and non-HD forms parse the same.
    data = re.sub(r"hd=","=",data)
    data = data.replace("?&","?")

    # Samples of the player iframes being parsed:
    #<iframe width="650" height="360" scrolling="no" src="http://www.cultmoviez.info/newplayer/play.php?uphd=jxr5zqbl5tdt&bshd=ge8cd4xp&fkhd=5v4vb9em/CS01E01.mp4.html&billhd=ojgo8mwi1dvz&moohd=070i7sxmckbq&plahd=3rm7pwhruyk4&upahd=1n0yqd53swtg&vbhd=ugezmymo75bg&vk1hd=oid=191530510|id=167180035|hash=57a118c8723792e6|hd%3D2&id=00C01&sub=,ES&sub_pre=ES" frameborder="0" allowfullscreen></iframe>

    # <iframe width="650" height="360" scrolling="no" src="http://www.cultmoviez.info/newplayer/play.php?bs=aeosek34&fk=t729bc9t/CultG240.mp4.html&up=k4n47ii5mgg7&vb=1wlt1mjdh5hx&dp=k8vs5y6j8&moo=p3b3vrlb421b&pla=xq5o2b930e7f&upa=22k5u2ivnts9&vk1=oid=251747296|id=169564765|hash=4947cca79d1da180|hd%3D2&v=2.0.2" frameborder="0" allowfullscreen></iframe>

    #<iframe width="650" height="360" scrolling="no" src="http://www.cultmoviez.info/newplayer/play.php?&bs=ricxefnc&fk=gamnlwjx/American.Horror.Story.S01E02.DVDRip.XviD-DEMAND.mp4.html&up=zjqtcmeio58c&id=001AHS2&sub=,ES&sub_pre=ES" frameborder="0" allowfullscreen></iframe>

    try:
        search_data_for_servers = scrapertools.get_match(data,"<iframe[^\?]+\?(.*?)&id=(.*?)&")
    except:
        search_data_for_servers = scrapertools.get_match(data,"<iframe[^\?]+\?(.*?)&v=(.*?)&")

    # Id for the subtitle file
    id = search_data_for_servers[1] + "_ES"

    # Split the query string into [server_id, video_id] pairs.
    servers_data_list = []
    for serverdata in search_data_for_servers[0].split("&"):
        server_id = scrapertools.get_match(serverdata,"(^\w+)=")
        video_id = scrapertools.get_match(serverdata,"^\w+=(.*?$)")
        servers_data_list.append( [server_id, video_id] )

    for server_id, video_id in servers_data_list:
        # NOTE(review): if the very first entry were "oid", `server`
        # would be read below before assignment — this relies on "oid"
        # never coming first in the query string; confirm.
        if server_id != "oid": server = server_label(server_id)
        mostrar_server = True
        if config.get_setting("hidepremium")=="true":
            mostrar_server= servertools.is_server_enabled (server)
        if mostrar_server:
            try:
                if server != "uptostream": servers_module = __import__("servers."+server)
                video_link = server_link(server_id) % (video_id.replace("|","&"))
                # Check that the direct link is not dead
                if server == "directo":
                    post = "fv=20&url="+video_link+"&sou=pic"
                    data = scrapertools.cache_page("http://www.cultmoviez.info/playercult/pk/pk/plugins/player_p2.php", post=post)
                    if data == "": continue
                title = item.title + " [" + server + "]"
                itemlist.append( Item(channel=item.channel, title =title, url=video_link, action="play", thumbnail=item.thumbnail, fanart=item.fanart, plot=item.plot, extra=id ) )
            except:
                pass

    return itemlist
コード例 #20
0
ファイル: hdfull.py プロジェクト: CYBERxNUKE/xbmc-addon
def findvideos(item):
    """List playable links for an hdfull title (obfuscated-JS variant).

    The page embeds the link table as base64 + obfuscated JSON; the key
    and per-provider url templates are recovered from the site's
    javascript files before the final links are rebuilt.
    """
    logger.info()

    itemlist=[]
    ## Load per-account watch status (favorites/seen) for all titles
    status = jsontools.load_json(httptools.downloadpage(host+'/a/status/all').data)

    url_targets = item.url

    ## Videos: split off the "###<id>;<type>" suffix if present
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]

    if type == "2" and account and item.category != "Cine":
        title = bbcode_kodi2html(" ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )")
        if "Favorito" in item.title:
            title = bbcode_kodi2html(" ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )")
        if config.get_library_support():
            title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )")
            itemlist.append( Item( channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False ) )

            title_label = bbcode_kodi2html(" ( [COLOR green][B]Tráiler[/B][/COLOR] )")

            itemlist.append( Item( channel=item.channel, action="trailer", title=title_label, fulltitle=title_label, url=url_targets, thumbnail=item.thumbnail, show=item.show ) )

        itemlist.append( Item( channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=True ) )

    # Recover the numeric key used to de-obfuscate the link table.
    data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
    key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')

    data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js").data
    try:
        data_js = jhexdecode(data_js)
    except:
        # providers.js is aaencoded: decode each chunk, then quote the
        # function/var bodies so the provider table can be parsed below.
        from lib.aadecode import decode as aadecode
        data_js = data_js.split(";゚ω゚")
        decode_aa = ""
        for match in data_js:
            decode_aa += aadecode(match)

        data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
        data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)

    data = agrupa_datos( httptools.downloadpage(item.url).data )
    data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
    data_decrypt = jsontools.load_json(obfs(base64.b64decode(data_obf), 126 - int(key)))

    infolabels = {}
    year = scrapertools.find_single_match(data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
    infolabels["year"] = year

    # Rebuild each provider's url/embed template from the decoded js.
    var0 = scrapertools.find_single_match(data_js, 'var_0=\[(.*?)\]').split(",")
    matches = []
    for match in data_decrypt:
        # SECURITY: eval of a snippet derived from remote javascript —
        # trusted only because it comes from the site being scraped.
        prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\'"]\})' % match["provider"]))
        function = prov["l"].replace("code", match["code"]).replace("var_2", match["code"])
        index = scrapertools.find_single_match(function, 'var_1\[(\d+)\]')
        function = function.replace("var_1[%s]" % index, var0[int(index)])

        url = scrapertools.find_single_match(function, "return\s*(.*?)[;]*\}")
        url = re.sub(r'\'|"|\s|\+', '', url)
        url = re.sub(r'var_\d+\[\d+\]', '', url)
        index = scrapertools.find_single_match(prov["e"], 'var_1\[(\d+)\]')
        embed = prov["e"].replace("var_1[%s]" % index, var0[int(index)])

        matches.append([match["lang"], match["quality"], url, embed])

    enlaces = []
    for idioma, calidad, url, embed in matches:
        servername = scrapertools.find_single_match(url, "(?:http:|https:)//(?:www.|)([^.]+).")
        # Normalize the site's server names to connector module names.
        if servername == "streamin": servername = "streaminto"
        if servername== "waaw": servername = "netutv"
        if servername == "uploaded" or servername == "ul": servername = "uploadedto"
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(servername)
        if mostrar_server:
            option = "Ver"
            # An empty/object embed template marks a download-only link.
            if re.search(r'return ([\'"]{2,}|\})', embed):
                option = "Descargar"
            calidad = unicode(calidad, "utf8").upper().encode("utf8")
            servername_c = unicode(servername, "utf8").capitalize().encode("utf8")
            title = option+": "+servername_c+" ("+calidad+")"+" ("+idioma+")"
            thumbnail = item.thumbnail
            plot = item.title+"\n\n"+scrapertools.find_single_match(data,'<meta property="og:description" content="([^"]+)"')
            plot = scrapertools.htmlclean(plot)
            fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
            if account:
                url += "###" + id + ";" + type

            enlaces.append(Item(channel=item.channel, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail, plot=plot, fanart=fanart, show=item.show, folder=True, server=servername, infoLabels=infolabels, contentTitle=item.contentTitle, contentType=item.contentType, tipo=option))

    # Sort so "Ver" entries come before "Descargar".
    enlaces.sort(key=lambda it:it.tipo, reverse=True)
    itemlist.extend(enlaces)
    ## type 2 = movie
    if type == "2" and item.category != "Cine":
        ## STRM entries for every available server link; if the movie's
        ## STRM file does not exist, shows ">> Añadir a la biblioteca..."
        try: itemlist.extend( file_cine_library(item,url_targets) )
        except: pass

    return itemlist
コード例 #21
0
ファイル: dascer.py プロジェクト: shlibidon/addon
def findvideos(item):
    """Build the list of playable links (torrent and hosters) for a title.

    Each link is verified (connector lookup + alive check for hosters,
    .torrent size probe for torrents), decorated with quality/language/
    size info, run through the user's language filter and finally handed
    to AutoPlay.
    """
    logger.info()
    
    itemlist = []
    itemlist_t = []                                                             # Full itemlist of links
    itemlist_f = []                                                             # Itemlist of language-filtered links
    matches = []

    #logger.debug(item)

    # Now process the .torrent links with their different qualities
    for scrapedurl, scrapedserver in item.url_enlaces:

        # Work on a clone of Item so the original stays untouched
        item_local = item.clone()

        item_local.url = scrapedurl
        item_local.server = scrapedserver.lower()
        item_local.action = "play" 
        
        # Look up the size inside the .torrent file
        size = ''
        if item_local.server == 'torrent' and not size and not item_local.url.startswith('magnet:'):
            size = generictools.get_torrent_size(item_local.url) #              Fetch the size from the .torrent on the web

        if size:
            # Insert middle dots so Kodi does not word-wrap the size badly
            size = size.replace('GB', 'G·B').replace('Gb', 'G·b').replace('MB', 'M·B')\
                        .replace('Mb', 'M·b').replace('.', ',')
            item_local.torrent_info = '%s, ' % size                             # Add size
        if item_local.url.startswith('magnet:') and not 'Magnet' in item_local.torrent_info:
            item_local.torrent_info += ' Magnet'
        if item_local.torrent_info:
            item_local.torrent_info = item_local.torrent_info.strip().strip(',')
            if not item.unify:
                item_local.torrent_info = '[%s]' % item_local.torrent_info

        # Now paint the links ('?' placeholder is the yet-unknown alive flag)
        item_local.title = '[[COLOR yellow]?[/COLOR]] [COLOR yellow][%s][/COLOR] ' %item_local.server.capitalize() \
                        + '[COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] %s' % \
                        (item_local.quality, str(item_local.language), \
                        item_local.torrent_info)

        # Verify links
        if item_local.server != 'torrent':
            if config.get_setting("hidepremium"):                               # If premium servers are hidden, skip them
                if not servertools.is_server_enabled(item_local.server):
                    continue
            devuelve = servertools.findvideosbyserver(item_local.url, item_local.server)    # does the link exist?
            if not devuelve:
                continue
            item_local.url = devuelve[0][1]
            item_local.alive = servertools.check_video_link(item_local.url, item_local.server, timeout=timeout)     # is the link alive?
            if 'NO' in item_local.alive:
                continue
        else:
            if not size or 'Magnet' in size:
                item_local.alive = "??"                                         # Link quality unverified
            elif 'ERROR' in size:
                item_local.alive = "no"                                         # Link quality in error?
                continue
            else:
                item_local.alive = "ok"                                         # Link quality verified
        
        itemlist_t.append(item_local.clone())                                   # Paint screen, if no language filtering
        
        # Required by FilterTools
        if config.get_setting('filter_languages', channel) > 0:                 # If a language is selected, filter
            itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language)  # Paint screen, if not empty

    if len(itemlist_f) > 0:                                                     # If there are filtered entries...
        itemlist.extend(itemlist_f)                                             # Paint filtered screen
    else:                                                                       
        if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: # If there are no filtered entries ...
            thumb_separador = get_thumb("next.png")                             # ... paint everything with a warning
            itemlist.append(Item(channel=item.channel, url=host, 
                        title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", 
                        thumbnail=thumb_separador, folder=False))
        itemlist.extend(itemlist_t)                                             # Paint screen with everything if no filtering
    
    # Required by AutoPlay
    autoplay.start(itemlist, item)                                              # Launch AutoPlay
    
    return itemlist
コード例 #22
0
def findvideos(item):
    """List torrent, online and download mirrors for a movie page.

    Episode items (item.extra set and != "findvideos") are delegated to
    epienlaces().  Otherwise the page is scraped for three sections:
    legacy magnet links, "Ver online" embeds and per-server download
    blocks.  Always appends a trailer-search entry and, when supported,
    an add-to-library entry.
    """
    logger.info("pelisalacarta.channels.descargasmix findvideos")
    if item.extra and item.extra != "findvideos":
        return epienlaces(item)
    itemlist = []
    item.text_color = color3
    data = scrapertools.downloadpage(item.url)

    item.plot = scrapertools.find_single_match(
        data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(
        data,
        '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year != "":
        # Best-effort TMDB metadata enrichment; never fatal.
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    old_format = False
    # Torrent pattern, legacy page layout
    if "Enlaces de descarga</div>" in data:
        old_format = True
        matches = scrapertools.find_multiple_matches(
            data, 'class="separate3 magnet".*?href="([^"]+)"')
        for scrapedurl in matches:
            title = "[Torrent] "
            title += urllib.unquote(
                scrapertools.find_single_match(scrapedurl,
                                               'dn=(.*?)(?i)WWW.DescargasMix'))
            itemlist.append(
                item.clone(action="play",
                           server="torrent",
                           title=title,
                           url=scrapedurl,
                           text_color="green"))

    # Online pattern
    data_online = scrapertools.find_single_match(
        data, 'Ver online</div>(.*?)<div class="section-box related-'
        'posts">')
    if len(data_online) > 0:
        itemlist.append(
            item.clone(title="Enlaces Online", action="", text_color=color1))
        patron = 'make_links.*?,[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for code in matches:
            # Decode the obfuscated link, then let servertools identify it.
            enlace = mostrar_enlaces(code)
            enlaces = servertools.findvideos(data=enlace[0])
            if len(enlaces) > 0:
                title = "   Ver vídeo en " + enlaces[0][2]
                itemlist.append(
                    item.clone(action="play",
                               server=enlaces[0][2],
                               title=title,
                               url=enlaces[0][1]))

    # Download pattern
    patron = '<div class="floatLeft double(?:nuevo|)">(.*?)</div>(.*?)' \
             '(?:<div(?: id="mirrors"|) class="contentModuleSmall mirrors">|<div class="section-box related-posts">)'
    bloques_descarga = scrapertools.find_multiple_matches(data, patron)
    for title_bloque, bloque in bloques_descarga:
        if title_bloque == "Ver online":
            continue
        itemlist.append(
            item.clone(title=title_bloque, action="", text_color=color1))
        if '<div class="subiendo">' in bloque:
            itemlist.append(
                item.clone(title="   Los enlaces se están subiendo",
                           action=""))
            continue
        patron = 'class="separate.*? ([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedserver, scrapedurl in matches:
            # Fix: boolean OR, not bitwise `|` on comparison results.
            if scrapedserver in ("ul", "uploaded"):
                scrapedserver = "uploadedto"
            titulo = scrapedserver.capitalize()
            if titulo == "Magnet" and old_format:
                continue
            elif titulo == "Magnet" and not old_format:
                title = "   Enlace Torrent"
                itemlist.append(
                    item.clone(action="play",
                               server="torrent",
                               title=title,
                               url=scrapedurl,
                               text_color="green"))
                continue
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    # Import probes that a connector exists for this server;
                    # ImportError silently skips unknown servers.
                    servers_module = __import__("servers." + scrapedserver)
                    # Extract the number of links
                    urls = mostrar_enlaces(scrapedurl)
                    numero = str(len(urls))
                    titulo = "   " + titulo + " - Nº enlaces:" + numero
                    itemlist.append(
                        item.clone(action="enlaces",
                                   title=titulo,
                                   extra=scrapedurl))
                except:
                    pass

    itemlist.append(
        item.clone(channel="trailertools",
                   title="Buscar Tráiler",
                   action="buscartrailer",
                   context="",
                   text_color="magenta"))
    if item.extra != "findvideos" and config.get_library_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir a la biblioteca",
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 url=item.url,
                 infoLabels={'title': item.fulltitle},
                 fulltitle=item.fulltitle,
                 text_color="green"))

    return itemlist
コード例 #23
0
ファイル: newpct1.py プロジェクト: neno1978/pelisalacarta
def findvideos(item):
    """Scrape the torrent link plus watch-online / download mirrors.

    The page (iso-8859-1) exposes a direct .torrent button and two
    tabbed sections of hoster mirrors; download mirrors may be split
    into several space-separated parts, each yielding one Item.
    """
    logger.info("[newpct1.py] findvideos")
    itemlist=[]   
          
    ## Any of the three URL variants is valid
    #item.url = item.url.replace("1.com/","1.com/ver-online/")
    #item.url = item.url.replace("1.com/","1.com/descarga-directa/")
    item.url = item.url.replace("1.com/","1.com/descarga-torrent/")

    # Download and normalize the page (strip newlines/tabs/comments)
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)","",scrapertools.cache_page(item.url))
    data = unicode( data, "iso-8859-1" , errors="replace" ).encode("utf-8")
    
    title = scrapertools.find_single_match(data,"<h1><strong>([^<]+)</strong>[^<]+</h1>")
    title+= scrapertools.find_single_match(data,"<h1><strong>[^<]+</strong>([^<]+)</h1>")
    caratula = scrapertools.find_single_match(data,'<div class="entry-left">.*?src="([^"]+)"')

    patron = '<a href="([^"]+)" title="[^"]+" class="btn-torrent" target="_blank">'

    # scraped torrent
    url = scrapertools.find_single_match(data,patron)
    if url!="":
        itemlist.append( Item(channel=item.channel, action="play", server="torrent", title=title+" [torrent]", fulltitle=title, url=url , thumbnail=caratula, plot=item.plot, folder=False) )

    # scraped watch-online and download sections (single or multiple links)
    data = data.replace("'",'"')
    data = data.replace('javascript:;" onClick="popup("http://www.newpct1.com/pct1/library/include/ajax/get_modallinks.php?links=',"")
    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=","")
    data = data.replace("$!","#!")

    patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    patron_ver = '<div id="tab3"[^>]+>.*?</ul>'

    match_ver = scrapertools.find_single_match(data,patron_ver)
    match_descargar = scrapertools.find_single_match(data,patron_descargar)

    patron = '<div class="box1"><img src="([^"]+)".*?' # logo
    patron+= '<div class="box2">([^<]+)</div>'         # server
    patron+= '<div class="box3">([^<]+)</div>'         # language
    patron+= '<div class="box4">([^<]+)</div>'         # quality
    patron+= '<div class="box5"><a href="([^"]+)".*?'  # link
    patron+= '<div class="box6">([^<]+)</div>'         # title

    enlaces_ver = re.compile(patron,re.DOTALL).findall(match_ver)
    enlaces_descargar = re.compile(patron,re.DOTALL).findall(match_descargar)

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        servidor = servidor.replace("streamin","streaminto")
        new_item = _resuelve_enlace(item, servidor, titulo+" ["+servidor+"]", enlace, logo)
        if new_item:
            itemlist.append(new_item)

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        servidor = servidor.replace("uploaded","uploadedto")
        # A download mirror may hold several space-separated parts.
        partes = enlace.split(" ")
        for p, enlace in enumerate(partes, 1):
            parte_titulo = titulo+" (%s/%s)" % (p,len(partes)) + " ["+servidor+"]"
            new_item = _resuelve_enlace(item, servidor, parte_titulo, enlace, logo)
            if new_item:
                itemlist.append(new_item)
    return itemlist


def _resuelve_enlace(item, servidor, titulo, enlace, logo):
    """Resolve *enlace* through the server connector; return a playable
    Item, or None when the server is hidden, unknown or the link fails."""
    if config.get_setting("hidepremium")=="true" and not servertools.is_server_enabled(servidor):
        return None
    try:
        # Import probes that a connector module exists for this server.
        servers_module = __import__("servers."+servidor)
        server_module = getattr(servers_module,servidor)
        devuelve = server_module.find_videos(enlace)
        if devuelve:
            return Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, fulltitle=item.title, url=devuelve[0][1], thumbnail=logo, plot=item.plot, folder=False)
    except:
        pass
    return None
コード例 #24
0
ファイル: allpeliculas.py プロジェクト: CYBERxNUKE/xbmc-addon
def findvideostv(item):
    """Build playable online and download links for one TV episode.

    Filters page spans by the item's season/episode numbers and maps the
    site's numeric server ids to pelisalacarta server connectors.
    """
    logger.info()
    itemlist = []

    # Fill language and quality lookup dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="%s' \
             '" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)"' \
             % (str(item.infoLabels['episode']), str(item.infoLabels['season']))
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, language, url in matches:
        # Map numeric id to server name; the import probes that a
        # connector exists, else fall back to URL-based detection.
        try:
            server = SERVERS[servidor_num]
            if server == "tusfiles" and "stormo.tv" in url:
                server = "stormo"
            if server != "tusfiles":
                servers_module = __import__("servers."+server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                url += "|" + item.url
            elif server == "tusfiles":
                url = "http://tusfiles.org/?%s" % url
                server = "directo"
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = server.capitalize()+" ["+idioma+"] ("+calidad_videos.get(quality)+")"

            itemlist.append(item.clone(action="play", title=titulo, url=url, contentType="episode", server=server))

    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="%s' \
             '" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)"' \
             % (str(item.infoLabels['episode']), str(item.infoLabels['season']))
    matches = scrapertools.find_multiple_matches(data, patron)
    # BUG FIX: the pattern captures 4 groups (quality, server id,
    # language, url); the old loop unpacked 5 values (a stray "episode")
    # and raised ValueError on the first match.
    for quality, servidor_num, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                url += "|" + item.url
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(server)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = server.capitalize()+" ["+idioma+"] ("+calidad_videos.get(quality)+")"
                itemlist.append(item.clone(action="play", title=titulo, url=url, contentType="episode", server=server))

    # `it` instead of `item` to avoid shadowing the outer parameter.
    itemlist.sort(key=lambda it: (int(it.infoLabels['episode']), it.title))
    # Best-effort TMDB metadata enrichment; never fatal.
    try:
        from core import tmdb
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass

    return itemlist
コード例 #25
0
ファイル: allpeliculas.py プロジェクト: CYBERxNUKE/xbmc-addon
def findvideos(item):
    """List online and download links for a movie, sorted by language
    and server, plus trailer-search and add-to-library entries."""
    logger.info()
    itemlist = []
    item.text_color = color3

    # Fill language and quality lookup dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    if item.extra != "library":
        # Best-effort TMDB metadata enrichment; never fatal.
        try:
            from core import tmdb
            tmdb.set_infoLabels(item, __modo_grafico__)
        except:
            pass

    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        # Map numeric id to server name; the import probes that a
        # connector exists, else fall back to URL-based detection.
        try:
            server = SERVERS[servidor_num]
            if server == "tusfiles" and "stormo.tv" in url:
                server = "stormo"
            if server != "tusfiles":
                servers_module = __import__("servers."+server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                # vimeo needs the referer appended after a pipe
                url += "|" + item.url
            elif server == "tusfiles":
                url = "http://tusfiles.org/?%s" % url
                server = "directo"
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = server.capitalize()+"  ["+idioma+"] ["+calidad_videos.get(calidad)+"]"
            itemlist.append(item.clone(action="play", title=titulo, url=url, extra=idioma, server=server))

    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                url += "|" + item.url
            if config.get_setting("hidepremium") == "true":
                # Hide links whose server is premium-only/disabled
                mostrar_server = servertools.is_server_enabled(server)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "["+server.capitalize()+"]  ["+idioma+"] ["+calidad_videos.get(calidad)+"]"
                itemlist.append(item.clone(action="play", title=titulo, url=url, extra=idioma, server=server))

    itemlist.sort(key=lambda item: (item.extra, item.server))
    if itemlist:
        if not "trailer" in item.infoLabels:
            trailer_url = scrapertools.find_single_match(data, 'class="n-movie-trailer">([^<]+)</span>')
            item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=")

        itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                   text_color="magenta", context=""))
        if item.extra != "library":
            if config.get_library_support():
                itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca",
                                     action="add_pelicula_to_library", url=item.url, text_color="green",
                                     infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
                                     extra="library"))

    return itemlist
コード例 #26
0
def findvideos(item):
    """List online and download links for a movie, sorted by language
    and server, plus trailer-search and add-to-library entries."""
    logger.info()
    itemlist = []
    item.text_color = color3

    # Fill language and quality lookup dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    if item.extra != "library":
        # Best-effort TMDB metadata enrichment; never fatal.
        try:
            from core import tmdb
            tmdb.set_infoLabels(item, __modo_grafico__)
        except:
            pass

    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        # Map numeric id to server name; the import probes that a
        # connector exists, else fall back to URL-based detection.
        try:
            server = SERVERS[servidor_num]
            if server != "tusfiles":
                servers_module = __import__("servers."+server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                # vimeo needs the referer appended after a pipe
                url += "|" + item.url
            elif server == "tusfiles":
                url = "http://tusfiles.org/?%s" % url
                server = "directo"
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = server.capitalize()+"  ["+idioma+"] ["+calidad_videos.get(calidad)+"]"
            itemlist.append(item.clone(action="play", title=titulo, url=url, extra=idioma, server=server))

    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                url += "|" + item.url
            if config.get_setting("hidepremium") == "true":
                # Hide links whose server is premium-only/disabled
                mostrar_server = servertools.is_server_enabled(server)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "["+server.capitalize()+"]  ["+idioma+"] ["+calidad_videos.get(calidad)+"]"
                itemlist.append(item.clone(action="play", title=titulo, url=url, extra=idioma, server=server))

    itemlist.sort(key=lambda item: (item.extra, item.server))
    if itemlist:
        if not "trailer" in item.infoLabels:
            trailer_url = scrapertools.find_single_match(data, 'class="n-movie-trailer">([^<]+)</span>')
            item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=")

        itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                   text_color="magenta", context=""))
        if item.extra != "library":
            if config.get_library_support():
                itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca",
                                     action="add_pelicula_to_library", url=item.url, text_color="green",
                                     infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
                                     extra="library"))

    return itemlist
コード例 #27
0
def findvideos(item):
    """Collect playable links from the link tables and embedded players.

    Scrapes the "videos" and "download" tables, resolves the special
    mycinedesiempre redirect once, appends iframe/source embeds, then
    drops unreliable servers (youtube/vsmobi/embedy) unless they are the
    only links available.
    """
    logger.info()
    itemlist = []

    IDIOMAS = {'spanish': 'Esp', 'vose': 'VOSE'}

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    # Watch online / downloads
    done_mycinedesiempre = False
    for tipo in ['videos', 'download']:
        bloque = scrapertools.find_single_match(
            data, "<div id='%s'(.*?)</table>" % tipo)
        # ~ logger.debug(bloque)

        matches = scrapertools.find_multiple_matches(
            bloque, "<tr id='link-[^']+'>(.*?)</tr>")
        for enlace in matches:
            # ~ logger.debug(enlace)

            url = scrapertools.find_single_match(enlace, " href='([^']+)")
            if not url: continue
            if '.us.archive.org' in enlace: servidor = 'directo'
            elif 'archive.org' in enlace: servidor = 'archiveorg'
            else:
                servidor = corregir_servidor(
                    scrapertools.find_single_match(enlace, "domain=([^'.]+)"))
            if not servidor: continue

            if servidor == 'mycinedesiempre':
                if done_mycinedesiempre:
                    continue  # Do not access mycinedesiempre twice
                data2 = httptools.downloadpage(url).data
                url = scrapertools.find_single_match(
                    data2, '<a id="link" rel="nofollow" href="([^"]+)')
                if url:
                    done_mycinedesiempre = True
                    data2 = httptools.downloadpage(url).data
                    itemlist.extend(extraer_mycinedesiempre(data2))
                continue

            tds = scrapertools.find_multiple_matches(enlace, '<td>(.*?)</td>')
            lang = tds[1].lower()
            other = 'hace ' + tds[3]
            # ~ other += ', ' + tipo

            itemlist.append(
                Item(channel=item.channel,
                     action='play',
                     server=servidor,
                     title='',
                     url=url,
                     language=IDIOMAS.get(lang, lang),
                     other=other))

    # Embeds (iframes / sources)
    itemlist.extend(extraer_embeds(data))

    # Resolve servers still pending assignment
    if len(itemlist) > 0: itemlist = servertools.get_servers_itemlist(itemlist)

    # Discard youtube links (talks, trailers, ...) and vsmobi, embedy (they tend to fail) unless there are no other links
    # also videos (videos.2000peliculassigloxx.com)
    validos = len([
        it for it in itemlist if it.server not in
        ['desconocido', 'youtube', 'vsmobi', 'embedy', 'videos']
        and servertools.is_server_enabled(it.server)
    ])
    if validos > 0:
        # Fix: build a real list instead of filter(); on Python 3 filter()
        # returns a lazy iterator, which callers do not expect.
        # keep desconocido, videos so they are listed in servers_todo
        itemlist = [
            it for it in itemlist
            if it.server not in ['youtube', 'vsmobi', 'embedy']
        ]

    return itemlist
コード例 #28
0
ファイル: hdfull.py プロジェクト: enursha101/xbmc-addon
def findvideos(item):
    """Decode hdfull's obfuscated provider list and build playable Items.

    Downloads the site's obfuscated JS, derives the XOR key, decrypts the
    base64 payload embedded in the page and evaluates each provider stub
    to reconstruct the final video URL.  Adds favourites/trailer entries
    for logged-in accounts and STRM library support for movies.
    """
    logger.info()

    itemlist = []
    ## Load watched/favourite status for the whole account
    status = jsontools.load_json(
        httptools.downloadpage(host + '/a/status/all').data)

    url_targets = item.url

    ## Videos
    # NOTE(review): if "###" is absent, `id` and `type` stay unbound and
    # `type` below silently falls back to the builtin — confirm callers
    # always append the "###id;type" suffix.
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]

    # type "2" = movie; extra account-only actions (favourites, trailer)
    if type == "2" and account and item.category != "Cine":
        title = bbcode_kodi2html(
            " ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )")
        if "Favorito" in item.title:
            title = bbcode_kodi2html(
                " ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )")
        if config.get_library_support():
            title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show +
                                           "[/B][/COLOR] )")
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title_label,
                     fulltitle=title_label,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     folder=False))

            title_label = bbcode_kodi2html(
                " ( [COLOR green][B]Tráiler[/B][/COLOR] )")

            itemlist.append(
                Item(channel=item.channel,
                     action="trailer",
                     title=title_label,
                     fulltitle=title_label,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     show=item.show))

        itemlist.append(
            Item(channel=item.channel,
                 action="set_status",
                 title=title,
                 fulltitle=title,
                 url=url_targets,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 folder=True))

    # Extract the numeric key used to de-obfuscate the page payload
    data_js = httptools.downloadpage(
        "http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
    key = scrapertools.find_single_match(
        data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')

    # providers.js is itself obfuscated (hex or AAencode)
    data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js").data
    try:
        data_js = jhexdecode(data_js)
    except:
        from lib.aadecode import decode as aadecode
        data_js = data_js.split(";゚ω゚")
        decode_aa = ""
        for match in data_js:
            decode_aa += aadecode(match)

        # Quote JS function/var bodies so the result parses as JSON-ish text
        data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
        data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)

    # Decrypt the base64 provider payload embedded in the page
    data = agrupa_datos(httptools.downloadpage(item.url).data)
    data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
    data_decrypt = jsontools.load_json(
        obfs(base64.b64decode(data_obf), 126 - int(key)))

    infolabels = {}
    year = scrapertools.find_single_match(
        data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
    infolabels["year"] = year

    # var_0 holds the string fragments the provider stubs index into
    var0 = scrapertools.find_single_match(data_js,
                                          'var_0=\[(.*?)\]').split(",")
    matches = []
    for match in data_decrypt:
        # SECURITY NOTE: eval() runs text extracted from the remote
        # providers.js — trusted only as far as hdfull.tv itself.
        prov = eval(
            scrapertools.find_single_match(
                data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\'"]\})' % match["provider"]))
        # Substitute the per-link code into the provider's URL template
        function = prov["l"].replace("code", match["code"]).replace(
            "var_2", match["code"])
        index = scrapertools.find_single_match(function, 'var_1\[(\d+)\]')
        function = function.replace("var_1[%s]" % index, var0[int(index)])

        url = scrapertools.find_single_match(function, "return\s*(.*?)[;]*\}")
        url = re.sub(r'\'|"|\s|\+', '', url)
        url = re.sub(r'var_\d+\[\d+\]', '', url)
        index = scrapertools.find_single_match(prov["e"], 'var_1\[(\d+)\]')
        embed = prov["e"].replace("var_1[%s]" % index, var0[int(index)])

        matches.append([match["lang"], match["quality"], url, embed])

    enlaces = []
    for idioma, calidad, url, embed in matches:
        # Derive the server name from the URL's second-level domain
        servername = scrapertools.find_single_match(
            url, "(?:http:|https:)//(?:www.|)([^.]+).")
        if servername == "streamin": servername = "streaminto"
        if servername == "waaw": servername = "netutv"
        if servername == "uploaded" or servername == "ul":
            servername = "uploadedto"
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(servername)
        if mostrar_server:
            option = "Ver"
            # An empty/object embed function marks download-only links
            if re.search(r'return ([\'"]{2,}|\})', embed):
                option = "Descargar"
            calidad = unicode(calidad, "utf8").upper().encode("utf8")
            servername_c = unicode(servername,
                                   "utf8").capitalize().encode("utf8")
            title = option + ": " + servername_c + " (" + calidad + ")" + " (" + idioma + ")"
            thumbnail = item.thumbnail
            plot = item.title + "\n\n" + scrapertools.find_single_match(
                data, '<meta property="og:description" content="([^"]+)"')
            plot = scrapertools.htmlclean(plot)
            fanart = scrapertools.find_single_match(
                data, '<div style="background-image.url. ([^\s]+)')
            if account:
                # Re-append the id/type marker so play() can track status
                url += "###" + id + ";" + type

            enlaces.append(
                Item(channel=item.channel,
                     action="play",
                     title=title,
                     fulltitle=title,
                     url=url,
                     thumbnail=thumbnail,
                     plot=plot,
                     fanart=fanart,
                     show=item.show,
                     folder=True,
                     server=servername,
                     infoLabels=infolabels,
                     contentTitle=item.contentTitle,
                     contentType=item.contentType,
                     tipo=option))

    enlaces.sort(key=lambda it: it.tipo, reverse=True)
    itemlist.extend(enlaces)
    ## 2 = movie
    if type == "2" and item.category != "Cine":
        ## STRM for all available server links
        ## If the movie's STRM file does not exist show the item ">> Add to library..."
        try:
            itemlist.extend(file_cine_library(item, url_targets))
        except:
            pass

    return itemlist
コード例 #29
0
ファイル: vixto.py プロジェクト: enursha101/xbmc-addon
def bloque_enlaces(data, filtro_idioma, dict_idiomas, tipo, item):
    """Scrape the mirror links of one results table and apply the language
    filter, returning playable items (plus a "show filtered links" entry
    when the filter hid some languages)."""
    logger.info()

    lista_enlaces = []
    bloque = scrapertools.find_single_match(data, tipo + '(.*?)</table>')
    patron = '<td class="sape">\s*<i class="idioma-([^"]+)".*?href="([^"]+)".*?</p>.*?<td>([^<]+)</td>' \
             '.*?<td class="desaparecer">(.*?)</td>'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    # Site aliases mapped to the local connector (server module) names.
    renombres = {"ul": "uploadedto", "streamin": "streaminto", "waaw": "netutv"}
    filtrados = []
    for idioma_raw, url_enlace, calidad, orden in matches:
        idioma_raw = idioma_raw.strip()
        server = scrapertools.find_single_match(
            url_enlace, 'http(?:s|)://(?:www.|)(\w+).')
        server = renombres.get(server, server)
        # Optionally hide premium-only servers.
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(server)
        if not mostrar_server:
            continue
        try:
            # Probe that a connector module exists; any failure (including a
            # missing language key below) silently drops the link.
            servers_module = __import__("servers." + server)
            title = "   Mirror en " + server + " (" + idioma_raw + ") (Calidad " + calidad.strip(
            ) + ")"
            if filtro_idioma == 3 or item.filtro:
                # No language filter active (or already showing filtered links).
                lista_enlaces.append(
                    item.clone(title=title,
                               action="play",
                               server=server,
                               text_color=color2,
                               url=url_enlace,
                               idioma=idioma_raw,
                               orden=orden))
            elif dict_idiomas[idioma_raw] == filtro_idioma:
                lista_enlaces.append(
                    item.clone(title=title,
                               text_color=color2,
                               action="play",
                               url=url_enlace,
                               server=server,
                               idioma=idioma_raw,
                               orden=orden))
            elif idioma_raw not in filtrados:
                filtrados.append(idioma_raw)
        except:
            pass

    # Sort per the user's preference: by server, language, or site order.
    criterio = config.get_setting("orderlinks", item.channel)
    if criterio == 0:
        lista_enlaces.sort(key=lambda it: it.server)
    elif criterio == 1:
        lista_enlaces.sort(key=lambda it: it.idioma)
    else:
        lista_enlaces.sort(key=lambda it: it.orden, reverse=True)

    if filtro_idioma != 3 and filtrados:
        title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
        lista_enlaces.append(
            item.clone(title=title,
                       action="findvideos",
                       url=item.url,
                       text_color=color3,
                       filtro=True))

    return lista_enlaces
コード例 #30
0
def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
    """Scrape the mirror links of one tab of the detail page.

    data          -- full HTML of the detail page.
    filtro_idioma -- numeric language filter; 3 means "no filter".
    dict_idiomas  -- maps a scraped language label to its numeric code.
    type          -- id of the link table/tab to scrape (e.g. "online").
    item          -- base Item cloned for every link found.

    Returns a list of playable items plus, when the language filter hid
    some links, one extra "show filtered links" entry.
    """
    logger.info()
    lista_enlaces = []

    matches = []
    if type == "online":
        # Embedded players live in tabs: first collect each tab id with its
        # language label, then pull the iframe URL out of each tab body.
        patron = '<a href="#([^"]+)" data-toggle="tab">([^<]+)</a>'
        bloques = scrapertools.find_multiple_matches(data, patron)
        for id, language in bloques:  # NOTE: shadows the id() builtin
            patron = 'id="' + id + '">.*?<iframe src="([^"]+)"'
            url = scrapertools.find_single_match(data, patron)
            # An empty server name marks "detect server from URL" below.
            matches.append([url, "", language])

    # Table rows: (url, server name, language, quality).
    bloque2 = scrapertools.find_single_match(data, '<div class="table-link" id="%s">(.*?)</table>' % type)
    patron = 'tr>[^<]+<td>.*?href="([^"]+)".*?src.*?title="([^"]+)"' \
             '.*?src.*?title="([^"]+)".*?src.*?title="(.*?)"'
    matches.extend(scrapertools.find_multiple_matches(bloque2, patron))
    filtrados = []  # language labels hidden by the filter
    for match in matches:
        scrapedurl = match[0]
        language = match[2].strip()
        if not match[1]:
            # Iframe link: no server name scraped, derive it from the URL.
            server = servertools.get_server_from_url(scrapedurl)
            title = "   Mirror en " + server + " (" + language + ")"
        else:
            # Map site aliases to the local connector (server module) names.
            server = match[1].lower()
            if server == "uploaded":
                server = "uploadedto"
            elif server == "streamin":
                server = "streaminto"
            elif server == "netu":
                server = "netutv"
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(server)
            if mostrar_server:
                try:
                    # Probe that a connector module exists; failures are
                    # swallowed and the link is kept anyway.
                    servers_module = __import__("servers." + server)
                except:
                    pass
            # NOTE(review): mostrar_server never gates the append below, so
            # "hidepremium" has no visible effect in this branch -- confirm
            # whether that is intentional.
            title = "   Mirror en " + server + " (" + language + ") (Calidad " + match[3].strip() + ")"

        if filtro_idioma == 3 or item.filtro:
            # No language filter active (or already showing filtered links).
            lista_enlaces.append(item.clone(title=title, action="play", server=server, text_color=color2,
                                            url=scrapedurl, idioma=language, extra=item.url))
        else:
            idioma = dict_idiomas[language]
            if idioma == filtro_idioma:
                lista_enlaces.append(item.clone(title=title, text_color=color2, action="play",  url=scrapedurl,
                                                server=server, extra=item.url))
            else:
                if language not in filtrados:
                    filtrados.append(language)

    if filtro_idioma != 3:
        if len(filtrados) > 0:
            # Offer a way to reveal the links hidden by the language filter.
            title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
            lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
                                            filtro=True))

    return lista_enlaces
コード例 #31
0
ファイル: newpct1.py プロジェクト: juanfrias6272/juanfrias
def findvideos(item):
    """Scrape a newpct1 detail page and build the playable items.

    Produces one torrent item (when a torrent link exists), then one item
    per "Ver" (watch) mirror and one per download part, keeping only the
    servers that are enabled and whose connector resolves the link.
    """
    logger.info()
    itemlist = []

    ## Any of these three URL variants is valid; the torrent one is used.
    #item.url = item.url.replace("1.com/","1.com/ver-online/")
    #item.url = item.url.replace("1.com/","1.com/descarga-directa/")
    item.url = item.url.replace("1.com/", "1.com/descarga-torrent/")

    # Download the page; strip newlines/tabs/double-spaces/HTML comments.
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                  httptools.downloadpage(item.url).data)
    # Site is served as latin-1; normalize to utf-8 (Python 2 str dance).
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    # Title is split across the <strong> tag and the text after it.
    title = scrapertools.find_single_match(
        data, "<h1><strong>([^<]+)</strong>[^<]+</h1>")
    title += scrapertools.find_single_match(
        data, "<h1><strong>[^<]+</strong>([^<]+)</h1>")
    caratula = scrapertools.find_single_match(
        data, '<div class="entry-left">.*?src="([^"]+)"')

    # Sample of the anchor being scraped:
    #<a href="http://tumejorjuego.com/download/index.php?link=descargar-torrent/058310_yo-frankenstein-blurayrip-ac3-51.html" title="Descargar torrent de Yo Frankenstein " class="btn-torrent" target="_blank">Descarga tu Archivo torrent!</a>

    patron = '<a href="([^"]+)" title="[^"]+" class="btn-torrent" target="_blank">'

    # scraped: torrent link
    url = scrapertools.find_single_match(data, patron)
    if url != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 server="torrent",
                 title=title + " [torrent]",
                 fulltitle=title,
                 url=url,
                 thumbnail=caratula,
                 plot=item.plot,
                 folder=False))

    # scraped: watch links and download links (single or multi-part)
    data = data.replace("'", '"')
    # Unwrap the javascript popup so the raw link URLs remain in the page.
    data = data.replace(
        'javascript:;" onClick="popup("http://www.newpct1.com/pct1/library/include/ajax/get_modallinks.php?links=',
        "")
    data = data.replace(
        "http://tumejorserie.com/descargar/url_encript.php?link=", "")
    data = data.replace("$!", "#!")

    # tab2 = download links, tab3 = watch-online links.
    patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    patron_ver = '<div id="tab3"[^>]+>.*?</ul>'

    match_ver = scrapertools.find_single_match(data, patron_ver)
    match_descargar = scrapertools.find_single_match(data, patron_descargar)

    patron = '<div class="box1"><img src="([^"]+)".*?'  # logo
    patron += '<div class="box2">([^<]+)</div>'  # server
    patron += '<div class="box3">([^<]+)</div>'  # language
    patron += '<div class="box4">([^<]+)</div>'  # quality
    patron += '<div class="box5"><a href="([^"]+)".*?'  # link
    patron += '<div class="box6">([^<]+)</div>'  # title

    enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
    enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        # Site alias -> local connector (server module) name.
        servidor = servidor.replace("streamin", "streaminto")
        titulo = titulo + " [" + servidor + "]"
        if servertools.is_server_enabled(servidor):
            try:
                # Resolve the final video URL through the server connector;
                # any connector failure silently drops the link.
                servers_module = __import__("servers." + servidor)
                server_module = getattr(servers_module, servidor)
                devuelve = server_module.find_videos(enlace)
                if devuelve:
                    enlace = devuelve[0][1]
                    itemlist.append(
                        Item(fanart=item.fanart,
                             channel=item.channel,
                             action="play",
                             server=servidor,
                             title=titulo,
                             fulltitle=item.title,
                             url=enlace,
                             thumbnail=logo,
                             plot=item.plot,
                             folder=False))
            except:
                pass

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        servidor = servidor.replace("uploaded", "uploadedto")
        # Multi-part downloads: space-separated URLs, one item per part.
        partes = enlace.split(" ")
        p = 1
        for enlace in partes:
            parte_titulo = titulo + " (%s/%s)" % (
                p, len(partes)) + " [" + servidor + "]"
            p += 1
            if servertools.is_server_enabled(servidor):
                try:
                    servers_module = __import__("servers." + servidor)
                    server_module = getattr(servers_module, servidor)
                    devuelve = server_module.find_videos(enlace)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(
                            Item(fanart=item.fanart,
                                 channel=item.channel,
                                 action="play",
                                 server=servidor,
                                 title=parte_titulo,
                                 fulltitle=item.title,
                                 url=enlace,
                                 thumbnail=logo,
                                 plot=item.plot,
                                 folder=False))
                except:
                    pass
    return itemlist
コード例 #32
0
def findvideos(item):
    """Build the list of watch/download links for an hdfull title.

    item.url may carry an appended "###<id>;<type>" suffix with the content
    id and type; when type is "2" (movie) and the user has an account,
    favourites/library/trailer entries are offered as well.
    """
    logger.info("pelisalacarta.channels.hdfull findvideos")

    itemlist = []

    ## Load the watch-status info for the logged-in user.
    status = jsontools.load_json(
        scrapertools.cache_page(host + '/a/status/all'))

    url_targets = item.url

    ## Split off the "###id;type" suffix, if present.
    # NOTE(review): when the suffix is missing, "id" and "type" stay bound
    # to the Python builtins, so the 'type == "2"' checks below are simply
    # False instead of raising -- fragile; confirm this is intended.
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]

    ## type "2" = movie: offer favourites toggle / library / trailer items.
    if type == "2" and account and item.category != "Cine":
        title = bbcode_kodi2html(
            " ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )")
        if "Favorito" in item.title:
            title = bbcode_kodi2html(
                " ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )")
        if config.get_library_support():
            title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show +
                                           "[/B][/COLOR] )")
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title_label,
                     fulltitle=title_label,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     folder=False))

            title_label = bbcode_kodi2html(
                " ( [COLOR green][B]Tráiler[/B][/COLOR] )")

            itemlist.append(
                Item(channel=item.channel,
                     action="trailer",
                     title=title_label,
                     fulltitle=title_label,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     show=item.show))

        itemlist.append(
            Item(channel=item.channel,
                 action="set_status",
                 title=title,
                 fulltitle=title,
                 url=url_targets,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 folder=True))

    data = agrupa_datos(scrapertools.cache_page(item.url))

    # One block per embedded source: language, server, quality, URL, option.
    patron = '<div class="embed-selector"[^<]+'
    patron += '<h5 class="left"[^<]+'
    patron += '<span[^<]+<b class="key">\s*Idioma.\s*</b>([^<]+)</span[^<]+'
    patron += '<span[^<]+<b class="key">\s*Servidor.\s*</b><b[^>]+>([^<]+)</b[^<]+</span[^<]+'
    patron += '<span[^<]+<b class="key">\s*Calidad.\s*</b>([^<]+)</span[^<]+</h5.*?'
    patron += '<a href="(http[^"]+)".*?'
    patron += '</i>([^<]+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for idioma, servername, calidad, url, opcion in matches:
        opcion = opcion.strip()
        if opcion != "Descargar":
            opcion = "Ver"
        title = opcion + ": " + servername.strip() + " (" + calidad.strip(
        ) + ")" + " (" + idioma.strip() + ")"
        title = scrapertools.htmlclean(title)
        # Check the connector exists and hide it for premium-only setups.
        servername = servername.lower().split(".")[0]

        # Site aliases mapped to the local connector (server module) names.
        if servername == "streamin": servername = "streaminto"
        if servername == "waaw": servername = "netutv"
        if servername == "ul": servername = "uploadedto"
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(servername)
        if mostrar_server:
            try:
                # Probe the connector; ImportError drops the link silently.
                servers_module = __import__("servers." + servername)
                thumbnail = item.thumbnail
                plot = item.title + "\n\n" + scrapertools.find_single_match(
                    data, '<meta property="og:description" content="([^"]+)"')
                plot = scrapertools.htmlclean(plot)
                fanart = scrapertools.find_single_match(
                    data, '<div style="background-image.url. ([^\s]+)')

                # Re-append the id/type suffix so "play" can update status.
                url += "###" + id + ";" + type

                itemlist.append(
                    Item(channel=item.channel,
                         action="play",
                         title=title,
                         fulltitle=title,
                         url=url,
                         thumbnail=thumbnail,
                         plot=plot,
                         fanart=fanart,
                         show=item.show,
                         folder=True))
            except:
                pass

    ## 2 = movie
    if type == "2" and item.category != "Cine":
        ## STRM files for all available server links.
        ## If the movie's STRM file does not exist yet, this shows the
        ## ">> Add to library..." item.
        try:
            itemlist.extend(file_cine_library(item, url_targets))
        except:
            pass

    return itemlist
コード例 #33
0
ファイル: torrentrapid.py プロジェクト: x7r6xx/repo
def findvideos(item):
    """Scrape a torrentrapid (Newpct1-clone) detail page into playable items.

    Emits a header item with the full title, a torrent item when a torrent
    link is present, then one item per "Ver" (watch) mirror and one per
    download part found with the Torrentlocula scraping patterns.
    """
    logger.info()
    itemlist = []
    ## Any of these three URL variants is valid; the torrent one is used.
    # item.url = item.url.replace(".com/",".com/ver-online/")
    # item.url = item.url.replace(".com/",".com/descarga-directa/")
    item.url = item.url.replace(".com/", ".com/descarga-torrent/")

    # Download the page; strip newlines/tabs/double-spaces/HTML comments.
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                  httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("$!", "#!").replace("'",
                                            "\"").replace("ñ", "ñ").replace(
                                                "//pictures", "/pictures")

    title = scrapertools.find_single_match(
        data, "<h1.*?<strong>([^<]+)<\/strong>.*?<\/h1>"
    )  # fixed to adapt it to mispelisy.series.com
    title += scrapertools.find_single_match(
        data, "<h1.*?<strong>[^<]+<\/strong>([^<]+)<\/h1>"
    )  # fixed to adapt it to mispelisy.series.com
    #caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
    caratula = scrapertools.find_single_match(data,
                                              '<h1.*?<img.*?src="([^"]+)')

    patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
    # scraped: torrent link
    url = scrapertools.find_single_match(data, patron)

    if item.infoLabels['year']:  # add the year to the overall title
        year = '[%s]' % str(item.infoLabels['year'])
    else:
        year = ""

    if item.infoLabels[
            'aired'] and item.contentType == "episode":  # use the episode air year for series
        year = scrapertools.find_single_match(str(item.infoLabels['aired']),
                                              r'\/(\d{4})')
        year = '[%s]' % year

    title_gen = title
    if item.contentType == "episode":  # strip info duplicated in series titles
        title = re.sub(r'Temp.*?\[', '[', title)
        title = re.sub(r'\[Cap.*?\]', '', title)
        title_epi = '%sx%s - %s' % (str(
            item.contentSeason), str(
                item.contentEpisodeNumber), item.contentTitle)
        title_gen = '%s %s, %s' % (title_epi, year, title)
        title_torrent = '%s, %s' % (title_epi, item.contentSerieName)
    else:
        title_torrent = item.contentTitle

    if item.infoLabels['quality']:
        if not config.get_setting(
                "unify"):  # if Smart Titles are NOT selected:
            title_torrent = '%s [%s]' % (title_torrent,
                                         item.infoLabels['quality'])
        else:
            title_torrent = '%s (%s)' % (title_torrent,
                                         item.infoLabels['quality'])
    if not config.get_setting(
            "unify"):  # if Smart Titles are NOT selected:
        title_gen = '[COLOR gold]**- Título: [/COLOR]%s -**' % (title_gen)
    else:
        title_gen = '[COLOR gold]Título: [/COLOR]%s' % (title_gen)
    if config.get_setting(
            "quit_channel_name",
            "videolibrary") == 1 and item.contentChannel == "videolibrary":
        title_gen = '%s: %s' % (item.channel.capitalize(), title_gen)
    itemlist.append(
        item.clone(title=title_gen, action="",
                   folder=False))  # header item with all the video details

    title = title_torrent
    title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (
        title_torrent)
    if url != "":  # torrent link found
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 server="torrent",
                 title=title_torrent,
                 fulltitle=title,
                 url=url,
                 thumbnail=caratula,
                 plot=item.plot,
                 infoLabels=item.infoLabels,
                 folder=False))

    logger.debug("TORRENT: url: " + url + " / title: " + title +
                 " / calidad: " + item.quality + " / context: " +
                 str(item.context))

    # scraped: watch links and download links (single or multi-part)

    data = data.replace(
        "http://tumejorserie.com/descargar/url_encript.php?link=", "(")
    data = re.sub(
        r'javascript:;" onClick="popup\("http:\/\/(?:www.)?torrentrapid.com\/\w{1,9}\/library\/include\/ajax\/get_modallinks.php\?links=',
        "", data)

    # New server-scraping scheme by Torrentlocula, compatible with other Newpct1 clones
    patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
    patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)?<'

    enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
    enlaces_descargar = enlaces_ver
    #logger.debug(enlaces_ver)

    if len(enlaces_ver) > 0:
        if not config.get_setting(
                "unify"):  # if Smart Titles are NOT selected:
            itemlist.append(
                item.clone(title="[COLOR gold]**- Enlaces Ver: -**[/COLOR]",
                           action="",
                           folder=False))
        else:
            itemlist.append(
                item.clone(title="[COLOR gold] Enlaces Ver: [/COLOR]",
                           action="",
                           folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        if "Ver" in titulo:
            # Site alias -> local connector (server module) name.
            servidor = servidor.replace("streamin", "streaminto")
            titulo = title
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            titulo = '[COLOR yellow][%s]-[/COLOR] %s [online]' % (
                servidor.capitalize(), titulo)
            logger.debug("VER: url: " + enlace + " / title: " + titulo +
                         " / servidor: " + servidor + " / idioma: " + idioma)

            if mostrar_server:
                try:
                    # Resolve the final URL via the server connector; any
                    # failure silently drops the link.
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(
                            Item(fanart=item.fanart,
                                 channel=item.channel,
                                 action="play",
                                 server=servidor,
                                 title=titulo,
                                 fulltitle=title,
                                 url=enlace,
                                 thumbnail=logo,
                                 plot=item.plot,
                                 infoLabels=item.infoLabels,
                                 folder=False))
                except:
                    pass

    if len(enlaces_descargar) > 0:
        if not config.get_setting(
                "unify"):  # if Smart Titles are NOT selected:
            itemlist.append(
                item.clone(
                    title="[COLOR gold]**- Enlaces Descargar: -**[/COLOR]",
                    action="",
                    folder=False))
        else:
            itemlist.append(
                item.clone(title="[COLOR gold] Enlaces Descargar: [/COLOR]",
                           action="",
                           folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        if "Ver" not in titulo:
            servidor = servidor.replace("uploaded", "uploadedto")
            # Multi-part downloads: space-separated URLs, one item per part.
            partes = enlace.split(" ")
            titulo = "Descarga "
            p = 1
            logger.debug("DESCARGAR: url: " + enlace + " / title: " + titulo +
                         title + " / servidor: " + servidor + " / idioma: " +
                         idioma)
            for enlace in partes:
                parte_titulo = titulo + " (%s/%s)" % (p, len(partes))
                p += 1
                mostrar_server = True
                if config.get_setting("hidepremium"):
                    mostrar_server = servertools.is_server_enabled(servidor)
                parte_titulo = '[COLOR yellow][%s]-[/COLOR] %s' % (
                    servidor.capitalize(), parte_titulo)
                if item.infoLabels['quality']:
                    if not config.get_setting(
                            "unify"
                    ):  # if Smart Titles are NOT selected:
                        parte_titulo = '%s [%s]' % (parte_titulo,
                                                    item.infoLabels['quality'])
                    else:
                        parte_titulo = '%s (%s)' % (parte_titulo,
                                                    item.infoLabels['quality'])
                if mostrar_server:
                    try:
                        devuelve = servertools.findvideosbyserver(
                            enlace, servidor)
                        if devuelve:
                            enlace = devuelve[0][1]
                            itemlist.append(
                                Item(fanart=item.fanart,
                                     channel=item.channel,
                                     action="play",
                                     server=servidor,
                                     title=parte_titulo,
                                     fulltitle=title,
                                     url=enlace,
                                     thumbnail=logo,
                                     plot=item.plot,
                                     infoLabels=item.infoLabels,
                                     folder=False))
                    except:
                        pass

    return itemlist
コード例 #34
0
ファイル: oranline.py プロジェクト: MoRgUiJu/morguiju.repo
def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
    """Scrape the mirror links inside <div id=type>, apply the user's
    language filter, and return them as playable items (plus a "show
    filtered links" entry when the filter hid some languages)."""
    logger.info("pelisalacarta.channels.oranline bloque_enlaces")

    lista_enlaces = []
    bloque = scrapertools.find_single_match(
        data, '<div id="' + type + '">(.*?)</table>')
    patron = 'tr>[^<]*<td>.*?href="([^"]+)".*?<span>([^<]+)</span>' \
             '.*?<td>([^<]+)</td>.*?<td>([^<]+)</td>'
    filas = scrapertools.find_multiple_matches(bloque, patron)

    # Site aliases mapped to the local connector (server module) names.
    alias = {"ul": "uploadedto", "streamin": "streaminto", "waaw": "netutv"}
    filtrados = []
    for url_enlace, nombre_server, idioma_scraped, calidad in filas:
        idioma_scraped = idioma_scraped.strip()
        nombre_server = nombre_server.lower()
        nombre_server = alias.get(nombre_server, nombre_server)
        # Optionally hide premium-only servers.
        if config.get_setting("hidepremium") == "true":
            if not servertools.is_server_enabled(nombre_server):
                continue
        try:
            # Probe that a connector module exists; any failure (including a
            # missing language key below) silently drops the link.
            servers_module = __import__("servers." + nombre_server)
            title = "   Mirror en " + nombre_server + " (" + idioma_scraped + ") (Calidad " + calidad.strip(
            ) + ")"
            if filtro_idioma == 4 or item.filtro or item.extra == "findvideos":
                # No language filter active (or already showing filtered links).
                lista_enlaces.append(
                    item.clone(title=title,
                               action="play",
                               server=nombre_server,
                               text_color=color2,
                               url=url_enlace,
                               idioma=idioma_scraped))
            elif dict_idiomas[idioma_scraped] == filtro_idioma:
                lista_enlaces.append(
                    item.clone(title=title,
                               text_color=color2,
                               action="play",
                               url=url_enlace,
                               server=nombre_server))
            elif idioma_scraped not in filtrados:
                filtrados.append(idioma_scraped)
        except:
            pass

    if filtro_idioma != 4 and filtrados:
        title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
        lista_enlaces.append(
            item.clone(title=title,
                       action="findvideos",
                       url=item.url,
                       text_color=color3,
                       filtro=True))

    return lista_enlaces
コード例 #35
0
ファイル: descargasmix.py プロジェクト: CYBERxNUKE/xbmc-addon
def findvideos(item):
    """Build the list of playable / downloadable links for a movie page.

    Scrapes the page at ``item.url`` for four kinds of link sections:
    old-format magnet torrents, online streaming mirrors, an embedded
    Googlevideo player, and per-server download mirror blocks.

    Params:
        item: channel Item whose ``url`` points at the movie detail page.
              ``item.extra``/``item.path`` divert episode-style entries
              to ``epienlaces``.

    Returns:
        list of cloned Item objects (plus a trailer-search entry and,
        when applicable, an "add to library" entry).
    """
    logger.info()
    # Episode-style entries (or library paths) are handled elsewhere.
    if (item.extra and item.extra != "findvideos") or item.path:
        return epienlaces(item)

    itemlist = []
    item.text_color = color3

    data = get_data(item.url)
    item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year:
        # Best-effort TMDB enrichment; any failure is deliberately ignored.
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    old_format = False
    # Old-format torrent section: plain magnet anchors.
    if "Enlaces de descarga</div>" in data:
        old_format = True
        matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
        for scrapedurl in matches:
            scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
            # Strip the tracker's "&b=4" redirect parameter before decoding.
            scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
            title = "[Torrent] "
            title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
            itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
                                       text_color="green"))

    # Online streaming section.
    data_online = scrapertools.find_single_match(data, 'Ver online</div>(.*?)<div class="section-box related-posts">')
    if data_online:
        title = "Enlaces Online"
        # Language tag is encoded as a CSS class on the section.
        if '"l-latino2"' in data_online:
            title += " [LAT]"
        elif '"l-esp2"' in data_online:
            title += " [ESP]"
        elif '"l-vose2"' in data_online:
            title += " [VOSE]"

        patron = 'make_links.*?,[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for i, code in enumerate(matches):
            # Obfuscated link -> real URL(s); servertools identifies the server.
            enlace = mostrar_enlaces(code)
            enlaces = servertools.findvideos(data=enlace[0])
            # NOTE(review): `enlaces` is a list of (title, url, server) tuples, so
            # this substring test can never match an element — likely intended to
            # inspect enlace[0]; kept as-is to preserve behavior.
            if enlaces and "peliculas.nu" not in enlaces:
                if i == 0:
                    # Header row with optional size / extra info, emitted once.
                    extra_info = scrapertools.find_single_match(data_online, '<span class="tooltiptext">(.*?)</span>')
                    size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()

                    if size:
                        title += " [%s]" % size
                    new_item = item.clone(title=title, action="", text_color=color1)
                    if extra_info:
                        extra_info = scrapertools.htmlclean(extra_info)
                        new_item.infoLabels["plot"] = extra_info
                        new_item.title += " +INFO"
                    itemlist.append(new_item)

                title = "   Ver vídeo en " + enlaces[0][2]
                itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1]))

    # Embedded Googlevideo player: URL is %-encoded with '@' standing in for '%'.
    scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'")
    if scriptg:
        gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
        url = scrapertools.find_single_match(gvideo, 'src="([^"]+)"')
        if url:
            itemlist.append(item.clone(action="play", server="directo", url=url, extra=item.url,
                                       title="   Ver vídeo en Googlevideo (Máxima calidad)"))

    # Download mirror blocks (one per quality/language variant).
    patron = '<div class="(?:floatLeft |)double(?:nuevo|)">(.*?)</div>(.*?)' \
             '(?:<div(?: id="mirrors"|) class="(?:contentModuleSmall |)mirrors">|<div class="section-box related-' \
             'posts">)'
    bloques_descarga = scrapertools.find_multiple_matches(data, patron)
    for title_bloque, bloque in bloques_descarga:
        if title_bloque == "Ver online":
            continue
        if '"l-latino2"' in bloque:
            title_bloque += " [LAT]"
        elif '"l-esp2"' in bloque:
            title_bloque += " [ESP]"
        elif '"l-vose2"' in bloque:
            title_bloque += " [VOSE]"

        extra_info = scrapertools.find_single_match(bloque, '<span class="tooltiptext">(.*?)</span>')
        size = scrapertools.find_single_match(bloque, '(?i)TAMAÑO:\s*(.*?)<').strip()

        if size:
            title_bloque += " [%s]" % size
        new_item = item.clone(title=title_bloque, action="", text_color=color1)
        if extra_info:
            extra_info = scrapertools.htmlclean(extra_info)
            new_item.infoLabels["plot"] = extra_info
            new_item.title += " +INFO"
        itemlist.append(new_item)

        if '<div class="subiendo">' in bloque:
            itemlist.append(item.clone(title="   Los enlaces se están subiendo", action=""))
            continue
        patron = 'class="separate.*? ([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedserver, scrapedurl in matches:
            # Fix: boolean test, not bitwise `|` on comparison results.
            if scrapedserver in ("ul", "uploaded"):
                scrapedserver = "uploadedto"
            # Python 2: decode before .capitalize() so accented names survive.
            titulo = unicode(scrapedserver, "utf-8").capitalize().encode("utf-8")
            if titulo == "Magnet" and old_format:
                # Already listed by the old-format torrent section above.
                continue
            elif titulo == "Magnet" and not old_format:
                title = "   Enlace Torrent"
                scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
                scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
                itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
                                           text_color="green"))
                continue
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    # Probe that a connector module exists for this server;
                    # ImportError silently skips unsupported servers.
                    servers_module = __import__("servers." + scrapedserver)
                    # Count how many links hide behind the obfuscated code.
                    urls = mostrar_enlaces(scrapedurl)
                    numero = str(len(urls))
                    titulo = "   %s - Nº enlaces: %s" % (titulo, numero)
                    itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl, server=scrapedserver))
                except:
                    pass

    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
    if item.extra != "findvideos" and config.get_library_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la biblioteca", action="add_pelicula_to_library",
                             extra="findvideos", url=item.url, infoLabels={'title': item.fulltitle},
                             fulltitle=item.fulltitle, text_color="green"))

    return itemlist