Example #1
def extract_safe(item):
    logger.info("pelisalacarta.channels.puyasubs extract_safe")
    if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]:
        from core import tmdb
        tmdb.set_infoLabels_item(item, True, idioma_busqueda="en")
    itemlist = list()

    hash = item.url.rsplit("/", 1)[1]
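    # Ask the Safelinking API to resolve the links hidden behind this hash (POST JSON, get JSON back)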
    headers = [['Content-Type', 'application/json;charset=utf-8']]
    post = jsontools.dump_json({"hash": hash})
    data = scrapertools.downloadpage("http://safelinking.net/v1/protected",
                                     post, headers)
    data = jsontools.load_json(data)

    for link in data.get("links", []):
        enlace = link["url"]
        domain = link["domain"]
        title = "Ver por %s" % domain
        action = "play"
        server = ""  # default so the clone below never references an unbound name for unknown domains
        if "mega" in domain:
            server = "mega"
            if "/#F!" in enlace:
                action = "carpeta"

        elif "1fichier" in domain:
            server = "onefichier"
            if "/dir/" in enlace:
                action = "carpeta"

        itemlist.append(
            item.clone(title=title, action=action, url=enlace, server=server))

    return itemlist
Example #2
File: cinefox.py Project: dealex1/addon
def menu_info_episode(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if item.show == "":
        item.show = scrapertools.find_single_match(data, 'class="h1-like media-title".*?>([^<]+)</a>')
    episode = scrapertools.find_single_match(data, '<span class="indicator">([^<]+)</span>')
    item.infoLabels["season"] = episode.split("x")[0]
    item.infoLabels["episode"] = episode.split("x")[1]
    tmdb.set_infoLabels_item(item, __modo_grafico__)
    if item.infoLabels["plot"] == "":
        sinopsis = scrapertools.find_single_match(data, 'id="episode-plot">(.*?)</p>')
        if not "No hay sinopsis" in sinopsis:
            item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
    title = "Ver enlaces %s - [" + item.show + " " + episode + "]"
    itemlist.append(item.clone(action="findvideos", title=title % "Online", extra="episode", type="streaming"))
    itemlist.append(item.clone(action="findvideos", title=title % "de Descarga", extra="episode", type="download"))
    siguiente = scrapertools.find_single_match(data, '<a class="episode-nav-arrow next" href="([^"]+)" title="([^"]+)"')
    if siguiente:
        titulo = ">> Siguiente Episodio - [" + siguiente[1] + "]"
        itemlist.append(item.clone(action="menu_info_episode", title=titulo, url=siguiente[0], extra="",
                                   text_color=color1))
    patron = '<a class="episode-nav-arrow previous" href="([^"]+)" title="([^"]+)"'
    anterior = scrapertools.find_single_match(data, patron)
    if anterior:
        titulo = "<< Episodio Anterior - [" + anterior[1] + "]"
        itemlist.append(item.clone(action="menu_info_episode", title=titulo, url=anterior[0], extra="",
                                   text_color=color3))
    url_serie = scrapertools.find_single_match(data, '<a href="([^"]+)" class="h1-like media-title"')
    url_serie += "/episodios"
    itemlist.append(item.clone(title="Ir a la lista de capítulos", action="episodios", url=url_serie, extra="",
                               text_color=color4))
    itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                               text_color="magenta", context=""))
    return itemlist
Example #3
def enlaces(item):
    logger.info()
    item.extra = ""
    item.text_color = ""
    itemlist = []
    # Download the page
    data = scrapertools.downloadpage(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}", '', data)
    item.fanart = scrapertools.find_single_match(
        data, "CUSTOM BACKGROUND.*?url\('([^']+)'")
    item.infoLabels["plot"] = scrapertools.find_single_match(
        data, 'dt>Sinopsis</dt> <dd class=[^>]+>(.*?)</dd>')
    year = scrapertools.find_single_match(data,
                                          '<dt>Estreno</dt> <dd>(\d+)</dd>')

    try:
        from core import tmdb
        item.infoLabels['year'] = int(year)
        # Fetch the basic data for all the movies using multiple threads
        tmdb.set_infoLabels_item(item, __modo_grafico__)
    except:
        pass

    filtro_idioma = config.get_setting("filterlanguages", item.channel)
    filtro_enlaces = config.get_setting("filterlinks", item.channel)
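    # filterlinks: 0 hides the online block, 1 hides the download block, any other value shows both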

    dict_idiomas = {'CAST': 2, 'LAT': 1, 'VOSE': 0}

    if filtro_enlaces != 0:
        itemlist.append(
            item.clone(action="",
                       title="Enlaces Online",
                       text_color="dodgerblue",
                       text_bold=True))
        itemlist = bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist,
                                  "ss", item)
    if filtro_enlaces != 1:
        itemlist.append(
            item.clone(action="",
                       title="Enlaces Descarga",
                       text_color="dodgerblue",
                       text_bold=True))
        itemlist = bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist,
                                  "dd", item)

    trailer_id = scrapertools.find_single_match(
        data, 'data:\s*\{\s*id:\s*"([^"]+)"')
    data_trailer = scrapertools.downloadpage("http://pelisdanko.com/trailer",
                                             post="id=%s" % trailer_id)
    url_trailer = scrapertools.find_single_match(data_trailer, 'src="([^"]+)"')
    if url_trailer != "":
        url_trailer = url_trailer.replace("embed/", "watch?v=")
        item.infoLabels['trailer'] = url_trailer
        itemlist.append(
            item.clone(channel="trailertools",
                       action="buscartrailer",
                       title="Buscar Tráiler",
                       text_color="magenta"))

    return itemlist
Example #4
def findvideos(item):
    logger.info()
    itemlist = []
    if item.infoLabels["tmdb_id"]:
        tmdb.set_infoLabels_item(item, __modo_grafico__)
    data = httptools.downloadpage(item.url).data
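    # Pages with a base64 data-URI script hide the netu.tv video id inside a hex div id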
    if "data:text/javascript;base64" in data:
        div_id = scrapertools.find_single_match(data,
                                                '<div id="([0-9a-fA-F]+)"')
        # ~ logger.info(div_id)
        vid_id = scrapertools.find_single_match(decodifica_id(div_id),
                                                ':"([^"]+)"')
        # ~ logger.info(vid_id)
        itemlist.append(
            item.clone(url='http://netu.tv/watch_video.php?v=' + vid_id,
                       server='netutv',
                       action='play'))
    else:
        iframe = scrapertools.find_single_match(
            data, '<iframe width="720".*?src="([^"]+)"')
        data = data.replace(
            "googleusercontent",
            "malo")  # so it doesn't pick up bogus gvideo links
        if "goo.gl/" in iframe:
            data += httptools.downloadpage(iframe,
                                           follow_redirects=False,
                                           only_headers=True).headers.get(
                                               "location", "")
        itemlist = servertools.find_video_items(item, data)
    if config.get_videolibrary_support():
        itemlist.append(
            item.clone(action="add_pelicula_to_library",
                       title="Añadir película a la videoteca"))
    return itemlist
Example #5
def peliculas(item):
    logger.info()
    itemlist = []

    if item.extra == "completo":
        try:
            from core import tmdb
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    data = httptools.downloadpage(item.url).data
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(
            data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')

    bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
    matches = scrapertools.find_multiple_matches(
        bloque, '<li><a href="([^"]+)" title="([^"]+)"')
    if len(matches) == 1:
        item.url = host + matches[0][0]
        itemlist = findvideos(item)
    else:
        for url, title in matches:
            itemlist.append(
                item.clone(action="findvideos", title=title, url=url,
                           extra=""))

    return itemlist
Example #6
def temporadas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    try:
        from core import tmdb
        tmdb.set_infoLabels_item(item, __modo_grafico__)
    except:
        pass

    matches = scrapertools.find_multiple_matches(data, '<a class="movie-season" data-id="([^"]+)"')
    matches = list(set(matches))
    for season in matches:
        item.infoLabels['season'] = season
        itemlist.append(item.clone(action="episodios", title="Temporada "+season, context=["buscar_trailer"], contentType="season"))

    itemlist.sort(key=lambda item: item.title)
    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass

    if not "trailer" in item.infoLabels:
        trailer_url = scrapertools.find_single_match(data, 'class="n-movie-trailer">([^<]+)</span>')
        item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=")

    itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                               text_color="magenta", context=""))
    
    return itemlist
Example #7
File: cinefox.py Project: dealex1/addon
def menu_info(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
    if year != "" and not "tmdb_id" in item.infoLabels:
        item.infoLabels["year"] = year
        tmdb.set_infoLabels_item(item, __modo_grafico__)
    if item.infoLabels["plot"] == "":
        sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
        item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
    id = scrapertools.find_single_match(item.url, '/(\d+)/')
    data_trailer = httptools.downloadpage(host + "/media/trailer?idm=%s&mediaType=1" % id).json
    trailer_url = data_trailer["video"]["url"]
    if trailer_url != "":
        item.infoLabels["trailer"] = trailer_url
    title = "Ver enlaces %s - [" + item.contentTitle + "]"
    itemlist.append(item.clone(action="findvideos", title=title % "Online", extra="media", type="streaming"))
    itemlist.append(item.clone(action="findvideos", title=title % "de Descarga", extra="media", type="download"))
    itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                               text_color="magenta", context=""))
    if config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5,
                             title="Añadir película a la videoteca", url=item.url, thumbnail=item.thumbnail,
                             fanart=item.fanart, fulltitle=item.fulltitle,
                             extra="media|"))
    return itemlist
Example #8
def series_por_letra_y_grupo(item):
    logger.info("letra: %s - grupo: %s" % (item.letter, item.extra))
    itemlist = []
    url = urlparse.urljoin(HOST, "autoload_process.php")
    post_request = {
        "group_no": item.extra,
        "letra": item.letter.lower()
    }
    data = httptools.downloadpage(url, post=urllib.urlencode(post_request)).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = '<div class=list_imagen><img src=(.*?) \/>.*?<div class=list_titulo><a href=(.*?) style=.*?inherit;>(.*?)'
    patron +='<.*?justify>(.*?)<.*?Año:<\/b>.*?(\d{4})<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for img, url, name, plot, year in matches:
        new_item = Item(
            channel=item.channel,
            action="seasons",
            title=name,
            show=name,
            url=urlparse.urljoin(HOST, url),
            thumbnail=urlparse.urljoin(HOST, img),
            context=filtertools.context(item, list_idiomas, list_quality),
            plot=plot,
            infoLabels={'year': year}
        )
        if year:
            tmdb.set_infoLabels_item(new_item)
        itemlist.append(new_item)
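    # The listing serves 8 results per page, so a full page means another page may follow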
    if len(matches) == 8:
        itemlist.append(item.clone(title="Siguiente >>", action="series_por_letra_y_grupo", extra=item.extra + 1))
    if item.extra > 0:
        itemlist.append(item.clone(title="<< Anterior", action="series_por_letra_y_grupo", extra=item.extra - 1))
    return itemlist
Example #9
File: search.py Project: martinbm76/addon
def get_from_temp(item):
    logger.info()

    n = 30
    nextp = n * item.page
    prevp = n * (item.page - 1)

    temp_path = os.path.join(config.get_data_path(), 'temp_search.json')
    results = list()

    with open(temp_path, "r") as temp_list:
        from_temp = json.load(temp_list)

    for elem in from_temp[item.from_channel]:
        results.append(Item().fromurl(elem))

    old_results = results
    results = results[prevp:nextp]

    if len(results) == n and len(old_results) > nextp:  # full page and more results remain beyond it
        results.append(
            Item(channel='search',
                 title='[COLOR yellow]Pagina Siguiente >>[/COLOR]',
                 action='get_from_temp',
                 from_channel=item.from_channel,
                 page=item.page + 1))

    tmdb.set_infoLabels_itemlist(results, True)
    for elem in results:
        if not elem.infoLabels.get('year', ""):
            elem.infoLabels['year'] = '-'
            tmdb.set_infoLabels_item(elem, True)

    return results
Example #10
File: search.py Project: Muzic98/addon
def get_from_temp(item):
    logger.debug()

    n = 30
    nTotal = len(item.itemlist)
    nextp = n * item.page
    prevp = n * (item.page - 1)

    results = [Item().fromurl(elem) for elem in item.itemlist[prevp:nextp]]

    if nextp < nTotal:
        results.append(
            Item(channel='search',
                 title=typo(config.get_localized_string(30992),
                            'bold color kod'),
                 action='get_from_temp',
                 itemlist=item.itemlist,
                 page=item.page + 1))

    tmdb.set_infoLabels_itemlist(results, True)
    for elem in results:
        if not elem.infoLabels.get('year', ""):
            elem.infoLabels['year'] = '-'
            tmdb.set_infoLabels_item(elem, True)

    return results
Example #11
File: search.py Project: linuxvalley/addon
def get_channel_results(item, module_dict, search_action):
    ch = search_action.channel
    results = list()
    valid = list()
    module = module_dict[ch]
    searched_id = item.infoLabels['tmdb_id']

    try:
        results.extend(module.search(search_action, item.text))
        if len(results) == 1:
            if not results[0].action or config.get_localized_string(70006).lower() in results[0].title.lower():
                results.clear()
        if item.mode != 'all':
            for elem in results:
                if not elem.infoLabels.get('year', ""):
                    elem.infoLabels['year'] = '-'
                tmdb.set_infoLabels_item(elem)
                if elem.infoLabels['tmdb_id'] == searched_id:
                    elem.from_channel = ch
                    if not config.get_setting('unify'):
                        elem.title += ' [%s]' % ch
                    valid.append(elem)

        # if len(results) < 0 and len(results) < max_results and item.mode != 'all':
        #
        #     if len(results) == 1:
        #         if not results[0].action or config.get_localized_string(30992).lower() in results[0].title.lower():
        #             return [ch, []]
        #
        #     results = get_info(results)

        return [search_action, results, valid]
    except:
        return [search_action, results, valid]
Example #12
File: infoplus.py Project: Muzic98/addon
def add_infoLabels(movie):
    it = Item(title=movie['title'], infoLabels=movie, contentType=movie['mediatype'])
    tmdb.set_infoLabels_item(it, True)
    movie = it.infoLabels
    item = xbmcgui.ListItem(movie['title'])
    for key, value in movie.items():
        item.setProperty(key, str(value))
    return item
Example #13
def findvideos(item):
    logger.info("pelisalacarta.channels.oranline findvideos")
    itemlist = []

    try:
        filtro_idioma = config.get_setting("filterlanguages", item.channel)
        filtro_enlaces = config.get_setting("filterlinks", item.channel)
    except:
        filtro_idioma = 4
        filtro_enlaces = 2

    dict_idiomas = {'Español': 3, 'Latino': 2, 'VOSE': 1, 'Inglés': 0}

    data = scrapertools.downloadpage(item.url)
    year = scrapertools.find_single_match(data, 'Año de lanzamiento.*?href.*?>(\d+)</a>')

    if year != "":
        item.infoLabels['filtro'] = ""
        item.infoLabels['year'] = int(year)

        if item.infoLabels['plot'] == "":
            # Expand the data from tmdb
            try:
                tmdb.set_infoLabels_item(item, __modo_grafico__)
            except:
                pass

    if item.infoLabels['plot'] == "":
        plot = scrapertools.find_single_match(data, '<h2>Sinopsis</h2>.*?>(.*?)</p>')
        item.infoLabels['plot'] = plot

    if filtro_enlaces != 0:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item)
        if list_enlaces:
            itemlist.append(item.clone(action="", title="Enlaces Online", text_color=color1,
                                       text_bold=True))
            itemlist.extend(list_enlaces)
    if filtro_enlaces != 1:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "descarga", item)
        if list_enlaces:
            itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color=color1,
                                       text_bold=True))
            itemlist.extend(list_enlaces)

    # Option "Add this movie to the XBMC library"
    if itemlist:
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))    
        if item.extra != "findvideos":
            if config.get_library_support():
                itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la biblioteca", text_color="green",
                                     filtro=True, action="add_pelicula_to_library", fulltitle=item.fulltitle,
                                     extra="findvideos", url=item.url, infoLabels={'title': item.fulltitle}))
    
    else:
        itemlist.append(item.clone(title="No hay enlaces disponibles", action="", text_color=color3))

    return itemlist
Example #14
def addFavourite(item):
    logger.info()

    # If we get here via the context menu, the action and channel parameters must be restored
    if item.from_action: item.__dict__['action'] = item.__dict__.pop('from_action')
    if item.from_channel: item.__dict__['channel'] = item.__dict__.pop('from_channel')

    if item.contentType not in ['movie', 'tvshow', 'season', 'episode']:
        platformtools.dialog_ok(config.__addon_name, 'Solamente para películas, series, temporadas o capítulos!')
        return False

    # If tmdb_id is not defined, pick one
    if item.contentType in ['movie', 'tvshow'] and not item.infoLabels['tmdb_id']:
        tipo = 'película' if item.contentType == 'movie' else 'serie'
        platformtools.dialog_ok(config.__addon_name, 'La %s no está identificada en TMDB.' % tipo, 'Si hay varias opciones posibles escoge una de ellas y sino cambia el texto de búsqueda.')
        from core import tmdb
        ret = tmdb.dialog_find_and_set_infoLabels(item)
        if not ret: return False # Cancelled
    
    # If tmdb_id confirmation is enabled
    elif config.get_setting('tracking_confirm_tmdbid', default=False):
        if item.contentType in ['movie', 'tvshow']:
            from core import tmdb
            ret = tmdb.dialog_find_and_set_infoLabels(item)
            if not ret: return False # Cancelled
        else: # for seasons/episodes, don't lose season/episode
            it_ant = item.clone()
            from core import tmdb
            ret = tmdb.dialog_find_and_set_infoLabels(item)
            if not ret: return False # Cancelled
            item.contentType = it_ant.contentType
            item.contentSeason = it_ant.contentSeason
            if it_ant.contentEpisodeNumber: item.contentEpisodeNumber = it_ant.contentEpisodeNumber

    # For a movie/show, complete the tmdb info unless tmdb_plus_info is enabled (not needed for season/episode because the "second pass" has already run)
    if item.contentType in ['movie', 'tvshow'] and not config.get_setting('tmdb_plus_info', default=False):
        from core import tmdb
        tmdb.set_infoLabels_item(item) # fetch more data in a "second pass" (actors, runtime, ...)

    # Save the show/season/episode data for the requested channel
    if item.contentType == 'movie': tit = 'Guardando película'; sub = 'Obteniendo datos ...'
    elif item.contentType == 'tvshow': tit = 'Guardando serie'; sub = 'Obteniendo datos de temporadas y episodios ...'
    elif item.contentType == 'season': tit = 'Guardando temporada'; sub = 'Obteniendo datos de episodios ...'
    else: tit = 'Guardando episodio'; sub = 'Obteniendo datos ...'

    platformtools.dialog_notification(tit, sub)
    if item.contentType == 'movie':
        done, msg = trackingtools.scrap_and_save_movie(item.clone())
    else:
        done, msg = trackingtools.scrap_and_save_tvshow(item.clone())

    if not done:
        platformtools.dialog_ok(config.__addon_name, 'No se pueden añadir enlaces ...', msg)
        return False

    tit = item.contentTitle if item.contentType == 'movie' else item.contentSerieName
    platformtools.dialog_notification(tit, 'Añadidos enlaces del canal [COLOR blue]%s[/COLOR]' % item.channel)
    return True
Example #15
def menupelis(item):
    logger.info(item.url)

    itemlist = []

    data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')

    if item.extra == '':
        section = 'Recién Agregadas'
    elif item.extra == 'year':
        section = 'del Año \d{4}'
    elif item.extra == 'adult':
        section = 'de Eróticas \+18'
    else:
        section = 'de %s'%item.extra

    patronenlaces = '<h.>Películas %s<\/h.>.*?>(.*?)<\/section>'%section


    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)

    for bloque_enlaces in matchesenlaces:

        patron = '<div class="poster-media-card">.*?'
        patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
        patron += '<img src="(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)

        for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
            logger.info("He encontrado el segundo bloque")
            logger.info("extra_info: %s" % extra_info)
            title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
            title = title.replace("Online", "")
            url = urlparse.urljoin(item.url, scrapedurl)
            thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
            quality = scrapertools.find_single_match(extra_info, 'calidad.*?>Calidad (.*?)<')
            year = scrapertools.find_single_match(extra_info, '"anio">(\d{4})<')
            language = scrapertools.find_multiple_matches(extra_info, 'class="(latino|espanol|subtitulado)"')
            # if language = 'ingles':
            #    language='vo'
            new_item = Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
                            thumbnail=thumbnail, fanart=thumbnail, language=language, quality=quality,
                            infoLabels={'year': year})
            if year:
                tmdb.set_infoLabels_item(new_item)

            itemlist.append(new_item)

    try:
        next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"')
        title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
        itemlist.append(
                Item(channel=item.channel, title=title, url=next_page, action="menupelis", thumbnail=item.thumbnail,
                     fanart=item.fanart, folder=True, extra=item.extra))
    except:
        pass
    return itemlist
Example #16
File: cinefox.py Project: x7r6xx/repo
def findvideos(item):
    logger.info()
    itemlist = []
    if not "|" in item.extra and not __menu_info__:
        data = httptools.downloadpage(item.url, add_referer=True).data
        year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
        if year != "" and not "tmdb_id" in item.infoLabels:
            try:
                from core import tmdb
                item.infoLabels["year"] = year
                tmdb.set_infoLabels_item(item, __modo_grafico__)
            except:
                pass

        if item.infoLabels["plot"] == "":
            sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
            item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)

    id = scrapertools.find_single_match(item.url, '/(\d+)/')
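    # The numeric media id in the URL feeds the /sources/list endpoint for both streaming and download links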
    if "|" in item.extra or not __menu_info__:
        extra = item.extra
        if "|" in item.extra:
            extra = item.extra[:-1]
        url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, extra, "streaming")
        itemlist = get_enlaces(item, url, "Online")
        url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, extra, "download")
        itemlist.extend(get_enlaces(item, url, "de Descarga"))

        if extra == "media":
            data_trailer = httptools.downloadpage(host + "/media/trailer?idm=%s&mediaType=1" % id).data
            trailer_url = jsontools.load(data_trailer)["video"]["url"]
            if trailer_url != "":
                item.infoLabels["trailer"] = trailer_url

            title = "Ver enlaces %s - [" + item.contentTitle + "]"
            itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                       text_color="magenta", context=""))

            if config.get_videolibrary_support() and not "|" in item.extra:
                itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5,
                                     title="Añadir película a la videoteca", url=item.url, thumbnail=item.thumbnail,
                                     fanart=item.fanart, fulltitle=item.fulltitle,
                                     extra="media|"))
    else:
        url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, item.extra, item.type)
        type = item.type.replace("streaming", "Online").replace("download", "de Descarga")
        itemlist.extend(get_enlaces(item, url, type))

        # Required by FilterTools

        itemlist = filtertools.get_links(itemlist, item, list_language)

        # Required by AutoPlay

        autoplay.start(itemlist, item)

    return itemlist
Example #17
def findvideos(item):
    from core import servertools

    if item.infoLabels["tmdb_id"]:
        tmdb.set_infoLabels_item(item, __modo_grafico__)

    data = httptools.downloadpage(item.url).data
    iframe = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
    if "goo.gl/" in iframe:
        data += httptools.downloadpage(iframe,
                                       follow_redirects=False,
                                       only_headers=True).headers.get(
                                           "location", "")
    itemlist = servertools.find_video_items(item, data)

    library_path = config.get_videolibrary_path()
    if config.get_videolibrary_support():
        title = "Añadir película a la videoteca"
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith(
                "smb://"):
            try:
                from core import filetools
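                # Scan the video library's CINE folder for an .nfo file matching this movie's IMDB id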
                movie_path = filetools.join(config.get_videolibrary_path(),
                                            'CINE')
                files = filetools.walk(movie_path)
                for dirpath, dirname, filename in files:
                    for f in filename:
                        if item.infoLabels["imdb_id"] in f and f.endswith(
                                ".nfo"):
                            from core import videolibrarytools
                            head_nfo, it = videolibrarytools.read_nfo(
                                filetools.join(dirpath, dirname, f))
                            canales = sorted(it.library_urls.keys())
                            if "clasicofilm" in canales:
                                canales.pop(canales.index("clasicofilm"))
                                canales.insert(
                                    0, "[COLOR red]clasicofilm[/COLOR]")
                            title = "Película ya en tu videoteca. [%s] ¿Añadir?" % ",".join(
                                canales)
                            break
            except:
                import traceback
                logger.error(traceback.format_exc())

        itemlist.append(
            item.clone(action="add_pelicula_to_library", title=title))

    token_auth = config.get_setting("token_trakt", "tvmoviedb")
    if token_auth and item.infoLabels["tmdb_id"]:
        itemlist.append(
            item.clone(channel="tvmoviedb",
                       title="[Trakt] Gestionar con tu cuenta",
                       action="menu_trakt",
                       extra="movie"))

    return itemlist
Example #18
def findvideos(item):
    logger.info("pelisalacarta.channels.verseriesynovelas findvideos")
    itemlist = []
    item.text_color = color3

    if item.extra == "newest" and item.extra != "episodios":
        try:
            from core import tmdb
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    data = scrapertools.downloadpage(item.url, headers=CHANNEL_HEADERS)
    if "valida el captcha" in data:
        logueado, error = login()
        data = scrapertools.downloadpage(item.url, headers=CHANNEL_HEADERS)    
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)

    bloque = scrapertools.find_multiple_matches(data, '<tr><td data-th="Idioma">(.*?)</div>')
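    # Each language row carries the quality, the hoster's favicon domain and the protected link URL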
    for match in bloque:
        patron = 'data-th="Calidad">(.*?)<.*?' \
                 '"Servidor".*?src="http://www.google.com/s2/favicons\?domain=(.*?)\.' \
                 '.*?<td data-th="Enlace"><a href="(http://www.verseriesynovelas.tv/link/enlaces.php.*?)"'
        matches = scrapertools.find_multiple_matches(match, patron)
        for quality, server, url in matches:
            if server == "streamin":
                server = "streaminto"
            if server== "waaw":
                server = "netutv"
            if server == "ul":
                server = "uploadedto"
            try:
                servers_module = __import__("servers."+server)
                title = "Ver vídeo en "+server+"  ["+quality+"]"
                if "Español.png" in match:
                    title += " [CAST]"
                if "VOS.png" in match:
                    title += " [VOSE]"
                if "Latino.png" in match:
                    title += " [LAT]"
                if "VO.png" in match:
                    title += " [V.O]"
                itemlist.append(item.clone(action="play", title=title, url=url))
            except:
                pass

    if not itemlist: 
        itemlist.append(item.clone(action="", title="No se ha encontrado ningún enlace"))
    if item.extra != "episodios":
        url_lista = scrapertools.find_single_match(data, '<a class="regresar" href="([^"]+)"')
        if url_lista != "":
            itemlist.append(item.clone(action="episodios", title="Ir a la Lista de Capítulos", url=url_lista,
                                       text_color="red", context=""))

    return itemlist
Example #19
def findvideos(item):
    logger.info()
    itemlist = []
    item.text_color = color3

    if item.extra == "newest" and item.extra != "episodios":
        try:
            from core import tmdb
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    data = httptools.downloadpage(item.url).data
    if "valida el captcha" in data:
        logueado, error = login(check_login=False)
        data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)

    bloque = scrapertools.find_multiple_matches(data, '<tr><td data-th="Idioma">(.*?)</div>')
    for match in bloque:
        patron = 'data-th="Calidad">(.*?)<.*?' \
                 '"Servidor".*?src="http://www.google.com/s2/favicons\?domain=(.*?)\.' \
                 '.*?<td data-th="Enlace"><a href="(http://www.verseriesynovelas.tv/link/enlaces.php.*?)"'
        matches = scrapertools.find_multiple_matches(match, patron)
        for quality, server, url in matches:
            if server == "streamin":
                server = "streaminto"
            if server== "waaw":
                server = "netutv"
            if server == "ul":
                server = "uploadedto"
            try:
                servers_module = __import__("servers."+server)
                title = "Ver vídeo en "+server+"  ["+quality+"]"
                if "Español.png" in match:
                    title += " [CAST]"
                if "VOS.png" in match:
                    title += " [VOSE]"
                if "Latino.png" in match:
                    title += " [LAT]"
                if "VO.png" in match:
                    title += " [V.O]"
                itemlist.append(item.clone(action="play", title=title, url=url, server=server))
            except:
                pass

    if not itemlist: 
        itemlist.append(item.clone(action="", title="No se ha encontrado ningún enlace"))
    if item.extra != "episodios":
        url_lista = scrapertools.find_single_match(data, '<a class="regresar" href="([^"]+)"')
        if url_lista != "":
            itemlist.append(item.clone(action="episodios", title="Ir a la Lista de Capítulos", url=url_lista,
                                       text_color="red", context=""))

    return itemlist
Example #20
def findvideos(item):
    logger.info()
    itemlist = []
    item.text_color = color3

    if item.extra == "newest" and item.extra != "episodios":
        try:
            from core import tmdb
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    data = httptools.downloadpage(item.url).data
    if "valida el captcha" in data:
        logueado, error = login(check_login=False)
        data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)

    bloque = scrapertools.find_multiple_matches(data, '<tr><td data-th="Idioma">(.*?)</div>')
    for match in bloque:
        patron = 'data-th="Calidad">(.*?)<.*?' \
                 '"Servidor".*?src="http://www.google.com/s2/favicons\?domain=(.*?)\.' \
                 '.*?<td data-th="Enlace"><a href="(http://www.verseriesynovelas.tv/link/enlaces.php.*?)"'
        matches = scrapertools.find_multiple_matches(match, patron)
        for quality, server, url in matches:
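            # enlace.php redirects to the final hoster URL; take it from the Location header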
            video_data = httptools.downloadpage(url).data
            url_redirect = scrapertools.find_single_match(video_data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
            location = httptools.downloadpage(url_redirect, follow_redirects=False, only_headers=True).headers["location"]
            
            title = "Ver vídeo en %s  ["+quality+"]"
            if "Español.png" in match:
                title += " [CAST]"
            if "VOS.png" in match:
                title += " [VOSE]"
            if "Latino.png" in match:
                title += " [LAT]"
            if "VO.png" in match:
                title += " [V.O]"
            itemlist.append(item.clone(action="play", title=url, url=location))
    
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)


    if not itemlist: 
        itemlist.append(item.clone(action="", title="No se ha encontrado ningún enlace"))
    if item.extra != "episodios":
        url_lista = scrapertools.find_single_match(data, '<a class="regresar" href="([^"]+)"')
        if url_lista != "":
            itemlist.append(item.clone(action="episodios", title="Ir a la Lista de Capítulos", url=url_lista,
                                       text_color="red", context=""))

    return itemlist
Example #21
def findvideos(item):
    logger.info("pelisalacarta.channels.verseriesynovelas findvideos")
    itemlist = []
    item.text_color = color3

    if item.extra == "newest" or item.category == "":
        try:
            from core import tmdb
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    data = scrapertools.anti_cloudflare(item.url, host=CHANNEL_HOST, headers=CHANNEL_HEADERS)
    data = data.replace("\n", "").replace("\t", "")

    bloque = scrapertools.find_multiple_matches(data, '<tr><td data-th="Idioma">(.*?)</div>')
    for match in bloque:
        patron = '.*?data-th="Calidad">(.*?)<.*?' \
                 '"Servidor".*?src="http://www.google.com/s2/favicons\?domain=(.*?)\.' \
                 '.*?<td data-th="Enlace"><a href="(http://www.verseriesynovelas.tv/enlaces.php.*?)"'
        matches = scrapertools.find_multiple_matches(match, patron)
        for quality, server, url in matches:
            if server == "streamin":
                server = "streaminto"
            if server== "waaw":
                server = "netutv"
            if server == "ul":
                server = "uploadedto"
            try:
                servers_module = __import__("servers."+server)
                title = "Ver vídeo en "+server+"  ["+quality+"]"
                if "Español.png" in match:
                    title += " [CAST]"
                if "VOS.png" in match:
                    title += " [VOSE]"
                if "Latino.png" in match:
                    title += " [LAT]"
                if "VO.png" in match:
                    title += " [V.O]"
                itemlist.append(item.clone(action="play", title=title, url=url))
            except:
                pass

    if not itemlist: 
        itemlist.append(item.clone(action="", title="No se ha encontrado ningún enlace"))
    if item.category != "":
        url_lista = scrapertools.find_single_match(data, '<a class="regresar" href="([^"]+)"')
        if url_lista != "":
            itemlist.append(item.clone(action="episodios", title="Ir a la Lista de Capítulos", url=url_lista,
                                       text_color="red", context=""))

    return itemlist
Example #22
def findvideos(item):
    logger.info("pelisalacarta.channels.cinefox findvideos")
    itemlist = []

    if not "|" in item.extra and not __menu_info__:
        data = scrapertools.downloadpage(item.url, headers=headers.items())
        year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
        if year != "" and not "tmdb_id" in item.infoLabels:
            try:
                from core import tmdb
                item.infoLabels["year"] = year
                tmdb.set_infoLabels_item(item, __modo_grafico__)
            except:
                pass
    
        if item.infoLabels["plot"] == "":
            sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
            item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)

    id = scrapertools.find_single_match(item.url, '/(\d+)/')
    if "|" in item.extra or not __menu_info__:
        extra = item.extra
        if "|" in item.extra:
            extra = item.extra[:-1]
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (id, extra, "streaming")
        itemlist.extend(get_enlaces(item, url, "Online"))
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (id, extra, "download")
        itemlist.extend(get_enlaces(item, url, "de Descarga"))

        if extra == "media":
            data_trailer = scrapertools.downloadpage("http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % id,
                                                     headers=headers.items())
            trailer_url = jsontools.load_json(data_trailer)["video"]["url"]
            if trailer_url != "":
                item.infoLabels["trailer"] = trailer_url

            title = "Ver enlaces %s - [" + item.contentTitle + "]"
            itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                       text_color="magenta", context=""))

            if config.get_library_support() and not "|" in item.extra:
                itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5,
                                     title="Añadir película a la biblioteca", url=item.url, thumbnail=item.thumbnail,
                                     fanart=item.fanart, fulltitle=item.fulltitle,
                                     extra="media|"))
    else:
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (id, item.extra, item.type)
        type = item.type.replace("streaming", "Online").replace("download", "de Descarga")
        itemlist.extend(get_enlaces(item, url, type))

    return itemlist
Example #23
def findvideos(item):
    logger.info()
    itemlist = []

    tmdb.set_infoLabels_item(item, __modo_grafico__)
    data = httptools.downloadpage(item.url).data

    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(
            data, '<div class="Description">.*?<p>(.*?)</p>')
    fanart = scrapertools.find_single_match(
        data, '<img class="TPostBg" src="([^"]+)"')
    if not item.fanart and fanart:
        item.fanart = fanart

    patron = '<li class="Button STPb.*?data-tipo="([^"]+)" data-playersource="([^"]+)".*?><span>.*?<span>(.*?)</span>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for tipo, source, title in matches:
        if tipo == "trailer":
            continue
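        # Resolve the player source into a real URL via the site's admin-ajax endpoint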
        post = "source=%s&action=obtenerurl" % urllib.quote(source)
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': item.url}
        data_url = httptools.downloadpage(host + 'wp-admin/admin-ajax.php',
                                          post,
                                          headers=headers).data
        url = jsontools.load(data_url).get("url")

        if 'openload' in url:
            url = url + '|' + item.url
        extra_info = title.split(' - ')
        title = "%s - %s" % ('%s', title)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 url=url,
                 title=title,
                 language=extra_info[0],
                 quality=extra_info[1],
                 text_color=color3))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())

    if item.extra != "findvideos" and config.get_videolibrary_support():
        itemlist.append(
            item.clone(title="Añadir película a la videoteca",
                       action="add_pelicula_to_library",
                       extra="findvideos",
                       text_color="green"))

    return itemlist
Example #24
def findvideos(item):
    from core import servertools

    if item.infoLabels["tmdb_id"]:
        tmdb.set_infoLabels_item(item, __modo_grafico__)

    itemlist = servertools.find_video_items(item)

    library_path = config.get_library_path()
    if config.get_library_support():
        title = "Añadir película a la biblioteca"
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith(
                "smb://"):
            try:
                from core import filetools
                movie_path = filetools.join(config.get_library_path(), 'CINE')
                files = filetools.walk(movie_path)
                for dirpath, dirname, filename in files:
                    for f in filename:
                        if item.infoLabels["imdb_id"] in f and f.endswith(
                                ".nfo"):
                            from core import library
                            head_nfo, it = library.read_nfo(
                                filetools.join(dirpath, dirname, f))
                            canales = sorted(it.library_urls.keys())
                            if "clasicofilm" in canales:
                                canales.pop(canales.index("clasicofilm"))
                                canales.insert(
                                    0, "[COLOR red]clasicofilm[/COLOR]")
                            title = "Película ya en tu biblioteca. [%s] ¿Añadir?" % ",".join(
                                canales)
                            break
            except:
                import traceback
                logger.info(traceback.format_exc())
                pass

        itemlist.append(
            item.clone(action="add_pelicula_to_library", title=title))

    token_auth = config.get_setting("token_trakt", "tvmoviedb")
    if token_auth and item.infoLabels["tmdb_id"]:
        itemlist.append(
            item.clone(channel="tvmoviedb",
                       title="[Trakt] Gestionar con tu cuenta",
                       action="menu_trakt",
                       extra="movie"))

    return itemlist
Example #25
def findvideos(item):
    logger.info()
    itemlist = []

    tmdb.set_infoLabels_item(item, __modo_grafico__)
    data = httptools.downloadpage(item.url).data

    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(
            data, '<div class="Description">.*?<p>(.*?)</p>')
    fanart = scrapertools.find_single_match(
        data, '<img class="TPostBg" src="([^"]+)"')
    if not item.fanart and fanart:
        item.fanart = fanart

    patron = '<li class="Button STPb.*?data-tipo="([^"]+)" data-playersource="([^"]+)".*?><span>.*?<span>(.*?)</span>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for tipo, source, title in matches:
        if tipo == "trailer":
            continue
        post = "source=%s&action=obtenerurl" % urllib.quote(source)
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': item.url}
        data_url = httptools.downloadpage(host + 'wp-admin/admin-ajax.php',
                                          post,
                                          headers=headers).data
        url = jsontools.load_json(data_url).get("url")
        if "online.desmix" in url or "metiscs" in url:
            server = "directo"
        elif "openload" in url:
            server = "openload"
            url += "|Referer=" + item.url
        else:
            server = servertools.get_server_from_url(url)
        title = "%s - %s" % (unicode(
            server, "utf8").capitalize().encode("utf8"), title)
        itemlist.append(
            item.clone(action="play",
                       url=url,
                       title=title,
                       server=server,
                       text_color=color3))

    if item.extra != "findvideos" and config.get_library_support():
        itemlist.append(
            item.clone(title="Añadir película a la biblioteca",
                       action="add_pelicula_to_library",
                       extra="findvideos",
                       text_color="green"))

    return itemlist
Example #26
def menu_info_episode(item):
    logger.info("pelisalacarta.channels.cinefox menu_info_episode")
    itemlist = []
    
    data = scrapertools.downloadpage(item.url, headers=headers.items())
    if item.show == "":
        item.show = scrapertools.find_single_match(data, 'class="h1-like media-title".*?>([^<]+)</a>')

    episode = scrapertools.find_single_match(data, '<span class="indicator">([^<]+)</span>')
    item.infoLabels["season"] = episode.split("x")[0]
    item.infoLabels["episode"] = episode.split("x")[1]

    try:
        from core import tmdb
        tmdb.set_infoLabels_item(item, __modo_grafico__)
    except:
        pass
    
    if item.infoLabels["plot"] == "":
        sinopsis = scrapertools.find_single_match(data, 'id="episode-plot">(.*?)</p>')
        if not "No hay sinopsis" in sinopsis: 
            item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)

    title = "Ver enlaces %s - [" + item.show + " " + episode + "]"
    itemlist.append(item.clone(action="findvideos", title=title % "Online", extra="episode", type="streaming"))
    itemlist.append(item.clone(action="findvideos", title=title % "de Descarga", extra="episode", type="download"))

    siguiente = scrapertools.find_single_match(data, '<a class="episode-nav-arrow next" href="([^"]+)" title="([^"]+)"')
    if siguiente:
        titulo = ">> Siguiente Episodio - [" + siguiente[1] + "]"
        itemlist.append(item.clone(action="menu_info_episode", title=titulo, url=siguiente[0], extra="",
                                   text_color=color1))

    patron = '<a class="episode-nav-arrow previous" href="([^"]+)" title="([^"]+)"'
    anterior = scrapertools.find_single_match(data, patron)
    if anterior:
        titulo = "<< Episodio Anterior - [" + anterior[1] + "]"
        itemlist.append(item.clone(action="menu_info_episode", title=titulo, url=anterior[0], extra="",
                                   text_color=color3))

    url_serie = scrapertools.find_single_match(data, '<a href="([^"]+)" class="h1-like media-title"')
    url_serie += "/episodios"
    itemlist.append(item.clone(title="Ir a la lista de capítulos", action="episodios", url=url_serie, extra="",
                               text_color=color4))

    itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                               text_color="magenta", context=""))

    return itemlist
Example #27
def enlaces(item):
    logger.info("pelisalacarta.channels.pelisdanko enlaces")
    item.extra = ""
    item.text_color = ""
    itemlist = []
    # Download the page
    data = scrapertools.downloadpage(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}", '', data)
    item.fanart = scrapertools.find_single_match(data, "CUSTOM BACKGROUND.*?url\('([^']+)'")
    item.infoLabels["plot"] = scrapertools.find_single_match(data, 'dt>Sinopsis</dt> <dd class=[^>]+>(.*?)</dd>')
    year = scrapertools.find_single_match(data, '<dt>Estreno</dt> <dd>(\d+)</dd>')

    try:
        from core import tmdb
        item.infoLabels['year'] = int(year)
        # Fetch the basic data for all the movies using multiple threads
        tmdb.set_infoLabels_item(item, __modo_grafico__)
    except:
        pass

    filtro_idioma = config.get_setting("filterlanguages", item.channel)
    filtro_enlaces = config.get_setting("filterlinks", item.channel)

    dict_idiomas = {'CAST': 2, 'LAT': 1, 'VOSE': 0}

    if filtro_enlaces != 0:
        itemlist.append(item.clone(action="", title="Enlaces Online", text_color="dodgerblue", text_bold=True))
        itemlist = bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, "ss", item)
    if filtro_enlaces != 1:
        itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color="dodgerblue", text_bold=True))
        itemlist = bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, "dd", item)

    trailer_id = scrapertools.find_single_match(data, 'data:\s*\{\s*id:\s*"([^"]+)"')
    data_trailer = scrapertools.downloadpage("http://pelisdanko.com/trailer", post="id=%s" % trailer_id)
    url_trailer = scrapertools.find_single_match(data_trailer, 'src="([^"]+)"')
    if url_trailer != "":
        url_trailer = url_trailer.replace("embed/", "watch?v=")
        item.infoLabels['trailer'] = url_trailer
        itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                   text_color="magenta"))

    if config.get_library_support() and len(itemlist) > 0 and item.category != "Cine":
        itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca", url=item.url,
                             infoLabels={'title': item.fulltitle}, action="add_pelicula_to_library",
                             fulltitle=item.fulltitle, text_color="green", extra="enlaces"))

    return itemlist
Example #28
def episodios(item):
    logger.info("pelisalacarta.channels.descargasmix episodios")
    itemlist = []

    if item.category != "" and item.action != "add_serie_to_library":
        try:
            from core import tmdb
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass
    else:
        item.infoLabels = {}

    data = scrapertools.downloadpage(item.url)
    patron = '(<ul class="menu" id="seasons-list">.*?<div class="section-box related-posts">)'
    bloque = scrapertools.find_single_match(data, patron)
    matches = scrapertools.find_multiple_matches(bloque, '<div class="cap">(.*?)</div>')
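    # Each cap entry is a "season x episode" marker, e.g. "1x02"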
    for scrapedtitle in matches:
        scrapedtitle = scrapedtitle.strip()
        item.infoLabels['season'] = scrapedtitle.split("x")[0]
        item.infoLabels['episode'] = scrapedtitle.split("x")[1]
        title = item.fulltitle+" "+scrapedtitle.strip()
        itemlist.append(item.clone(action="epienlaces", title=title, extra=scrapedtitle))

    itemlist.sort(key=lambda item: item.title, reverse=True)
    item.plot = scrapertools.find_single_match(data, '<strong>SINOPSIS</strong>:(.*?)</p>')
    if item.show != "":
        item.infoLabels['season'] = ""
        item.infoLabels['episode'] = ""
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
        if item.category != "" and item.action != "add_serie_to_library":
            if config.get_library_support():
                itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la biblioteca", url=item.url,
                                     action="add_serie_to_library", extra="episodios", show=item.show,
                                     text_color="green"))

        if "tmbd_id" in item.infoLabels:
            try:
                from core import tmdb
                tmdb.set_infoLabels_itemlist(itemlist[:-2], __modo_grafico__)
            except:
                pass

    return itemlist
Example #31
def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="poster">.*?src="(.*?)" alt="(.*?)">.*?'
    patron += '"quality">(.*?)<.*?href="(.*?)".*?<span>(\d{4}).*?"texto">(.*?)<.*?'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedtitle, scrapedquality, scrapedurl, scrapedyear, scrapedplot in matches:
        url = scrapedurl
        thumbnail = scrapedthumbnail
        contentTitle = scrapedtitle
        quality = scrapedquality
        year = scrapedyear
        plot = scrapedplot
        if quality == "" or year == "":
            title = contentTitle
        else:
            title = contentTitle + " (" + year + ")  " + "[COLOR red]" + quality + "[/COLOR]"

        new_item = Item(channel=item.channel,
                        action="findvideos",
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        plot=plot,
                        contentTitle=contentTitle,
                        infoLabels={'year': year})

        if year:
            tmdb.set_infoLabels_item(new_item)
        itemlist.append(new_item)
    try:
        patron = '<a href="([^"]+)" ><span class="icon-chevron-right"></span></a></div>'
        next_page = re.compile(patron, re.DOTALL).findall(data)
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="Siguiente >>",
                 text_color="yellow",
                 url=next_page[0]))

    except:
        pass
    return itemlist
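tmdb.set_infoLabels_item is called once per scraped movie inside the loop above; other examples in this list (e.g. #30) batch the lookups with set_infoLabels_itemlist instead. A sketch of the batched variant, assuming the same core.tmdb API used throughout these examples:

from core import tmdb
# one multithreaded pass over all scraped items instead of a lookup per loop iteration
tmdb.set_infoLabels_itemlist(itemlist, True)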
Example #32
def more_info(item):
    logger.info()

    # Si se llega aquí mediante el menú contextual, hay que recuperar los parámetros action y channel
    if item.from_action:
        item.__dict__['action'] = item.__dict__.pop('from_action')
    if item.from_channel:
        item.__dict__['channel'] = item.__dict__.pop('from_channel')

    import xbmcgui
    from core import tmdb

    tmdb.set_infoLabels_item(item)

    xlistitem = xbmcgui.ListItem()
    platformtools.set_infolabels(xlistitem, item, True)

    ret = xbmcgui.Dialog().info(xlistitem)
Example #33
def update_infolabels_movie(tmdb_id):
    logger.info()
    from core import tmdb

    db = TrackingData()

    infolabels = db.get_movie(tmdb_id)
    it = Item(infoLabels = infolabels)
    tmdb.set_infoLabels_item(it)
    # comparing the serialized JSON directly detects changes; base64-encoding both sides was redundant
    if jsontools.dump(infolabels) == jsontools.dump(it.infoLabels):
        commit = False
        msg = 'Sin cambios en los datos de la película.'
    else:
        db.save_movie(tmdb_id, it.infoLabels)
        commit = True
        msg = 'Actualizados los datos de la película.'

    db.close(commit=commit)
    return True, msg
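A hypothetical invocation of update_infolabels_movie, assuming the movie is already tracked in the TrackingData store (the id is illustrative):

ok, msg = update_infolabels_movie("603")  # tmdb_id as stored by TrackingData
logger.info(msg)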
Example #34
def episodios(item):
    logger.info("pelisalacarta.channels.descargasmix episodios")
    itemlist = []

    if item.extra == "":
        try:
            from core import tmdb
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    data = scrapertools.downloadpage(item.url)
    patron = '(<ul class="menu" id="seasons-list">.*?<div class="section-box related-posts">)'
    bloque = scrapertools.find_single_match(data, patron)
    matches = scrapertools.find_multiple_matches(bloque, '<div class="cap">(.*?)</div>')
    for scrapedtitle in matches:
        scrapedtitle = scrapedtitle.strip()
        item.infoLabels['season'] = scrapedtitle.split("x")[0]
        item.infoLabels['episode'] = scrapedtitle.split("x")[1]
        title = item.fulltitle+" "+scrapedtitle.strip()
        itemlist.append(item.clone(action="epienlaces", title=title, extra=scrapedtitle))

    itemlist.sort(key=lambda it: it.title, reverse=True)
    item.plot = scrapertools.find_single_match(data, '<strong>SINOPSIS</strong>:(.*?)</p>')
    if item.show != "" and item.extra == "":
        item.infoLabels['season'] = ""
        item.infoLabels['episode'] = ""
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
        if config.get_library_support():
            itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la biblioteca", url=item.url,
                                 action="add_serie_to_library", extra="episodios", show=item.show,
                                 text_color="green"))

        if "tmbd_id" in item.infoLabels:
            try:
                from core import tmdb
                tmdb.set_infoLabels_itemlist(itemlist[:-2], __modo_grafico__)
            except:
                pass

    return itemlist
Example #35
def findvideos(item):
    from core import servertools

    if item.infoLabels["tmdb_id"]:
        tmdb.set_infoLabels_item(item, __modo_grafico__)

    itemlist = servertools.find_video_items(item)
    
    library_path = config.get_library_path()
    if config.get_library_support():
        title = "Añadir película a la biblioteca"
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
            try:
                from core import filetools
                movie_path = filetools.join(config.get_library_path(), 'CINE')
                files = filetools.walk(movie_path)
                for dirpath, dirname, filename in files:
                    for f in filename:
                        if item.infoLabels["imdb_id"] in f and f.endswith(".nfo"):
                            from core import library
                            head_nfo, it = library.read_nfo(filetools.join(dirpath, f))
                            canales = it.library_urls.keys()
                            canales.sort()
                            if "clasicofilm" in canales:
                                canales.pop(canales.index("clasicofilm"))
                                canales.insert(0, "[COLOR red]clasicofilm[/COLOR]")
                            title = "Película ya en tu biblioteca. [%s] ¿Añadir?" % ",".join(canales)
                            break
            except:
                import traceback
                logger.info(traceback.format_exc())
                pass
        
        itemlist.append(item.clone(action="add_pelicula_to_library", title=title))

    token_auth = config.get_setting("token_trakt", "tvmoviedb")
    if token_auth and item.infoLabels["tmdb_id"]:
        itemlist.append(item.clone(channel="tvmoviedb", title="[Trakt] Gestionar con tu cuenta", action="menu_trakt",
                                   extra="movie"))
    
    return itemlist
Example #36
def findvideos(item):
    logger.info()
    itemlist = []

    tmdb.set_infoLabels_item(item, __modo_grafico__)
    data = httptools.downloadpage(item.url).data

    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="Description">.*?<p>(.*?)</p>')
    fanart = scrapertools.find_single_match(data, '<img class="TPostBg" src="([^"]+)"')
    if not item.fanart and fanart:
        item.fanart = fanart

    patron = '<li class="Button STPb.*?data-tipo="([^"]+)" data-playersource="([^"]+)".*?><span>.*?<span>(.*?)</span>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for tipo, source, title in matches:
        if tipo == "trailer":
            continue
        post = "source=%s&action=obtenerurl" % urllib.quote(source)
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': item.url}
        data_url = httptools.downloadpage(host+'wp-admin/admin-ajax.php', post, headers=headers).data
        url = jsontools.load_json(data_url).get("url")
        if "online.desmix" in url or "metiscs" in url:
            server = "directo"
        elif "openload" in url:
            server = "openload"
            url += "|Referer=" + item.url
        else:
            server = servertools.get_server_from_url(url)
            if server == "directo":
                continue
        title = "%s - %s" % (unicode(server, "utf8").capitalize().encode("utf8"), title)
        itemlist.append(item.clone(action="play", url=url, title=title, server=server, text_color=color3))

    if item.extra != "findvideos" and config.get_library_support():
        itemlist.append(item.clone(title="Añadir película a la biblioteca", action="add_pelicula_to_library",
                                   extra="findvideos", text_color="green"))

    return itemlist
Example #37
def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="col-mt-5 postsh">.*?href="(.*?)" title="(.*?)".*?under-title">(.*?)<.*?src="(.*?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedyear, scrapedtitle, scrapedthumbnail in matches:

        url = scrapedurl
        title = scrapedtitle
        year = scrapertools.find_single_match(scrapedyear, r'.*?\((\d{4})\)')
        thumbnail = scrapedthumbnail
        new_item = Item(channel=item.channel,
                        action="findvideos",
                        title=title,
                        contentTitle=title,
                        url=url,
                        thumbnail=thumbnail,
                        infoLabels={'year': year})
        if year:
            tmdb.set_infoLabels_item(new_item)

        itemlist.append(new_item)

    next_page_url = scrapertools.find_single_match(
        data, '<link rel="next" href="(.*?)"\/>')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(
            item.clone(action="peliculas",
                       title="Siguiente >>",
                       text_color="yellow",
                       url=next_page_url))

    return itemlist
Example #38
def menu_info(item):
    logger.info()
    itemlist = []
    
    data = httptools.downloadpage(item.url).data
    year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
    if year != "" and not "tmdb_id" in item.infoLabels:
        try:
            from core import tmdb
            item.infoLabels["year"] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass
    
    if item.infoLabels["plot"] == "":
        sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
        item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)

    media_id = scrapertools.find_single_match(item.url, '/(\d+)/')
    data_trailer = httptools.downloadpage("http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % media_id).data
    trailer_url = jsontools.load_json(data_trailer)["video"]["url"]
    if trailer_url != "":
        item.infoLabels["trailer"] = trailer_url

    title = "Ver enlaces %s - [" + item.contentTitle + "]"
    itemlist.append(item.clone(action="findvideos", title=title % "Online", extra="media", type="streaming"))
    itemlist.append(item.clone(action="findvideos", title=title % "de Descarga", extra="media", type="download"))
    itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                               text_color="magenta", context=""))
    if config.get_library_support():
        itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5,
                             title="Añadir película a la biblioteca", url=item.url, thumbnail=item.thumbnail,
                             fanart=item.fanart, fulltitle=item.fulltitle,
                             extra="media|"))
    
    return itemlist
Example #39
def findvideos(item):
    logger.info("pelisalacarta.channels.oranline findvideos")
    itemlist = []

    try:
        filtro_idioma = config.get_setting("filterlanguages", item.channel)
        filtro_enlaces = config.get_setting("filterlinks", item.channel)
    except:
        filtro_idioma = 4
        filtro_enlaces = 2

    dict_idiomas = {'Español': 3, 'Latino': 2, 'VOSE': 1, 'Inglés': 0}

    data = scrapertools.downloadpage(item.url)
    year = scrapertools.find_single_match(
        data, 'Año de lanzamiento.*?href.*?>(\d+)</a>')

    if year != "":
        item.infoLabels['filtro'] = ""
        item.infoLabels['year'] = int(year)

        if item.infoLabels['plot'] == "":
            # Enrich the data from tmdb
            try:
                tmdb.set_infoLabels_item(item, __modo_grafico__)
            except:
                pass

    if item.infoLabels['plot'] == "":
        plot = scrapertools.find_single_match(
            data, '<h2>Sinopsis</h2>.*?>(.*?)</p>')
        item.infoLabels['plot'] = plot

    if filtro_enlaces != 0:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas,
                                      "online", item)
        if list_enlaces:
            itemlist.append(
                item.clone(action="",
                           title="Enlaces Online",
                           text_color=color1,
                           text_bold=True))
            itemlist.extend(list_enlaces)
    if filtro_enlaces != 1:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas,
                                      "descarga", item)
        if list_enlaces:
            itemlist.append(
                item.clone(action="",
                           title="Enlaces Descarga",
                           text_color=color1,
                           text_bold=True))
            itemlist.extend(list_enlaces)

    # "Add this movie to the XBMC library" option
    if itemlist:
        itemlist.append(
            item.clone(channel="trailertools",
                       title="Buscar Tráiler",
                       action="buscartrailer",
                       context="",
                       text_color="magenta"))
        if item.extra != "findvideos":
            if config.get_library_support():
                itemlist.append(
                    Item(channel=item.channel,
                         title="Añadir enlaces a la biblioteca",
                         text_color="green",
                         filtro=True,
                         action="add_pelicula_to_library",
                         fulltitle=item.fulltitle,
                         extra="findvideos",
                         url=item.url,
                         infoLabels={'title': item.fulltitle}))

    else:
        itemlist.append(
            item.clone(title="No hay enlaces disponibles",
                       action="",
                       text_color=color3))

    return itemlist
Example #40
def findvideos(item):
    logger.info("pelisalacarta.channels.puyasubs findvideos")
    if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]:
        from core import tmdb
        tmdb.set_infoLabels_item(item, True, idioma_busqueda="en")

    itemlist = list()

    data = scrapertools.downloadpage(item.url)
    idiomas = scrapertools.find_single_match(data, 'Subtitulo:\s*(.*?)<br />')
    calidades = ['720p', '1080p']
    torrents = scrapertools.find_multiple_matches(
        data, '<a href="(https://www.frozen-layer.com/descargas[^"]+)"')
    if not torrents:
        torrents = scrapertools.find_multiple_matches(
            data, '<a href="(https://www.nyaa.se/\?page=view[^"]+)"')
    if torrents:
        for i, enlace in enumerate(torrents):
            title = "Ver por Torrent   %s" % idiomas
            if ">720p" in data and ">1080p" in data:
                try:
                    title = "[%s] %s" % (calidades[i], title)
                except:
                    pass
            itemlist.append(
                item.clone(title=title,
                           action="play",
                           url=enlace,
                           server="torrent"))

    onefichier = scrapertools.find_multiple_matches(
        data, '<a href="(https://1fichier.com/[^"]+)"')
    if onefichier:
        for i, enlace in enumerate(onefichier):
            title = "Ver por 1fichier   %s" % idiomas
            if ">720p" in data and ">1080p" in data:
                try:
                    title = "[%s] %s" % (calidades[i], title)
                except:
                    pass
            itemlist.append(
                item.clone(title=title,
                           action="play",
                           url=enlace,
                           server="onefichier"))

    safelink = scrapertools.find_multiple_matches(
        data, '<a href="(http(?:s|)://safelinking.net/[^"]+)"')
    if safelink:
        for i, safe in enumerate(safelink):
            headers = [['Content-Type', 'application/json;charset=utf-8']]
            hash = safe.rsplit("/", 1)[1]
            post = jsontools.dump_json({"hash": hash})
            data_sf = scrapertools.downloadpage(
                "http://safelinking.net/v1/protected", post, headers)
            data_sf = jsontools.load_json(data_sf)

            for link in data_sf.get("links"):
                enlace = link["url"]
                domain = link["domain"]
                title = "Ver por %s" % domain
                action = "play"
                if "mega" in domain:
                    server = "mega"
                    if "/#F!" in enlace:
                        action = "carpeta"

                elif "1fichier" in domain:
                    server = "onefichier"
                    if "/dir/" in enlace:
                        action = "carpeta"

                title += "   %s" % idiomas
                if ">720p" in data and ">1080p" in data:
                    try:
                        title = "[%s]  %s" % (calidades[i], title)
                    except:
                        pass
                itemlist.append(
                    item.clone(title=title,
                               action=action,
                               url=enlace,
                               server=server))

    return itemlist
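The safelinking.net resolution above reduces to a JSON POST carrying the last path segment of the protected URL; a self-contained Python 2 sketch of just that step, with the endpoint, header and response fields taken from the code above (the function name is hypothetical):

import json
import urllib2

def resolve_safelinking(protected_url):
    # the hash is the last path segment of the protected URL
    link_hash = protected_url.rsplit("/", 1)[1]
    req = urllib2.Request("http://safelinking.net/v1/protected",
                          data=json.dumps({"hash": link_hash}),
                          headers={"Content-Type": "application/json;charset=utf-8"})
    response = json.loads(urllib2.urlopen(req).read())
    # each resolved link carries its final "url" and the hosting "domain"
    return [(link["domain"], link["url"]) for link in response.get("links", [])]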
Example #41
def findvideos(item):
    logger.info()
    if (item.extra and item.extra != "findvideos") or item.path:
        return epienlaces(item)

    itemlist = []
    item.text_color = color3

    data = get_data(item.url)
    item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year:
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    old_format = False
    # Old-format torrent pattern
    if "Enlaces de descarga</div>" in data:
        old_format = True
        matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
        for scrapedurl in matches:
            scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
            scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
            title = "[Torrent] "
            title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
            itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
                                       text_color="green"))

    # Online links pattern
    data_online = scrapertools.find_single_match(data, 'Ver online</div>(.*?)<div class="section-box related-posts">')
    if data_online:
        title = "Enlaces Online"
        if '"l-latino2"' in data_online:
            title += " [LAT]"
        elif '"l-esp2"' in data_online:
            title += " [ESP]"
        elif '"l-vose2"' in data_online:
            title += " [VOSE]"

        patron = 'make_links.*?,[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for i, code in enumerate(matches):
            enlace = mostrar_enlaces(code)
            enlaces = servertools.findvideos(data=enlace[0])
            if enlaces and "peliculas.nu" not in enlaces:
                if i == 0:
                    extra_info = scrapertools.find_single_match(data_online, '<span class="tooltiptext">(.*?)</span>')
                    size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()

                    if size:
                        title += " [%s]" % size
                    new_item = item.clone(title=title, action="", text_color=color1)
                    if extra_info:
                        extra_info = scrapertools.htmlclean(extra_info)
                        new_item.infoLabels["plot"] = extra_info
                        new_item.title += " +INFO"
                    itemlist.append(new_item)

                title = "   Ver vídeo en " + enlaces[0][2]
                itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1]))
    scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'")
    if scriptg:
        gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
        url = scrapertools.find_single_match(gvideo, 'src="([^"]+)"')
        if url:
            itemlist.append(item.clone(action="play", server="directo", url=url, extra=item.url,
                                       title="   Ver vídeo en Googlevideo (Máxima calidad)"))

    # Download links pattern
    patron = '<div class="(?:floatLeft |)double(?:nuevo|)">(.*?)</div>(.*?)' \
             '(?:<div(?: id="mirrors"|) class="(?:contentModuleSmall |)mirrors">|<div class="section-box related-' \
             'posts">)'
    bloques_descarga = scrapertools.find_multiple_matches(data, patron)
    for title_bloque, bloque in bloques_descarga:
        if title_bloque == "Ver online":
            continue
        if '"l-latino2"' in bloque:
            title_bloque += " [LAT]"
        elif '"l-esp2"' in bloque:
            title_bloque += " [ESP]"
        elif '"l-vose2"' in bloque:
            title_bloque += " [VOSE]"

        extra_info = scrapertools.find_single_match(bloque, '<span class="tooltiptext">(.*?)</span>')
        size = scrapertools.find_single_match(bloque, '(?i)TAMAÑO:\s*(.*?)<').strip()

        if size:
            title_bloque += " [%s]" % size
        new_item = item.clone(title=title_bloque, action="", text_color=color1)
        if extra_info:
            extra_info = scrapertools.htmlclean(extra_info)
            new_item.infoLabels["plot"] = extra_info
            new_item.title += " +INFO"
        itemlist.append(new_item)

        if '<div class="subiendo">' in bloque:
            itemlist.append(item.clone(title="   Los enlaces se están subiendo", action=""))
            continue
        patron = 'class="separate.*? ([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedserver, scrapedurl in matches:
            if (scrapedserver == "ul") | (scrapedserver == "uploaded"):
                scrapedserver = "uploadedto"
            titulo = unicode(scrapedserver, "utf-8").capitalize().encode("utf-8")
            if titulo == "Magnet" and old_format:
                continue
            elif titulo == "Magnet" and not old_format:
                title = "   Enlace Torrent"
                scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
                scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
                itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
                                           text_color="green"))
                continue
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    servers_module = __import__("servers." + scrapedserver)
                    # Get the number of links
                    urls = mostrar_enlaces(scrapedurl)
                    numero = str(len(urls))
                    titulo = "   %s - Nº enlaces: %s" % (titulo, numero)
                    itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl, server=scrapedserver))
                except:
                    pass

    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
    if item.extra != "findvideos" and config.get_library_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la biblioteca", action="add_pelicula_to_library",
                             extra="findvideos", url=item.url, infoLabels={'title': item.fulltitle},
                             fulltitle=item.fulltitle, text_color="green"))

    return itemlist
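The magnet cleanup appears twice in this example and again in Example #45; a small helper capturing the pattern with the same regexes (the helper name is hypothetical; scrapertools as used throughout these examples):

import re
import urllib

def clean_magnet(href):
    # keep the URI from 'magnet' onwards and strip the '&amp;b=4' suffix the page appends
    magnet = scrapertools.find_single_match(href, '(magnet.*)')
    return urllib.unquote(re.sub(r'&amp;b=4', '', magnet))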
Example #42
def findvideos(item):
    logger.info()
    itemlist = []

    if item.contentType == "movie":
        # Download the page
        data = httptools.downloadpage(item.url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

        if not item.infoLabels["tmdb_id"]:
            item.infoLabels["tmdb_id"] = scrapertools.find_single_match(
                data, '<a href="https://www.themoviedb.org/'
                '[^/]+/(\d+)')
            item.infoLabels["year"] = scrapertools.find_single_match(
                data, 'class="e_new">(\d{4})')

        if __modo_grafico__:
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        if not item.infoLabels["plot"]:
            item.infoLabels["plot"] = scrapertools.find_single_match(
                data, 'itemprop="description">([^<]+)</div>')
        if not item.infoLabels["genre"]:
            item.infoLabels["genre"] = ", ".join(
                scrapertools.find_multiple_matches(
                    data, '<a itemprop="genre"[^>]+>'
                    '([^<]+)</a>'))

        ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
        if not ficha:
            ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')
        cid = "0"
    else:
        ficha, cid = scrapertools.find_single_match(item.url,
                                                    'ficha=(\d+)&c_id=(\d+)')

    url = "https://playmax.mx/c_enlaces_n.php?apikey=%s&sid=%s&ficha=%s&cid=%s" % (
        apikey, sid, ficha, cid)
    data = httptools.downloadpage(url).data
    data = json.xmlTojson(None, data)

    for k, v in data["Data"].items():
        try:
            if type(v) is dict:
                if k == "Online":
                    order = 1
                elif k == "Download":
                    order = 0
                else:
                    order = 2

                itemlist.append(
                    item.clone(action="",
                               title=k,
                               text_color=color3,
                               order=order))
                if type(v["Item"]) is str:
                    continue
                elif type(v["Item"]) is dict:
                    v["Item"] = [v["Item"]]
                for it in v["Item"]:
                    try:
                        thumbnail = "%s/styles/prosilver/imageset/%s.png" % (
                            host, it['Host'])
                        title = "   %s - %s/%s" % (it['Host'].capitalize(),
                                                   it['Quality'], it['Lang'])
                        calidad = int(
                            scrapertools.find_single_match(
                                it['Quality'], '(\d+)p'))
                        calidadaudio = it['QualityA'].replace("...", "")
                        subtitulos = it['Subtitles'].replace(
                            "Sin subtítulos", "")
                        if subtitulos:
                            title += " (%s)" % subtitulos
                        if calidadaudio:
                            title += "  [Audio:%s]" % calidadaudio

                        likes = 0
                        if it["Likes"] != "0" or it["Dislikes"] != "0":
                            likes = int(it["Likes"]) - int(it["Dislikes"])
                            title += "  (%s ok, %s ko)" % (it["Likes"],
                                                           it["Dislikes"])
                        if type(it["Url"]) is dict:
                            for i, enlace in enumerate(it["Url"]["Item"]):
                                titulo = title + "  (Parte %s)" % (i + 1)
                                itemlist.append(
                                    item.clone(title=titulo,
                                               url=enlace,
                                               action="play",
                                               calidad=calidad,
                                               thumbnail=thumbnail,
                                               order=order,
                                               like=likes,
                                               ficha=ficha,
                                               cid=cid,
                                               folder=False))
                        else:
                            url = it["Url"]
                            itemlist.append(
                                item.clone(title=title,
                                           url=url,
                                           action="play",
                                           calidad=calidad,
                                           thumbnail=thumbnail,
                                           order=order,
                                           like=likes,
                                           ficha=ficha,
                                           cid=cid,
                                           folder=False))
                    except:
                        pass
        except:
            pass

    if not config.get_setting("order_web", "playmax"):
        itemlist.sort(key=lambda it: (it.order, it.calidad, it.like),
                      reverse=True)
    else:
        itemlist.sort(key=lambda it: it.order, reverse=True)
    if itemlist:
        itemlist.extend(acciones_fichas(item, sid, ficha))

    if not itemlist and item.contentType != "movie":
        url = url.replace("apikey=%s&" % apikey, "")
        data = httptools.downloadpage(url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

        patron = '<div id="f_fde_c"[^>]+>(.*?update_fecha\(\d+\)">)</div>'
        estrenos = scrapertools.find_multiple_matches(data, patron)
        for info in estrenos:
            info = "Estreno en " + scrapertools.htmlclean(info)
            itemlist.append(item.clone(action="", title=info))

    if not itemlist:
        itemlist.append(
            item.clone(action="", title="No hay enlaces disponibles"))

    return itemlist
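The type checks on v["Item"] and it["Url"] above handle the usual XML-to-JSON ambiguity: a child element may decode as a string, a single dict or a list. A generic normalizer for the same pattern (hypothetical helper, same logic as the inline checks):

def as_item_list(node):
    # "" or None -> [], a single dict -> [dict], a list stays as-is
    if not node or isinstance(node, basestring):
        return []
    if isinstance(node, dict):
        return [node]
    return node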
Example #43
def findvideos(item):
    logger.info("pelisalacarta.channels.vixto findvideos")
    itemlist = list()

    try:
        filtro_idioma = config.get_setting("filterlanguages", item.channel)
        filtro_enlaces = config.get_setting("filterlinks", item.channel)
    except:
        filtro_idioma = 3
        filtro_enlaces = 2

    dict_idiomas = {'Castellano': 2, 'Latino': 1, 'Subtitulada': 0}

    data = scrapertools.downloadpage(item.url)
    data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)

    if not item.infoLabels["tmdb_id"]:
        year = scrapertools.find_single_match(data, 'Lanzamiento.*?(\d{4})')
    
        if year != "":
            item.infoLabels['filtro'] = ""
            item.infoLabels['year'] = int(year)

            # Enrich the data from tmdb
            try:
                tmdb.set_infoLabels_item(item, __modo_grafico__)
            except:
                pass

    if not item.infoLabels['plot']:
        plot = scrapertools.find_single_match(data, '<p class="plot">(.*?)</p>')
        item.infoLabels['plot'] = plot

    if filtro_enlaces != 0:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "Ver Online", item)
        if list_enlaces:
            itemlist.append(item.clone(action="", title="Enlaces Online", text_color=color1,
                                       text_bold=True))
            itemlist.extend(list_enlaces)
    if filtro_enlaces != 1:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "Descarga Directa", item)
        if list_enlaces:
            itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color=color1,
                                       text_bold=True))
            itemlist.extend(list_enlaces)

    # "Add this movie to the XBMC library" option
    if itemlist and item.contentType == "movie":
        contextual = config.is_xbmc()
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta", contextual=contextual))
        if item.extra != "findvideos":
            if config.get_library_support():
                itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la biblioteca", text_color="green",
                                     filtro=True, action="add_pelicula_to_library", fulltitle=item.fulltitle,
                                     extra="findvideos", url=item.url, infoLabels=item.infoLabels,
                                     contentType=item.contentType, contentTitle=item.contentTitle, show=item.show))
    elif not itemlist and item.contentType == "movie":
        itemlist.append(item.clone(title="Película sin enlaces disponibles", action="", text_color=color3))

    return itemlist
Example #44
def findvideos(item):
    logger.info("pelisalacarta.channels.puyasubs findvideos")
    if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]:
        from core import tmdb
        tmdb.set_infoLabels_item(item, True, idioma_busqueda="en")

    itemlist = list()

    data = scrapertools.downloadpage(item.url)
    idiomas = scrapertools.find_single_match(data, 'Subtitulo:\s*(.*?)<br />')
    calidades = ['720p', '1080p']
    torrents = scrapertools.find_multiple_matches(data, '<a href="(https://www.frozen-layer.com/descargas[^"]+)"')
    if not torrents:
        torrents = scrapertools.find_multiple_matches(data, '<a href="(https://www.nyaa.se/\?page=view[^"]+)"')    
    if torrents:
        for i, enlace in enumerate(torrents):
            title = "Ver por Torrent   %s" % idiomas
            if ">720p" in data and ">1080p" in data:
                try:
                    title = "[%s] %s" % (calidades[i], title)
                except:
                    pass
            itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))

    onefichier = scrapertools.find_multiple_matches(data, '<a href="(https://1fichier.com/[^"]+)"')
    if onefichier:
        for i, enlace in enumerate(onefichier):
            title = "Ver por 1fichier   %s" % idiomas
            if ">720p" in data and ">1080p" in data:
                try:
                    title = "[%s] %s" % (calidades[i], title)
                except:
                    pass
            itemlist.append(item.clone(title=title, action="play", url=enlace, server="onefichier"))

    safelink = scrapertools.find_multiple_matches(data, '<a href="(http(?:s|)://safelinking.net/[^"]+)"')
    if safelink:
        for i, safe in enumerate(safelink):
            headers = [['Content-Type', 'application/json;charset=utf-8']]
            hash = safe.rsplit("/", 1)[1]
            post = jsontools.dump_json({"hash": hash})
            data_sf = scrapertools.downloadpage("http://safelinking.net/v1/protected", post, headers)
            data_sf = jsontools.load_json(data_sf)

            for link in data_sf.get("links"):
                enlace = link["url"]
                domain = link["domain"]
                title = "Ver por %s" % domain
                action = "play"
                if "mega" in domain:
                    server = "mega"
                    if "/#F!" in enlace:
                        action = "carpeta"

                elif "1fichier" in domain:
                    server = "onefichier"
                    if "/dir/" in enlace:
                        action = "carpeta"

                title += "   %s" % idiomas
                if ">720p" in data and ">1080p" in data:
                    try:
                        title = "[%s]  %s" % (calidades[i], title)
                    except:
                        pass
                itemlist.append(item.clone(title=title, action=action, url=enlace, server=server))

    return itemlist
Example #45
def findvideos(item):
    logger.info("pelisalacarta.channels.descargasmix findvideos")
    if item.extra != "":
        return epienlaces(item)
    itemlist = []
    item.text_color = color3
    data = scrapertools.downloadpage(item.url)

    item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year != "":
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    # Torrent pattern
    matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
    for scrapedurl in matches:
        title = "[Torrent] "
        title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
        itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl, text_color="green"))
    
    # Online links pattern
    data_online = scrapertools.find_single_match(data, 'Enlaces para ver online(.*?)<div class="section-box related-'
                                                       'posts">')
    if data_online:
        patron = 'dm\(c.a\(\'([^\']+)\''
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for code in matches:
            enlace = dm(code)
            enlaces = servertools.findvideos(data=enlace)
            if enlaces:
                title = "Ver vídeo en " + enlaces[0][2]
                itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1]))

    # Download links pattern
    data_descarga = scrapertools.find_single_match(data, 'Enlaces de descarga(.*?)<script>')
    patron = '<div class="fondoenlaces".*?id=".*?_([^"]+)".*?textContent=nice=dm\(c.a\(\'([^\']+)\''
    matches = scrapertools.find_multiple_matches(data_descarga, patron)
    for scrapedserver, scrapedurl in matches:
        if (scrapedserver == "ul") | (scrapedserver == "uploaded"):
            scrapedserver = "uploadedto"
        titulo = scrapedserver.capitalize()
        if titulo == "Magnet":
            continue
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(scrapedserver)
        if mostrar_server:
            try:
                servers_module = __import__("servers."+scrapedserver)
                # Get the number of links
                patron = "(dm\(c.a\('"+scrapedurl.replace("+", "\+")+"'.*?)</div>"
                data_enlaces = scrapertools.find_single_match(data_descarga, patron)
                patron = 'dm\(c.a\(\'([^\']+)\''
                matches_enlaces = scrapertools.find_multiple_matches(data_enlaces, patron)
                numero = str(len(matches_enlaces))
                if item.category != "Cine":
                    itemlist.append(item.clone(action="enlaces", title=titulo+" - Nº enlaces:"+numero,
                                               extra=scrapedurl))
            except:
                pass

    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
    if item.category != "Cine" and config.get_library_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la biblioteca", action="add_pelicula_to_library",
                             extra="findvideos", infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
                             text_color="green"))

    return itemlist
Example #46
def menu_info(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    item.infoLabels["tmdb_id"] = scrapertools.find_single_match(
        data, '<a href="https://www.themoviedb.org/[^/]+/(\d+)')
    item.infoLabels["year"] = scrapertools.find_single_match(
        data, 'class="e_new">(\d{4})')
    item.infoLabels["plot"] = scrapertools.find_single_match(
        data, 'itemprop="description">([^<]+)</div>')
    item.infoLabels["genre"] = ", ".join(
        scrapertools.find_multiple_matches(
            data, '<a itemprop="genre"[^>]+>([^<]+)</a>'))
    if __modo_grafico__:
        tmdb.set_infoLabels_item(item, __modo_grafico__)

    action = "findvideos"
    title = "Ver enlaces"
    if item.contentType == "tvshow":
        action = "episodios"
        title = "Ver capítulos"
    itemlist.append(item.clone(action=action, title=title))

    carpeta = "CINE"
    tipo = "película"
    action = "add_pelicula_to_library"
    extra = ""
    if item.contentType == "tvshow":
        carpeta = "SERIES"
        tipo = "serie"
        action = "add_serie_to_library"
        extra = "episodios###library"

    library_path = config.get_library_path()
    if config.get_library_support():
        title = "Añadir %s a la biblioteca" % tipo
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith(
                "smb://"):
            try:
                from core import filetools
                path = filetools.join(library_path, carpeta)
                files = filetools.walk(path)
                for dirpath, dirname, filename in files:
                    if item.infoLabels["imdb_id"] in dirpath:
                        namedir = dirpath.replace(path, '')[1:]
                        for f in filename:
                            if f != namedir + ".nfo" and f != "tvshow.nfo":
                                continue
                            from core import library
                            head_nfo, it = library.read_nfo(
                                filetools.join(dirpath, f))
                            canales = it.library_urls.keys()
                            canales.sort()
                            if "playmax" in canales:
                                canales.pop(canales.index("playmax"))
                                canales.insert(0, "[COLOR red]playmax[/COLOR]")
                            title = "%s ya en tu biblioteca. [%s] ¿Añadir?" % (
                                tipo.capitalize(), ",".join(canales))
                            break
            except:
                import traceback
                logger.error(traceback.format_exc())
                pass

        itemlist.append(
            item.clone(action=action,
                       title=title,
                       text_color=color5,
                       extra=extra))

    token_auth = config.get_setting("token_trakt", "tvmoviedb")
    if token_auth and item.infoLabels["tmdb_id"]:
        extra = "movie"
        if item.contentType != "movie":
            extra = "tv"
        itemlist.append(
            item.clone(channel="tvmoviedb",
                       title="[Trakt] Gestionar con tu cuenta",
                       action="menu_trakt",
                       extra=extra))
    itemlist.append(
        item.clone(channel="trailertools",
                   action="buscartrailer",
                   title="Buscar Tráiler",
                   text_color="magenta",
                   context=""))

    itemlist.append(item.clone(action="", title=""))
    ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
    if not ficha:
        ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')

    itemlist.extend(acciones_fichas(item, sid, ficha, season=True))
    itemlist.append(
        item.clone(action="acciones_cuenta",
                   title="Añadir a una lista",
                   text_color=color3,
                   ficha=ficha))

    return itemlist
Example #47
def find_and_set_infoLabels(item):
    """
    función que se llama para buscar y setear los infolabels
    :param item:
    :return:
    """

    global scraper_global
    logger.debug("item:\n" + item.tostring('\n'))

    params = {}

    if item.contentType == "movie":
        tipo_contenido = "pelicula"
        title = item.contentTitle
        # get scraper pelis
        scraper = Tmdb()
        # para tmdb
        tipo_busqueda = "movie"

    else:
        tipo_contenido = "serie"
        title = item.contentSerieName
        # get scraper series
        scraper = Tmdb()
        # para tmdb
        tipo_busqueda = "tv"

    # this is already handled inside the tmdb scraper
    # title = re.sub('\[\\\?(B|I|COLOR)\s?[^\]]*\]', '', title)

    # If the title ends with the (year), strip it out
    year = scrapertools.find_single_match(title, "^.+?\s*(\(\d{4}\))$")
    if year:
        title = title.replace(year, "").strip()
        item.infoLabels['year'] = year[1:-1]

    scraper_result = None
    results = []
    while not scraper_result:
        # for tmdb
        if scraper is Tmdb:
            logger.debug("scraper is Tmdb")
            params["texto_buscado"] = title
            params["tipo"] = tipo_busqueda
            params["year"] = item.infoLabels['year']

        if not results:
            if not item.infoLabels.get("tmdb_id"):
                if not item.infoLabels.get("imdb_id"):
                    scraper_global = scraper(**params)
                else:
                    logger.info("tiene imdb")
                    # para tmdb
                    if isinstance(scraper, Tmdb):
                        params["external_id"] = item.infoLabels.get("imdb_id")
                        params["external_source"] = "imdb_id"

                    scraper_global = scraper(**params)

            elif not scraper_global or scraper_global.result.get("id") != item.infoLabels['tmdb_id']:
                # for tmdb
                if scraper is Tmdb:
                    params["id_Tmdb"] = item.infoLabels['tmdb_id']
                    params["idioma_busqueda"] = "es"

                scraper_global = scraper(**params)

            results = scraper_global.get_list_resultados()

        if len(results) > 1:
            scraper_result = platformtools.show_video_info(results, item=item, scraper=scraper,
                                                           caption="[%s]: Selecciona la %s correcta"
                                                                   % (title, tipo_contenido))

        elif len(results) > 0:
            scraper_result = results[0]

        if scraper_result is None:
            index = -1
            if tipo_contenido == "serie":
                # If the series is not found automatically, present a list of options
                opciones = ["Introducir otro nombre", "Buscar en TheTvDB.com"]
                index = platformtools.dialog_select("%s no encontrada" % tipo_contenido.capitalize(), opciones)

            elif platformtools.dialog_yesno("Película no encontrada", "No se ha encontrado la película:", title,
                                            '¿Desea introducir otro nombre?'):
                index = 0

            if index < 0:
                logger.debug("he pulsado 'cancelar' en la ventana '%s no encontrada'" % tipo_contenido.capitalize())
                break

            if index == 0: # "Introducir otro nombre"
                # Ask the user for the title
                it = platformtools.dialog_input(title, "Introduzca el nombre de la %s a buscar" % tipo_contenido)
                if it is not None:
                    title = it
                    item.infoLabels['year'] = ""
                    # reset the results
                    results = []
                else:
                    logger.debug("he pulsado 'cancelar' en la ventana 'introduzca el nombre correcto'")
                    break

            if index == 1: # "Buscar en TheTvDB.com"
                results = tvdb_series_by_title(title)

    if isinstance(item.infoLabels, InfoLabels):
        infoLabels = item.infoLabels
    else:
        infoLabels = InfoLabels()

    if scraper_result:
        if 'id' in scraper_result:
            # results obtained from tmdb
            infoLabels['tmdb_id'] = scraper_result['id']
            infoLabels['url_scraper'] = "https://www.themoviedb.org/tv/%s" % infoLabels['tmdb_id']
            item.infoLabels = infoLabels
            tmdb.set_infoLabels_item(item)

        elif 'tvdb_id' in scraper_result:
            # results obtained from tvdb
            infoLabels.update(scraper_result)
            item.infoLabels = infoLabels

        # logger.debug("item:\n" + item.tostring('\n'))
        return True
    else:
        item.infoLabels = infoLabels
        return False
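A hedged usage sketch for find_and_set_infoLabels, assuming an Item built the way the channels in these examples build them (the title is illustrative):

item = Item(contentType="movie", contentTitle="Blade Runner (1982)")
if find_and_set_infoLabels(item):
    # the scraper filled item.infoLabels (tmdb_id, plot, year, ...)
    logger.info("tmdb_id: %s" % item.infoLabels.get("tmdb_id"))
else:
    logger.info("no match was confirmed")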