Example #1
def novedades_anime(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')

    matches = re.compile('<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<p>(.*?)</p>.+?'
                         '<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
    itemlist = []

    for thumbnail, _type, plot, url, title in matches:

        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)
        if _type != "Película":
            new_item.show = title
            new_item.context = renumbertools.context
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    return itemlist
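
These channel scrapers lean on urlparse.urljoin to absolutize the relative href/src values they scrape. A minimal standalone sketch of that behavior (the HOST value and paths below are illustrative, not from the source):

try:
    import urlparse                     # Python 2, as these examples use
except ImportError:
    import urllib.parse as urlparse     # same urljoin under Python 3

HOST = "https://example-site.com"       # illustrative host, not from the source
print(urlparse.urljoin(HOST, "/anime/one-piece"))   # -> https://example-site.com/anime/one-piece
print(urlparse.urljoin(HOST, "thumbs/poster.jpg"))  # relative paths resolve against HOST too
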
Example #2
def busqueda(item):
    logger.info("pelisalacarta.channels.vixto busqueda")
    itemlist = list()

    # Download the page
    data = scrapertools.downloadpage(item.url)
    data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)

    # Extract the entries (folders)
    bloque = scrapertools.find_single_match(data, '<h2>Peliculas</h2>(.*?)</div>')
    bloque += scrapertools.find_single_match(data, '<h2>Series</h2>(.*?)</div>')

    patron = '<figure class="col-lg-2.*?href="([^"]+)".*?src="([^"]+)".*?<figcaption title="([^"]+)"'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    peliculas = False
    series = False
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        new_item = Item(channel=item.channel, contentType="movie", url=scrapedurl, title="   "+scrapedtitle,
                        text_color=color1, context="buscar_trailer", fulltitle=scrapedtitle,
                        contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, action="findvideos")

        if "/peliculas/" in scrapedurl and not peliculas:
            itemlist.append(Item(channel=item.channel, action="", title="Películas", text_color=color2))
            peliculas = True
        if "/series/" in scrapedurl and not series:
            itemlist.append(Item(channel=item.channel, action="", title="Series", text_color=color2))
            series = True

        if "/series/" in scrapedurl:
            new_item.contentType = "tvshow"
            new_item.show = scrapedtitle
            new_item.action = "episodios"
            
        filtro_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w342", "")
        filtro_list = {"poster_path": filtro_thumb}
        new_item.infoLabels["filtro"] = filtro_list.items()
        itemlist.append(new_item)

    try:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass

    return itemlist
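
The re.sub cleanup at the top of busqueda is shared by most of these scrapers: it strips newlines, carriage returns, tabs, &nbsp; entities, and any pair of whitespace characters so the single-line regexes can match. A runnable illustration (the HTML snippet is made up):

import re

raw = "<h2>Peliculas</h2>\n\t  <div>  some   entry  </div>\r"
clean = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", raw)
print(clean)  # -> <h2>Peliculas</h2><div>some entry</div>
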
Example #3
def listado(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    logger.debug("datito %s" % data)

    url_pagination = scrapertools.find_single_match(data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')

    data = scrapertools.find_single_match(data, '</div><div class="full">(.*?)<div class="pagination')

    matches = re.compile('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
                         '<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
                         re.DOTALL).findall(data)

    itemlist = []

    for thumbnail, url, title, genres, plot in matches:

        title = clean_title(title)
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)

        if "Pelicula Anime" in genres:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        else:
            new_item.show = title
            new_item.context = renumbertools.context

        itemlist.append(new_item)

    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))

    return itemlist
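
scrapertools.find_single_match is used throughout these examples; judging by how its result is consumed (a plain string, empty when nothing matches), it is presumably a thin wrapper over re.search that returns the first capture group. A standalone sketch under that assumption:

import re

def find_single_match(data, pattern):
    # Return the first capture group of the first match, or "" if none.
    match = re.search(pattern, data, re.DOTALL)
    return match.group(1) if match else ""

print(find_single_match('<li><a href="/page/2">2</a></li>', 'href="([^"]+)"'))  # -> /page/2
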
Example #4
def newest(categoria):
    support.log("newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "series":
            item.url = host + "/episodi/"
            item.action = "peliculas"
            item.args = "lateste"
            item.contentType = "episode"
            itemlist = peliculas(item)

            if itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Continue the search in case of error
    except Exception as e:
        import traceback
        traceback.print_stack()
        support.log(str(e))
        return []

    return itemlist
Example #5
def newest(categoria):
    support.info('newest', categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "peliculas":
            item.url = host
            item.action = "peliculas"
            item.contentType='movie'
            itemlist = peliculas(item)

            if itemlist[-1].action == "peliculas":
                itemlist.pop()
    # Continue the search in case of error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
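
The two newest variants above show the error-logging idiom repeated across these channels: iterating sys.exc_info(), which is a (type, value, traceback) tuple, and logging each element. A small self-contained comparison with the single-string alternative from the standard library:

import sys
import traceback

def log_current_exception():
    # Idiom used above: log each element of the (type, value, traceback) tuple.
    for line in sys.exc_info():
        print("{0}".format(line))
    # Equivalent single-string alternative:
    print(traceback.format_exc())

try:
    1 / 0
except Exception:
    log_current_exception()
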
Example #6
def novedades_anime(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    matches = re.compile('href="([^"]+)".+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.+?>(.*?)</h3>.+?'
                         '(?:</p><p>(.*?)</p>.+?)?</article></li>', re.DOTALL).findall(data)
    itemlist = []
    for url, thumbnail, _type, title, plot in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)
        if _type != "Película":
            new_item.show = title
            new_item.context = renumbertools.context(item)
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        itemlist.append(new_item)
    return itemlist
Example #7
def listado(item):
    logger.info()
    itemlist = []
    
    data = get_source(item.url)

    url_pagination = scrapertools.find_single_match(data, '<li class="active">.*?</li><li><a href="([^"]+)">')
    
    data = scrapertools.find_multiple_matches(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    data = "".join(data)
    
    matches = re.compile('<a href="([^"]+)">.+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.*?>(.*?)</h3>'
                         '.*?</p><p>(.*?)</p>', re.DOTALL).findall(data)
    
    for url, thumbnail, _type, title, plot in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        
        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        plot=plot)
        
        if _type == "Anime":
            new_item.contentSerieName = title
            new_item.context = renumbertools.context(item)
        
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        
        itemlist.append(new_item)
    
    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"
        
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
    
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    
    return itemlist
Example #8
def listado(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    url_pagination = scrapertools.find_single_match(data, '<li class="active">.*?</li><li><a href="([^"]+)">')

    data = scrapertools.find_multiple_matches(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    data = "".join(data)

    matches = re.compile('<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<a href="([^"]+)">(.*?)</a>.+?'
                         'class="Desc ScrlV"><p>(.*?)</p>', re.DOTALL).findall(data)

    itemlist = []

    for thumbnail, _type, url, title, plot in matches:

        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)

        if _type == "Anime":
            new_item.show = title
            new_item.context = renumbertools.context
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))

    return itemlist
Example #9
def newest(categoria):
    support.log('newest ->', categoria)
    itemlist = []
    item = Item()
    if categoria == 'series':
        try:
            item.contentType = 'tvshow'
            item.args = 'newest'
            item.url = host
            item.action = 'peliculas'
            itemlist = peliculas(item)

            if itemlist[-1].action == 'peliculas':
                itemlist.pop()
        # Continue the search in case of error
        except:
            import sys
            for line in sys.exc_info():
                support.log('newest log: ', '{0}'.format(line))
            return []

    return itemlist
Example #10
def listado(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    url_pagination = scrapertools.find_single_match(data, '<li class="active">.*?</li><li><a href="([^"]+)">')

    data = scrapertools.find_multiple_matches(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    data = "".join(data)

    matches = re.compile('<a href="([^"]+)">.+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.*?>(.*?)</h3>'
                         '.*?</p><p>(.*?)</p>', re.DOTALL).findall(data)

    itemlist = []

    for url, thumbnail, _type, title, plot in matches:

        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)

        if _type == "Anime":
            new_item.show = title
            new_item.context = renumbertools.context(item)
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))

    return itemlist
Example #11
def newest(categoria):
    support.log()
    itemlist = []
    item = Item()
    item.contentType = 'tvshow'
    item.args = True
    try:
        item.url = "%s/aggiornamento-episodi/" % host
        item.action = "peliculas"
        itemlist = peliculas(item)

        if itemlist[-1].action == "peliculas":
            itemlist.pop()

    # Continue the search in case of error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
Example #12
def newest(categoria):
    support.info("newest", categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "series":
            item.url = host + "/aggiornamento-episodi/"
            item.action = "peliculas"
            item.args = "update"
            item.contentType = "episode"
            itemlist = peliculas(item)

            if itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Continue the search in case of error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
Example #13
def newest(categoria):
    log()
    itemlist = []
    item = Item()
    item.contentType= 'episode'
    item.args = 'update'
    try:
        if categoria == "series":
            item.url = "%s/lista-serie-tv" % host
            item.action = "peliculas"
            itemlist = peliculas(item)

            if itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Continue the search in case of error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
Example #14
def newest(categoria):
    logger.info("[italiaserie.py]==> newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "series":
            item.url = host + "/ultimi-episodi/"
            item.action = "peliculas"
            item.args = "latest"
            item.contentType = "episode"
            itemlist = peliculas(item)

            if itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Continue the search in case of error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
Example #15
def listado(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    url_pagination = scrapertools.find_single_match(
        data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')
    data = scrapertools.find_single_match(
        data, '</div><div class="full">(.*?)<div class="pagination')
    matches = re.compile(
        '<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
        '<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
        re.DOTALL).findall(data)
    itemlist = []
    for thumbnail, url, title, genres, plot in matches:
        title = clean_title(title)
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        new_item = Item(channel=item.channel,
                        action="episodios",
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        fulltitle=title,
                        plot=plot)
        if "Pelicula Anime" in genres:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        else:
            new_item.show = title
            new_item.context = renumbertools.context(item)
        itemlist.append(new_item)
    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"
        itemlist.append(
            Item(channel=item.channel, action="listado", title=title, url=url))
    return itemlist
Example #16
def novedades_anime(item):
    logger.info()
    itemlist = []

    patr = '<ul class="ListAnimes[^>]+>(.*?)</ul>'
    data = get_source(item.url, patron=patr)

    patron = 'href="([^"]+)".+?<img src="([^"]+)".+?'
    patron += '<span class=.+?>(.*?)</span>.+?<h3.+?>(.*?)</h3>.+?'
    patron += '(?:</p><p>(.*?)</p>.+?)?</article></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, thumbnail, _type, title, plot in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel,
                        action="episodios",
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        plot=plot)

        if _type != "Película":
            new_item.contentSerieName = title
            new_item.context = renumbertools.context(item)

        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    tmdb.set_infoLabels(itemlist, seekTmdb=True)

    return itemlist
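
The (?:</p><p>(.*?)</p>.+?)? fragment in this pattern (also in Example #6) makes the plot optional: with findall, an optional group that does not participate in the match still contributes an empty string, so the five-way tuple unpacking in the loop never breaks. A minimal demonstration:

import re

patron = 'href="([^"]+)"(?:<p>(.*?)</p>)?'
print(re.findall(patron, 'href="/a/1"<p>plot text</p> href="/a/2"'))
# -> [('/a/1', 'plot text'), ('/a/2', '')]  : the missing plot becomes ''
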
Example #17
def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Saves to the given path every episode included in the episodelist list
    @type path: str
    @param path: path where the episodes will be saved
    @type episodelist: list
    @param episodelist: list of items representing the episodes to be saved.
    @type serie: item
    @param serie: the show whose episodes are being saved
    @type silent: bool
    @param silent: whether the notification is shown
    @type overwrite: bool
    @param overwrite: allows overwriting existing files
    @rtype insertados: int
    @return:  the number of episodes inserted
    @rtype sobreescritos: int
    @return:  the number of episodes overwritten
    @rtype fallidos: int
    @return:  the number of failed episodes
    """
    logger.info()

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # List all of the show's files up front, so we avoid checking one by one whether each exists
    raiz, carpetas_series, ficheros = next(filetools.walk(path))
    ficheros = [filetools.join(path, f) for f in ficheros]

    nostrm_episodelist = []
    for root, folders, files in filetools.walk(path):
        for file in files:
            season_episode = scrapertools.get_season_and_episode(file)
            if season_episode == "" or filetools.exists(filetools.join(path, "%s.strm" % season_episode)):
                continue
            nostrm_episodelist.append(season_episode)
    nostrm_episodelist = sorted(set(nostrm_episodelist))
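    # (nostrm_episodelist now holds the "SxEE" codes found on disk without a
    #  matching .strm; those episodes are skipped further down as a broken
    #  videolibrary structure.)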

    # silent suppresses the progress display (for videolibrary_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress(config.get_localized_string(20000), config.get_localized_string(60064))
        p_dialog.update(0, config.get_localized_string(60065))

    channel_alt = generictools.verify_channel(serie.channel)            # Prepare to add the emergency URLs
    emergency_urls_stat = config.get_setting("emergency_urls", channel_alt)         # Does the channel want emergency URLs?
    emergency_urls_succ = False
    channel = __import__('channels.%s' % channel_alt, fromlist=["channels.%s" % channel_alt])
    if serie.torrent_caching_fail:                              # If the conversion process failed, do not cache
        emergency_urls_stat = 0
        del serie.torrent_caching_fail
    
    new_episodelist = []
    # Get the season and episode number and discard entries that lack one
    tags = []
    if config.get_setting("enable_filter", "videolibrary"):
        tags = [x.strip() for x in config.get_setting("filters", "videolibrary").lower().split(",")]

    for e in episodelist:
        headers = {}
        if e.headers:
            headers = e.headers
        if tags and any(tag in e.title.lower() for tag in tags):
            continue
        
        try:
            season_episode = scrapertools.get_season_and_episode(e.title)
            if not season_episode:
                continue
        
            # If the emergency URL option is set, it is added to each episode after running the channel's findvideos
            if e.emergency_urls and isinstance(e.emergency_urls, dict): del e.emergency_urls    # Clear previous traces
            json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())    # Path of the episode's .json
            if emergency_urls_stat == 1 and not e.emergency_urls and e.contentType == 'episode':     # Save emergency URLs?
                if not silent:
                    p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title)     # progress dialog
                if json_path in ficheros:                                   # If the .json exists, take the URLs from it
                    if overwrite:                                           # but only if the .json files are overwritten
                        json_epi = Item().fromjson(filetools.read(json_path))                   # Read the .json
                        if json_epi.emergency_urls:                         # if the emergency URLs exist...
                            e.emergency_urls = json_epi.emergency_urls      # ... copy them
                        else:                                               # and if not...
                            e = emergency_urls(e, channel, json_path, headers=headers)  # ... generate them
                else:
                    e = emergency_urls(e, channel, json_path, headers=headers)  # If the episode does not exist, generate the URLs
                if e.emergency_urls:                                        # If we now have URLs...
                    emergency_urls_succ = True                              # ... success; the .nfo will be marked
            elif emergency_urls_stat == 2 and e.contentType == 'episode':   # Delete emergency URLs?
                if e.emergency_urls: del e.emergency_urls
                emergency_urls_succ = True                                  # ... success; the .nfo will be marked
            elif emergency_urls_stat == 3 and e.contentType == 'episode':   # Update emergency URLs?
                if not silent:
                    p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title)     # progress dialog
                e = emergency_urls(e, channel, json_path, headers=headers)  # generate the URLs
                if e.emergency_urls:                                        # If we now have URLs...
                    emergency_urls_succ = True                              # ... success; the .nfo will be marked

            if not e.infoLabels["tmdb_id"] or (serie.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != serie.infoLabels["tmdb_id"]):    # in multi-channel shows, the infoLabels of the...
                e.infoLabels = serie.infoLabels                             # ... current channel prevail over the original's
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            if e.videolibray_emergency_urls:
                del e.videolibray_emergency_urls
            if e.channel_redir:
                del e.channel_redir                                         # ... and the redirection marks are cleared
            new_episodelist.append(e)
        except:
            if e.contentType == 'episode':
                logger.error("No se ha podido guardar las urls de emergencia de %s en la videoteca" % e.contentTitle)
                logger.error(traceback.format_exc())
            continue

    # No episode list, nothing to save
    if not len(new_episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    # Force float because integer division truncates in Python 2.x
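    # (under Python 2, 100 / 3 == 33 because int / int truncates, while
    #  float(100) / 3 == 33.33...; the cast keeps the progress step accurate)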
    try:
        t = float(100) / len(new_episodelist)
    except:
        t = 0

    last_season_episode = ''
    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), config.get_localized_string(60064), e.title)

        high_sea = e.contentSeason
        high_epi = e.contentEpisodeNumber
        if scrapertools.find_single_match(e.title, r'[aA][lL]\s*(\d+)'):
            high_epi = int(scrapertools.find_single_match(e.title, r'[aA][lL]\s*(\d+)'))
        max_sea = e.infoLabels["number_of_seasons"]
        max_epi = 0
        if e.infoLabels["number_of_seasons"] and (e.infoLabels["temporada_num_episodios"] or e.infoLabels["number_of_seasons"] == 1):
            if e.infoLabels["number_of_seasons"] == 1 and e.infoLabels["number_of_episodes"]:
                max_epi = e.infoLabels["number_of_episodes"]
            else:
                max_epi = e.infoLabels["temporada_num_episodios"]

        season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

        if season_episode in nostrm_episodelist:
            logger.error('Error en la estructura de la Videoteca: Serie ' + serie.contentSerieName + ' ' + season_episode)
            continue
        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        if not strm_exists:
            # If season_episode.strm does not exist, add it
            item_strm = Item(action='play_from_library', channel='videolibrary',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_language:
                # if tvshow.nfo has a filter, pass it to the item_strm being generated
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # If season_episode.nfo does not exist, add it
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)

            item_nfo = e.clone(channel="videolibrary", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))
            if item_nfo.emergency_urls:
                del item_nfo.emergency_urls                     # Kept only in the episode's .json

            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())

        # Continue only if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Get the episode's infoLabels
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)

                # In multi-channel shows, the infoLabels of the current channel prevail over the original's
                if not e.infoLabels["tmdb_id"] or (item_nfo.infoLabels["tmdb_id"] \
                            and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]): 
                    e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentSerieName] = 0

                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1

        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    #logger.debug('high_sea x high_epi: %sx%s' % (str(high_sea), str(high_epi)))
    #logger.debug('max_sea x max_epi: %sx%s' % (str(max_sea), str(max_epi)))
    if not silent:
        p_dialog.close()

    if news_in_playcounts or emergency_urls_succ or serie.infoLabels["status"] == "Ended" or serie.infoLabels["status"] == "Canceled":
        # If there are new episodes, mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)
            
            # If inserting/deleting emergency URLs in the episodes' .json files succeeded, mark the .nfo
            if emergency_urls_succ:
                if tvshow_item.emergency_urls and not isinstance(tvshow_item.emergency_urls, dict):
                    del tvshow_item.emergency_urls
                if emergency_urls_stat in [1, 3]:                               # Save/update links operation
                    if not tvshow_item.emergency_urls:
                        tvshow_item.emergency_urls = dict()
                    if tvshow_item.library_urls.get(serie.channel, False):
                        tvshow_item.emergency_urls.update({serie.channel: True})
                elif emergency_urls_stat == 2:                                  # Delete links operation
                    if tvshow_item.emergency_urls and tvshow_item.emergency_urls.get(serie.channel, False):
                        tvshow_item.emergency_urls.pop(serie.channel, None)     # remove the entry from the .nfo
                        
            if tvshow_item.active == 30:
                tvshow_item.active = 1
            if tvshow_item.infoLabels["tmdb_id"] == serie.infoLabels["tmdb_id"]:
                tvshow_item.infoLabels = serie.infoLabels
                tvshow_item.infoLabels["title"] = tvshow_item.infoLabels["tvshowtitle"] 

            if max_sea == high_sea and max_epi == high_epi and (tvshow_item.infoLabels["status"] == "Ended" 
                            or tvshow_item.infoLabels["status"] == "Canceled") and insertados == 0 and fallidos == 0:
                tvshow_item.active = 0                                          # ... stop updating it
                logger.debug("%s [%s]: serie 'Terminada' o 'Cancelada'.  Se desactiva la actualización periódica" % \
                            (serie.contentSerieName, serie.channel))
            
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            logger.error("No se ha podido guardar las urls de emergencia de %s en la videoteca" % tvshow_item.contentSerieName)
            logger.error(traceback.format_exc())
            fallidos = -1
        else:
            # ... on success, update the Kodi video library
            if config.is_xbmc() and not silent:
                from platformcode import xbmc_videolibrary
                xbmc_videolibrary.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
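
scrapertools.get_season_and_episode drives most of the matching above: it extracts an "SxEE" code (e.g. "1x02") from a title or filename and apparently returns "" when there is none, which is why empty results are skipped. A simplified standalone sketch under that assumption:

import re

def get_season_and_episode(title):
    # Return "season x episode" (e.g. "1x02") if the title contains one, else "".
    match = re.search(r'(\d+)\s*[xX]\s*(\d+)', title)
    if not match:
        return ""
    return "%sx%s" % (int(match.group(1)), str(int(match.group(2))).zfill(2))

print(get_season_and_episode("Some Show 1x02 - Pilot"))  # -> 1x02
print(get_season_and_episode("Some Show (2010)"))        # -> (empty string)
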
Example #18
def save_library_episodes(path,
                          episodelist,
                          serie,
                          silent=False,
                          overwrite=True):
    """
    Saves to the given path every episode included in the episodelist list
    @type path: str
    @param path: path where the episodes will be saved
    @type episodelist: list
    @param episodelist: list of items representing the episodes to be saved.
    @type serie: item
    @param serie: the show whose episodes are being saved
    @type silent: bool
    @param silent: whether the notification is shown
    @type overwrite: bool
    @param overwrite: allows overwriting existing files
    @rtype insertados: int
    @return:  the number of episodes inserted
    @rtype sobreescritos: int
    @return:  the number of episodes overwritten
    @rtype fallidos: int
    @return:  the number of failed episodes
    """
    logger.info()

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    if overwrite == "everything":
        overwrite = True
        overwrite_everything = True
    else:
        overwrite_everything = False

    # List all of the show's files up front, so we avoid checking one by one whether each exists
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]

    # silent suppresses the progress display (for library_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress('streamondemand',
                                                 'Aggiunta episodi...')
        p_dialog.update(0, 'Aggiunta episodio...')

    # Force float because integer division truncates in Python 2.x
    t = float(100) / len(episodelist)

    for i, e in enumerate(episodelist):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)),
                            'Aggiunta episodio...', e.title)

        try:
            season_episode = scrapertools.get_season_and_episode(
                e.title.lower())

            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            season_episode = "%sx%s" % (e.contentSeason,
                                        str(e.contentEpisodeNumber).zfill(2))
        except:
            continue

        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" %
                                          (season_episode, e.channel)).lower())

        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        strm_exists_before = True
        nfo_exists_before = True
        json_exists_before = True

        if not strm_exists or overwrite_everything:
            if not overwrite_everything:
                strm_exists_before = False

            # If season_episode.strm does not exist, add it
            item_strm = Item(action='play_from_library',
                             channel='biblioteca',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""),
                             infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_idiomas:
                # if tvshow.nfo has a filter, pass it to the item_strm being generated
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error(
                        "Se ha producido un error al obtener el nombre de la serie a filtrar"
                    )

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(
                strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if (not nfo_exists
                or overwrite_everything) and e.infoLabels.get("imdb_id"):
            if not overwrite_everything:
                nfo_exists_before = False

            # If season_episode.nfo does not exist, add it
            if e.infoLabels["tmdb_id"]:
                scraper.find_and_set_infoLabels(e)
                head_nfo = "https://www.themoviedb.org/tv/%s/season/%s/episode/%s\n" % (
                    e.infoLabels['tmdb_id'], e.contentSeason,
                    e.contentEpisodeNumber)

            elif e.infoLabels["tvdb_id"]:
                head_nfo = e.url_scraper

            else:
                head_nfo = "Aqui ira el xml"  # TODO

            item_nfo = e.clone(channel="biblioteca",
                               url="",
                               action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))

            nfo_exists = filetools.write(nfo_path,
                                         head_nfo + item_nfo.tojson())

        # Continue only if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:

            if not json_exists or overwrite:
                # Get the episode's infoLabels
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)

                e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists or overwrite_everything:
                        if not overwrite_everything:
                            json_exists_before = False
                            logger.info("Insertado: %s" % json_path)
                        else:
                            logger.info("Sobreescritos todos los archivos!")
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentTitle] = 0
                        if (not overwrite_everything and not json_exists):
                            json_exists = True
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1

        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1

        if (not strm_exists_before or not nfo_exists_before
                or not json_exists_before):
            if (strm_exists and nfo_exists and json_exists):
                insertados += 1
            else:
                logger.error("El archivo strm, nfo o json no existe")

        if not silent and p_dialog.iscanceled():
            break

    if not silent:
        p_dialog.close()

    if news_in_playcounts:
        # If there are new episodes, mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            if tvshow_item.active == 30:
                tvshow_item.active = 1
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(
                days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1

        # ... and update the Kodi library
        if config.is_xbmc() and not silent:
            from platformcode import xbmc_library
            xbmc_library.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados,
                  sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
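
Note the filetools.walk(path).next() call near the top of this example (and of Example #19): generator.next() is Python 2-only syntax. Example #17 already uses the version-portable spelling:

# Python 2-only (as written above):
#     raiz, carpetas_series, ficheros = filetools.walk(path).next()
# Portable (Python 2.6+ and Python 3), as in Example #17:
#     raiz, carpetas_series, ficheros = next(filetools.walk(path))
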
Example #19
def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Saves to the given path every episode included in the episodelist list
    @type path: str
    @param path: path where the episodes will be saved
    @type episodelist: list
    @param episodelist: list of items representing the episodes to be saved.
    @type serie: item
    @param serie: the show whose episodes are being saved
    @type silent: bool
    @param silent: whether the notification is shown
    @type overwrite: bool
    @param overwrite: allows overwriting existing files
    @rtype insertados: int
    @return:  the number of episodes inserted
    @rtype sobreescritos: int
    @return:  the number of episodes overwritten
    @rtype fallidos: int
    @return:  the number of failed episodes
    """
    logger.info()

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # List all of the show's files up front, so we avoid checking one by one whether each exists
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]

    nostrm_episodelist = []
    for root, folders, files in filetools.walk(path):
        for file in files:
            season_episode = scrapertools.get_season_and_episode(file)
            if season_episode == "" or filetools.exists(
                    filetools.join(path, "%s.strm" % season_episode)):
                continue
            nostrm_episodelist.append(season_episode)
    nostrm_episodelist = sorted(set(nostrm_episodelist))

    # silent suppresses the progress display (for videolibrary_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress(
            config.get_localized_string(20000),
            config.get_localized_string(60064))
        p_dialog.update(0, config.get_localized_string(60065))

    new_episodelist = []
    # Get the season and episode number and discard entries that lack one
    tags = []
    if config.get_setting("enable_filter", "videolibrary"):
        tags = [
            x.strip() for x in config.get_setting(
                "filters", "videolibrary").lower().split(",")
        ]
    for e in episodelist:
        if tags and any(tag in e.title.lower() for tag in tags):
            continue
        try:
            season_episode = scrapertools.get_season_and_episode(e.title)

            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            new_episodelist.append(e)
        except:
            continue

    # No episode list, nothing to save
    if not len(new_episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    # Force float because integer division truncates in Python 2.x
    t = float(100) / len(new_episodelist)

    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)),
                            'Añadiendo episodio...', e.title)

        season_episode = "%sx%s" % (e.contentSeason, str(
            e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" %
                                          (season_episode, e.channel)).lower())

        if season_episode in nostrm_episodelist:
            continue
        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        if not strm_exists:
            # If season_episode.strm does not exist, add it
            item_strm = Item(action='play_from_library',
                             channel='videolibrary',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""),
                             infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_language:
                # if tvshow.nfo has a filter, pass it to the item_strm being generated
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error(
                        "Se ha producido un error al obtener el nombre de la serie a filtrar"
                    )

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(
                strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # If season_episode.nfo does not exist, add it
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)

            item_nfo = e.clone(channel="videolibrary",
                               url="",
                               action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))

            nfo_exists = filetools.write(nfo_path,
                                         head_nfo + item_nfo.tojson())

        # Continue only if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Get the episode's infoLabels
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)

                e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentSerieName] = 0

                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1

        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    if not silent:
        p_dialog.close()

    if news_in_playcounts:
        # If there are new episodes, mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            if tvshow_item.active == 30:
                tvshow_item.active = 1
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(
                days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1
        else:
            # ... on success, update the Kodi video library
            if config.is_xbmc() and not silent:
                from platformcode import xbmc_videolibrary
                xbmc_videolibrary.update(FOLDER_TVSHOWS,
                                         filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados,
                  sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
Example #20
def list_all(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url, canonical=canonical).data
    soup = create_soup(item.url)
    sectionptn = ''
    pattern = ''
    matches = []
    genericvalues = {
        'recomended': True,
        'more_watched': True,
        'popular': True,
        'search': True,
        'newepisodes': False,
        'allanimes': False,
        '': False
    }
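    # genericvalues maps item.param to whether that section shares the generic
    # 4-field (thumb, fanart, url, title) layout parsed in the second branch below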

    #================== Phase 1: Pattern detection ==================#
    # Get the specific section (avoids regex conflicts)              #
    # Check which part of the function is being called               #
    # so the correct pattern is used (or we generalize)              #
    # Patterns are recycled where possible                           #

    # ===== New-release patterns (new episodes) =====
    if item.param == 'newepisodes':
        section = soup.find('div', class_='animation-2 items')
    elif genericvalues[item.param]:

        if item.param == 'recomended' or item.param == 'more_watched':
            if item.param == 'recomended':  # == Recommended patterns ==
                section = soup.find('div', id='slider-tvshows')

            elif item.param == 'more_watched':  # == Most-watched patterns ==
                section = soup.find('div', class_='items featured')

        elif item.param == 'popular':  # == Popular patterns ==
            section = soup.find('div', class_='items featured')

        elif item.param == 'search':  # == Search-results patterns ==
            section = soup.find('div', class_='search-page')
    elif item.param == 'allanimes':
        section = soup.find('div', id='archive-content')
    else:
        section = soup.find('div', class_='items')

    articles = section.find_all('article')
    for article in articles:
        match = []

        if item.param == 'newepisodes':
            thumb = article.find('img', class_='lazyload')['data-src']
            url = article.find('a')['href']
            epnum = scrapertools.find_single_match(
                article.find('div', class_='epiposter').text, r'\d+$')
            title = article.find('div', class_='data').text
            match = [thumb, url, epnum, title]

        elif genericvalues[item.param]:
            thumb = article.find('img', class_='lazyload')['data-src']
            fanart = scrapertools.find_single_match(article.find('noscript'),
                                                    'src="([^"]+)')

            if item.param == 'recomended' or item.param == 'more_watched' or item.param == 'popular':
                url = article.find('a')['href']
                title = article.find('div', class_='data').find('h3').text

            elif item.param == 'search':  # == Search-results patterns ==
                url = article.find('div', class_='title').find('a')['href']
                title = article.find('div', class_='title').text
            match = [thumb, fanart, url, title]

        else:  # == Generic pattern for common pages ==
            thumb = scrapertools.find_single_match(
                article.find('noscript').text, 'src=["\'](.+?)[\'"]')
            contentType = article.find('div', class_='CategoriaEnPoster').text
            status = article.find('div', class_='estadoposter').text
            url = article.find('div', class_='data').find('a')['href']
            title = article.find('div', class_='data').find('h3').text
            airdate = ''
            year = ''
            plot = article.find('div', class_='texto').text
            genres = article.find('div', class_='genres')
            if article.find("div", class_="data"):
                if article.find("div", class_="data").find("span"):
                    airdate = article.find(
                        "div", class_="data").find("span").text.strip()

            match = [
                thumb, contentType, status, url, title, airdate, year, plot,
                genres
            ]

        matches.append(match)

    #============== Phase 2: Value assignment ==============#
    # Since each section yields a different level of        #
    # information, a different for loop is needed per case  #
    listitem = Item()

    logger.info("item.param: " + str(item.param))
    # >>>> Loop for new episodes (goes straight to findvideos) <<<< #
    if item.param == "newepisodes":
        for scpthumb, scpurl, scpepnum, scptitle in matches:
            conType = ''
            infoLabels = {}
            title, contentTitle, langs = process_title(scptitle.strip(),
                                                       getWithTags=True,
                                                       get_contentTitle=True,
                                                       get_lang=True)
            if scpepnum:
                infoLabels['episode'] = int(scpepnum)
                conType = 'tvshow'
            else:
                conType = 'movie'

            # ----- Season is almost never returned, but handle the rare case it is ----- #
            scpseason = scrapertools.find_single_match(scpurl, r'season.(\d+)')
            if scpseason:
                infoLabels['season'] = scpseason
            else:
                infoLabels['season'] = None
            itemlist.append(
                Item(action="findvideos",
                     channel=item.channel,
                     contentSerieName=contentTitle,
                     contentTitle=contentTitle,
                     contentType=conType,
                     infoLabels=infoLabels,
                     language=langs,
                     title=title,
                     thumbnail=scpthumb,
                     url=scpurl))

    # >>>> Loop for similar sections (they yield 4 variables in the same order) <<<< #
    elif genericvalues[item.param]:
        for scpthumb, scpfanart, scpurl, scptitle in matches:
            title, contentTitle, langs = process_title(scptitle.strip(),
                                                       getWithTags=True,
                                                       get_contentTitle=True,
                                                       get_lang=True)
            itemlist.append(
                Item(action="seasons",
                     channel=item.channel,
                     contentSerieName=contentTitle,
                     contentTitle=contentTitle,
                     contentType='tvshow',
                     language=langs,
                     title=title,
                     thumbnail=scpthumb,
                     url=scpurl))

    # >>>> Loop for generic sections (almost any page outside the home page) <<<< #
    else:
        for scpthumb, scpcontentType, scpstatus, scpurl, scptitle, scpairdate, scpyear, scpplot, scpgenres in matches:
            tagged_title, title, langs = process_title(scptitle.strip(),
                                                       getWithTags=True,
                                                       get_contentTitle=True,
                                                       get_lang=True)
            infoLabels = {"status": scpstatus.strip().title()}

            if scpairdate:
                date = datetime.datetime.strptime(scpairdate, "%b. %d, %Y")
                infoLabels['year'] = date.strftime("%Y")

            if scpgenres:
                genmatch = scpgenres.find_all('a')
                if len(genmatch) > 0:
                    genre = ", ".join([x.text.strip() for x in genmatch])
                    infoLabels['genre'] = genre.strip()

            new_item = Item(action="seasons",
                            channel=item.channel,
                            infoLabels=infoLabels,
                            language=langs,
                            param=item.param,
                            plot=scpplot,
                            title=tagged_title,
                            thumbnail=scpthumb,
                            url=scpurl)

            if scpcontentType == 'pelicula' or 'pelicula' in item.url:
                new_item.contentType = 'movie'
                new_item.contentTitle = title
                if "date" in locals():
                    infoLabels['release_date'] = date.strftime("%Y/%m/%d")

            else:
                new_item.contentType = 'tv'
                new_item.contentSerieName = title
                if "date" in locals():
                    infoLabels['first_air_date'] = date.strftime("%Y/%m/%d")
                    infoLabels['premiered'] = infoLabels['first_air_date']

            itemlist.append(new_item)

    #===============================Phase 3: Value correction=================================#
    #----------Fix items that are movies instead of shows, or odd cases in the title----------#
    #---Fix the title against tmdb and clean it according to the content (show vs. movie)-----#

    # set_infoLabels_async(itemlist)
    for i in itemlist:
        #---Strip episode numbers and useless whitespace---#
        if i.contentType == 'movie':
            i.contentTitle = i.infoLabels['title']
            i.contentSerieName = ''
        else:
            i.contentSerieName = i.infoLabels['title']
            i.contentTitle = ''
        if i.infoLabels['episode']:
            pretext = ''
            if i.infoLabels['season']:
                pretext += 'S' + str(i.infoLabels['season'])
            pretext += 'E' + str(i.infoLabels['episode'])
            i.title = pretext + ': ' + i.title

    # tmdb.set_infoLabels_itemlist(itemlist, force_no_year=True)
    #===================Phase 4: Pagination item assignment (if applicable)=================#
    #---If another page is found, append a pager item (only for listings that have pages)---#

    if not genericvalues.get(item.param):
        nextpage = get_next_page(data)
        if nextpage:
            itemlist.append(
                Item(action='list_all',
                     channel=item.channel,
                     param=item.param,
                     title='[COLOR=yellow]Siguiente página >[/COLOR]',
                     url=nextpage))
    return itemlist
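# --- Hedged sketch (editor's note, not part of the original channel) ------ #
# The three loops above lean on two names defined elsewhere in the channel.
# A plausible, purely illustrative reconstruction of both:
#   * genericvalues presumably maps item.param to True for the sections that
#     yield the 4-tuple (thumb, fanart, url, title) handled by the second loop.
#   * process_title() presumably splits language tags such as "[Latino]" out
#     of the scraped title; the signature below only mirrors how it is called.
genericvalues = {'recomended': True, 'more_watched': True, 'popular': True,
                 'search': True, 'newepisodes': False}

def process_title(scraped, getWithTags=False, get_contentTitle=False, get_lang=False):
    import re
    langs = re.findall(r'\[([^\]]+)\]', scraped)            # e.g. ['Latino']
    clean = re.sub(r'\s*\[[^\]]+\]', '', scraped).strip()   # tag-free title
    out = [scraped if getWithTags else clean]
    if get_contentTitle:
        out.append(clean)
    if get_lang:
        out.append(langs)
    return tuple(out) if len(out) > 1 else out[0]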
Example #21
0
def list_all(item):
    logger.info()

    itemlist = []
    soup = get_source(item.url, soup=True)
    if not soup:
        platformtools.dialog_notification(
            "Cambio de estructura",
            "Reporta el error desde el menú principal",
            sound=False)
        return itemlist

    items = soup.find_all('article', id=re.compile(r"^post-(?:\d+|)"))
    # items = soup.find('div', id=' archive-content').find_all('article')

    for article in items:
        data = article.find('div', class_='data')
        year = data.find('p').text
        year = scrapertools.find_single_match(year, '(\d{4})')
        infoLabels = {'year': year, 'genres': data.find('span').text}
        thumbnail = article.find('img')['src']
        title = data.find('h3').text
        url = article.find('a')['href']
        url = "{}%s".format(host) % url

        if 'tmdb.org' in thumbnail:
            infoLabels['filtro'] = scrapertools.find_single_match(
                thumbnail, "/(\w+)\.\w+$")

        it = Item(action='findvideos',
                  channel=item.channel,
                  fanart=item.fanart,
                  infoLabels=infoLabels,
                  thumbnail=thumbnail,
                  title=title,
                  url=url)

        # Determine the listing type; fall back to the URL when not given
        list_type = ''
        if item.list_type in ['movies', 'tvshows']:
            list_type = item.list_type

        elif item.viewType in ['movies', 'tvshows']:
            list_type = item.viewType

        elif 'serie' in it.url:
            list_type = 'tvshows'

        elif 'pelicula' in it.url:
            list_type = 'movies'

        if list_type == 'tvshows':
            it.action = 'seasons'
            it.contentSerieName = title
            it.contentType = 'tvshow'
            it.viewType = 'episodes'

        elif list_type == 'movies':
            it.contentTitle = title
            it.contentType = 'movie'
            it.viewType = 'movies'

        itemlist.append(it)

    if item.tmdb is not False:  # skip the TMDB lookup only when explicitly disabled
        tmdb.set_infoLabels(itemlist, True)

    btnnext = soup.find("div", class_="pagPlaydede")

    if btnnext:
        itemlist.append(
            item.clone(title="Siguiente >", url=btnnext.find("a")['href']))
    return itemlist
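# --- Hedged sketch (editor's note, not part of the original channel) ------ #
# get_source() above is assumed to be the usual channel helper: download the
# page and, when soup=True, return it parsed with BeautifulSoup. A minimal
# version under that assumption:
from bs4 import BeautifulSoup

def get_source(url, soup=False):
    data = httptools.downloadpage(url).data
    if soup:
        return BeautifulSoup(data, 'html.parser')
    return data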
Example #22
0
def busqueda(item):
    logger.info()
    itemlist = list()

    # Download the page
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)

    # Extract the entries (folders)
    bloque = scrapertools.find_single_match(data,
                                            '<h2>Peliculas</h2>(.*?)</div>')
    bloque += scrapertools.find_single_match(data,
                                             '<h2>Series</h2>(.*?)</div>')

    patron = '<figure class="col-lg-2.*?href="([^"]+)".*?src="([^"]+)".*?<figcaption title="([^"]+)"'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    peliculas = False
    series = False
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        new_item = Item(channel=item.channel,
                        contentType="movie",
                        url=scrapedurl,
                        title="   " + scrapedtitle,
                        text_color=color1,
                        context="buscar_trailer",
                        fulltitle=scrapedtitle,
                        contentTitle=scrapedtitle,
                        thumbnail=scrapedthumbnail,
                        action="findvideos")

        if "/peliculas/" in scrapedurl and not peliculas:
            itemlist.append(
                Item(channel=item.channel,
                     action="",
                     title="Películas",
                     text_color=color2))
            peliculas = True
        if "/series/" in scrapedurl and not series:
            itemlist.append(
                Item(channel=item.channel,
                     action="",
                     title="Series",
                     text_color=color2))
            series = True

        if "/series/" in scrapedurl:
            new_item.contentType = "tvshow"
            new_item.show = scrapedtitle
            new_item.action = "episodios"

        filtro_thumb = scrapedthumbnail.replace(
            "http://image.tmdb.org/t/p/w342", "")
        filtro_list = {"poster_path": filtro_thumb}
        new_item.infoLabels["filtro"] = filtro_list.items()
        itemlist.append(new_item)

    try:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass

    return itemlist
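# --- Hedged sketch (editor's note, not part of the original channel) ------ #
# The "filtro" infoLabel above presumably lets the tmdb module disambiguate
# between several search results by comparing fields such as poster_path.
# A plausible matching step, assuming `results` holds the candidate dicts
# returned by the TMDB search API:
def pick_by_filtro(results, filtro):
    for candidate in results:
        # filtro is a list of (key, value) pairs, e.g. [("poster_path", "/x.jpg")]
        if all(candidate.get(key) == value for key, value in filtro):
            return candidate
    return results[0] if results else None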
Example #23
0
def list_all(item):
    logger.info()
    itemlist = []
    soup = create_soup(item.url)
    articles = soup.find(id='main').find_all('table')

    episodes = item.list_what == 'episodes'
    batch = item.list_what == 'batch'
    movies = item.list_what == 'movies'

    for div in articles:
        # Scraping: collect the data
        infoLabels = {}
        link = div.find('a', class_='aa_ss_ops_new')
        title, infoLabels = process_title(link.text,
                                          infoLabels,
                                          get_year_only=True)
        language_list = [
            IDIOMAS.get(x['data-title'], 'VO')
            for x in div.find_all(class_='tooltip3')
            if IDIOMAS.get(x['data-title'], '') in ('VOS', 'VOSE')
        ]
        quality_list = [
            x.text for x in div.find_all(class_='load_more_links_buttons')
        ]

        # Build the item
        newit = Item(channel=item.channel,
                     infoLabels=infoLabels,
                     language=language_list,
                     quality=quality_list,
                     title=title,
                     url=link['href'])

        # Assign contentType-specific properties to the item
        if episodes:
            title, labels = newit.title.split(' - ')
            labels = scrapertools.find_multiple_matches(
                labels, '(?is)(\d+)(?:.+?[\(](\w+)[\)])?')[0]
            newit.action = play_direct_action
            newit.contentType = 'tvshow'
            newit.contentEpisode = int(labels[0])
            newit.title = 'E{}: {}'.format(newit.contentEpisode, title)
            if labels[-1]:
                newit.title += ' [{}]'.format(labels[-1])
            newit.contentSerieName = title
        elif batch:
            title, labels = newit.title.split(' - ')
            labels = scrapertools.find_multiple_matches(
                labels, '(?is)(\d+).+?(\d+)(?:.+?[\(](\w+)[\)])?')[0]
            newit.action = 'episodesxseason'
            newit.contentType = 'tvshow'
            newit.title = '{} [{} - {}]'.format(title, labels[0], labels[1])
            logger.info(labels)
            if labels[2]:
                newit.title += ' [{}]'.format(labels[2])
            newit.contentSerieName = title
        elif movies:
            newit.action = 'findvideos'
            newit.contentType = 'movie'
            newit.contentTitle = newit.title

        # Append the item
        itemlist.append(newit)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, no_year=True)
    logger.info(play_direct_action)
    nextpage = soup.find('a', class_='next')
    if nextpage:
        itemlist.append(
            item.clone(text_color='yellow',
                       title='Siguiente página >',
                       url=nextpage['href']))
    return itemlist
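# --- Hedged sketch (editor's note, not part of the original channel) ------ #
# IDIOMAS above is assumed to map the tooltip text of each language flag to
# the channel's short codes; the comprehension keeps only subtitled releases.
# Illustrative mapping and call (the real keys depend on the page's markup):
IDIOMAS = {'Subtitulado': 'VOSE', 'Sub': 'VOS', 'Japones': 'VO'}
tooltips = ['Subtitulado', 'Japones']
language_list = [IDIOMAS.get(t, 'VO') for t in tooltips
                 if IDIOMAS.get(t, '') in ('VOS', 'VOSE')]
# -> ['VOSE']; the plain 'VO' entry is filtered out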
Example #24
0
def save_library_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Saves to the given path every episode included in the episodelist
    @type path: str
    @param path: folder where the episodes are saved
    @type episodelist: list
    @param episodelist: list of items representing the episodes to save
    @type serie: item
    @param serie: the show whose episodes are being saved
    @type silent: bool
    @param silent: whether to show the progress notification
    @type overwrite: bool
    @param overwrite: allows overwriting existing files
    @rtype insertados: int
    @return: number of episodes inserted
    @rtype sobreescritos: int
    @return: number of episodes overwritten
    @rtype fallidos: int
    @return: number of failed episodes
    """
    logger.info()

    # No episode list, nothing to save
    if not episodelist:
        logger.info("No episode list, leaving without creating strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # List all the show's files up front, so we don't have to check them one by one
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]

    # silent suppresses the progress dialog (used by library_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress('streamondemand', 'Aggiunta episodi...')
        p_dialog.update(0, 'Aggiunta episodio...')

    new_episodelist = []
    # Get the season and episode numbers and discard items that lack them
    for e in episodelist:
        try:
            season_episode = scrapertools.get_season_and_episode(e.title)

            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            new_episodelist.append(e)
        except:
            continue

    # No valid episodes left, nothing to save
    if not new_episodelist:
        logger.info("No valid episodes, leaving without creating strm")
        return 0, 0, 0

    # force float because integer division truncates in Python 2.x
    t = float(100) / len(new_episodelist)

    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Aggiunta episodio...', e.title)


        season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        if not strm_exists:
            # If season_episode.strm does not exist, create it
            item_strm = Item(action='play_from_library', channel='biblioteca',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_idiomas:
                # if tvshow.nfo has a filter, pass it on to the item_strm being generated
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # If season_episode.nfo does not exist, create it
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)

            item_nfo = e.clone(channel="biblioteca", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))

            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())


        # Continue only if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Get the episode's infoLabels
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)

                e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Marcamos episodio como no visto
                        news_in_playcounts[season_episode] = 0
                        # Marcamos la temporada como no vista
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Marcamos la serie como no vista
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentTitle] = 0

                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1

        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1


        if not silent and p_dialog.iscanceled():
            break

    if not silent:
        p_dialog.close()

    if news_in_playcounts:
        # If there are new episodes, mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            if tvshow_item.active == 30:
                tvshow_item.active = 1
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1
        else:
            # ... and on success, refresh the Kodi library
            if config.is_xbmc() and not silent:
                from platformcode import xbmc_library
                xbmc_library.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
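# --- Hedged sketch (editor's note, not part of the original function) ----- #
# A typical, hypothetical call site: after scraping a show's episode list,
# persist the .strm/.nfo/.json trio in the show's library folder. Every name
# and path below is illustrative.
serie = Item(channel='somechannel', contentSerieName='Mi Serie',
             contentType='tvshow', infoLabels={'code': 'tt0000001'})
episodelist = [Item(channel='somechannel', title='1x01 Piloto', url=''),
               Item(channel='somechannel', title='1x02 Secuela', url='')]
insertados, sobreescritos, fallidos = save_library_episodes(
    '/ruta/biblioteca/Mi Serie/', episodelist, serie, silent=True)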
Example #25
0
def search(item, texto):
    logger.info()
    itemlist = []

    texto = texto.replace(" ", "+")
    post = "value=%s&limit=100" % texto

    if clone:
        item.url = "{}browse?q={}".format(HOST, texto)
    else:
        item.url = urlparse.urljoin(HOST, "api/animes/search")

    try:
        if clone:
            response = httptools.downloadpage(item.url).data
            response = scrapertools.find_single_match(
                response, 'class="ListAnimes.+?</ul>')
            patron = '(?is)article class.+?a href="(.+?)".+?img src="(.+?)".+?class="type.+?>(.+?)<.+?class="Title".*?>(.+?)<.+?class="des".*?>(.+?)</p'
            matches = scrapertools.find_multiple_matches(response, patron)
            for url, thumb, _type, title, plot in matches:
                _type = _type.lower()
                url = urlparse.urljoin(HOST, url)
                it = Item(action="episodios",
                          contentType="tvshow",
                          channel=item.channel,
                          plot=plot,
                          thumbnail=thumb,
                          title=title,
                          url=url)
                if "película" in _type:
                    it.contentType = "movie"
                    it.contentTitle = title
                else:
                    it.contentSerieName = title
                    it.context = renumbertools.context(item)
                itemlist.append(it)
        else:
            dict_data = httptools.downloadpage(item.url, post=post).json
            for e in dict_data:
                if e["id"] != e["last_id"]:
                    _id = e["last_id"]
                else:
                    _id = e["id"]
                url = "%sanime/%s/%s" % (HOST, _id, e["slug"])
                title = e["title"]
                #if "&#039;" in title:
                #    title = title.replace("&#039;","")
                #if "&deg;" in title:
                #    title = title.replace("&deg;","")
                thumbnail = "%suploads/animes/covers/%s.jpg" % (HOST, e["id"])
                new_item = item.clone(action="episodios",
                                      title=title,
                                      url=url,
                                      thumbnail=thumbnail)
                if e["type"] != "movie":
                    new_item.contentSerieName = title
                    new_item.context = renumbertools.context(item)
                else:
                    new_item.contentType = "movie"
                    new_item.contentTitle = title
                itemlist.append(new_item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

    tmdb.set_infoLabels(itemlist, seekTmdb=True)

    return itemlist
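# --- Hedged sketch (editor's note, not part of the original channel) ------ #
# The non-clone branch above assumes api/animes/search answers with a JSON
# list shaped roughly as follows (field names taken from the code, values
# invented for illustration):
sample = {"id": "123", "last_id": "456", "slug": "one-piece",
          "title": "One Piece", "type": "tv"}
_id = sample["last_id"] if sample["id"] != sample["last_id"] else sample["id"]
url = "%sanime/%s/%s" % ("https://host.example/", _id, sample["slug"])
# -> 'https://host.example/anime/456/one-piece'; the cover is then built
#    from the numeric "id" instead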