def get_episodios(item):
    logger.info("pelisalacarta.channels.biblioteca get_episodios")
    itemlist = []

    # Get the episode files
    raiz, carpetas_series, ficheros = filetools.walk(item.path).next()

    # Create a list item for each strm found
    for i in ficheros:
        # strm
        if i.endswith(".strm"):
            season, episode = scrapertools.get_season_and_episode(i).split("x")
            # If filtering by season, skip episodes from other seasons
            if item.filtrar_season and int(season) != int(item.contentSeason):
                continue

            epi = Item().fromurl(filetools.read(filetools.join(raiz, i)))
            epi.contentChannel = item.contentChannel
            epi.path = filetools.join(raiz, i)
            epi.title = i
            epi.channel = "biblioteca"
            epi.action = "findvideos"
            epi.contentEpisodeNumber = episode
            epi.contentSeason = season
            # fix: override the text color if one comes in the strm
            epi.text_color = ""
            # fix: prevent the play method from running for the Kodi library
            epi.strm = False
            itemlist.append(epi)

        # videos
        elif not i.endswith(".nfo") and not i.endswith(".json") and not i.endswith(".srt"):
            season, episode = scrapertools.get_season_and_episode(i).split("x")
            # If filtering by season, skip episodes from other seasons
            if item.filtrar_season and int(season) != int(item.contentSeason):
                continue

            epi = Item()
            epi.contentChannel = "local"
            epi.path = filetools.join(raiz, i)
            epi.title = i
            epi.channel = "biblioteca"
            epi.action = "play"
            epi.contentEpisodeNumber = episode
            epi.contentSeason = season
            itemlist.append(epi)

    library.set_infolabels_from_library(itemlist, tipo="Episodes")
    return sorted(itemlist, key=get_sort_temp_epi)

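# --- Illustrative sketch (not part of the channel code): get_episodios() relies
# on scrapertools.get_season_and_episode() returning a "seasonxepisode" string
# such as "1x02", which is then split on "x". A minimal, assumed equivalent of
# that convention:
import re

def split_season_episode(name):
    """Return (season, episode) as ints from a name like '1x02.strm', or None."""
    match = re.search(r'(\d+)x(\d+)', name)
    if not match:
        return None
    return int(match.group(1)), int(match.group(2))

# split_season_episode("1x02.strm") -> (1, 2)
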
def episodesxseason(item):
    logger.info()
    itemlist = []

    # The episodes must come as JSON attached to the item
    episodios = item.json_episodios

    for episodio in episodios:
        infoLabels = item.infoLabels
        language = 'VOSE'

        # Language assignment
        if not item.language:
            if episodio.get('estado'):
                if 'SUBT' in episodio.get('estado'):
                    language = 'VOSE'
        else:
            language = item.language

        it = Item(action='findvideos',
                  channel=item.channel,
                  infoLabels=infoLabels,
                  language=language,
                  thumbnail=item.thumbnail,
                  title=episodio['capitulo'],
                  urls=episodio['opciones'],
                  url=item.url)

        # Season/episode numbers only apply to non-movie content
        if not item.contentType == 'movie':
            it.contentSeason = item.contentSeason
            it.contentEpisodeNumber = episodio['capitulo']

        itemlist.append(it)

    # Assign the infoLabels (unless building the video library)
    if not item.videolibrary:
        tmdb.set_infoLabels(itemlist, True)

    # Format the titles
    for it in itemlist:
        it.title = unify.add_languages('{}x{}: {}'.format(it.contentSeason, it.contentEpisodeNumber, it.contentTitle),
                                       it.language)

    # If it is a movie, go straight to findvideos
    if len(itemlist) == 1 and item.contentType == 'movie':
        return findvideos(itemlist[0])
    else:
        return itemlist

def novedades_episodios(item):
    logger.info("pelisalacarta.channels.animeflv novedades")

    data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS, host=CHANNEL_HOST)

    '''
    <div class="not">
        <a href="/ver/cyclops-shoujo-saipu-12.html" title="Cyclops Shoujo Saipu 12">
            <img class="imglstsr lazy" src="http://cdn.animeflv.net/img/mini/957.jpg" border="0">
            <span class="tit_ep"><span class="tit">Cyclops Shoujo Saipu 12</span></span>
        </a>
    </div>
    '''

    patronvideos = '<div class="not"[^<]+<a href="([^"]+)" title="([^"]+)"[^<]+<img class="[^"]+" ' \
                   'src="([^"]+)"[^<]+<span class="tit_ep"><span class="tit">([^<]+)<'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    itemlist = []

    for match in matches:
        scrapedtitle = scrapertools.entityunescape(match[3])
        fulltitle = scrapedtitle
        # directory = match[1]
        scrapedurl = urlparse.urljoin(item.url, match[0])
        scrapedthumbnail = urlparse.urljoin(item.url, match[2].replace("mini", "portada"))
        scrapedplot = ""
        # if DEBUG: logger.info("title=[{0}], url=[{1}], thumbnail=[{2}]".format(scrapedtitle, scrapedurl, scrapedthumbnail))

        new_item = Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
                        thumbnail=scrapedthumbnail, plot=scrapedplot, fulltitle=fulltitle)

        content_title = scrapertools.entityunescape(match[1])
        if content_title:
            episode = scrapertools.get_match(content_title, '\s+(\d+)$')
            content_title = content_title.replace(episode, '')
            season, episode = numbered_for_tratk(content_title, 1, episode)
            new_item.hasContentDetails = "true"
            new_item.contentTitle = content_title
            new_item.contentSeason = season
            new_item.contentEpisodeNumber = int(episode)

        itemlist.append(new_item)

    return itemlist

def save_library_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Saves in the given path all the episodes contained in episodelist.

    @type path: str
    @param path: path where the episodes are saved
    @type episodelist: list
    @param episodelist: list of items representing the episodes to be saved
    @type serie: item
    @param serie: show whose episodes are being saved
    @type silent: bool
    @param silent: whether the notification is shown
    @param overwrite: allows overwriting existing files
    @type overwrite: bool
    @rtype insertados: int
    @return: number of inserted episodes
    @rtype sobreescritos: int
    @return: number of overwritten episodes
    @rtype fallidos: int
    @return: number of failed episodes
    """
    logger.info()

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # List every file of the show at once, so we do not have to check for each one individually
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]

    # Silent suppresses the progress dialog (used by library_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress('streamondemand', 'Aggiunta episodi...')
        p_dialog.update(0, 'Aggiunta episodio...')

    new_episodelist = []
    # Get the season and episode numbers and discard items that have none
    for e in episodelist:
        try:
            season_episode = scrapertools.get_season_and_episode(e.title)
            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            new_episodelist.append(e)
        except:
            continue

    # No episode list, nothing to save
    if not len(new_episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    # fix float because division misbehaves in python 2.x
    t = float(100) / len(new_episodelist)

    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Aggiunta episodio...', e.title)

        season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        if not strm_exists:
            # If season_episode.strm does not exist, create it
            item_strm = Item(action='play_from_library', channel='biblioteca',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_idiomas:
                # if tvshow.nfo has a filter, pass it on to the item_strm being generated
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # If season_episode.nfo does not exist, create it
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)

            item_nfo = e.clone(channel="biblioteca", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))

            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())

        # Only continue if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Get the episode infoLabels
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)

                e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentTitle] = 0
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1
        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    if not silent:
        p_dialog.close()

    if news_in_playcounts:
        # If there are new episodes, mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            if tvshow_item.active == 30:
                tvshow_item.active = 1
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1
        else:
            # ... and if that worked, refresh the Kodi library
            if config.is_xbmc() and not silent:
                from platformcode import xbmc_library
                xbmc_library.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos

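# --- Illustrative sketch (not part of the library code): for every episode,
# save_library_episodes() works with a triad of sibling files named after the
# "SxEE" code: the .strm played by Kodi, the .nfo with scraper metadata and a
# per-channel .json. A standalone helper showing that naming scheme:
import os

def episode_file_triad(path, season, episode, channel):
    """Build the .strm/.nfo/.json paths used for one episode of one channel."""
    season_episode = "%sx%s" % (season, str(episode).zfill(2))
    strm_path = os.path.join(path, "%s.strm" % season_episode)
    nfo_path = os.path.join(path, "%s.nfo" % season_episode)
    json_path = os.path.join(path, ("%s [%s].json" % (season_episode, channel)).lower())
    return strm_path, nfo_path, json_path

# episode_file_triad("/series/Show", 1, 2, "SomeChannel")
# -> ('/series/Show/1x02.strm', '/series/Show/1x02.nfo', '/series/Show/1x02 [somechannel].json')
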
def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Saves in the given path all the episodes contained in episodelist.

    @type path: str
    @param path: path where the episodes are saved
    @type episodelist: list
    @param episodelist: list of items representing the episodes to be saved
    @type serie: item
    @param serie: show whose episodes are being saved
    @type silent: bool
    @param silent: whether the notification is shown
    @param overwrite: allows overwriting existing files
    @type overwrite: bool
    @rtype insertados: int
    @return: number of inserted episodes
    @rtype sobreescritos: int
    @return: number of overwritten episodes
    @rtype fallidos: int
    @return: number of failed episodes
    """
    logger.info()

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # List every file of the show at once, so we do not have to check for each one individually
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]

    nostrm_episodelist = []
    for root, folders, files in filetools.walk(path):
        for file in files:
            season_episode = scrapertools.get_season_and_episode(file)
            if season_episode == "" or filetools.exists(filetools.join(path, "%s.strm" % season_episode)):
                continue
            nostrm_episodelist.append(season_episode)
    nostrm_episodelist = sorted(set(nostrm_episodelist))

    # Silent suppresses the progress dialog (used by videolibrary_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress(config.get_localized_string(20000),
                                                 config.get_localized_string(60064))
        p_dialog.update(0, config.get_localized_string(60065))

    new_episodelist = []
    # Get the season and episode numbers and discard items that have none
    tags = []
    if config.get_setting("enable_filter", "videolibrary"):
        tags = [x.strip() for x in config.get_setting("filters", "videolibrary").lower().split(",")]

    for e in episodelist:
        if tags != [] and tags != None and any(tag in e.title.lower() for tag in tags):
            continue
        try:
            season_episode = scrapertools.get_season_and_episode(e.title)
            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            new_episodelist.append(e)
        except:
            continue

    # No episode list, nothing to save
    if not len(new_episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    # fix float because division misbehaves in python 2.x
    t = float(100) / len(new_episodelist)

    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Añadiendo episodio...', e.title)

        season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

        if season_episode in nostrm_episodelist:
            continue

        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        if not strm_exists:
            # If season_episode.strm does not exist, create it
            item_strm = Item(action='play_from_library', channel='videolibrary',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_language:
                # if tvshow.nfo has a filter, pass it on to the item_strm being generated
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # If season_episode.nfo does not exist, create it
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)

            item_nfo = e.clone(channel="videolibrary", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))

            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())

        # Only continue if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Get the episode infoLabels
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)

                e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentSerieName] = 0
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1
        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    if not silent:
        p_dialog.close()

    if news_in_playcounts:
        # If there are new episodes, mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            if tvshow_item.active == 30:
                tvshow_item.active = 1
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1
        else:
            # ... and if that worked, refresh the Kodi video library
            if config.is_xbmc() and not silent:
                from platformcode import xbmc_videolibrary
                xbmc_videolibrary.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos

def listado(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = '<div class="MiniFicha">.*?'
    patron += '<img src="([^"]+).*?'
    patron += '<div class="MiniF_TitleSpecial">[^>]+>([^<]+).*?'
    patron += '<b>Categoria:\s*</b>([^&]+)»\s*([^<]+).*?'
    patron += '<div class="OpcionesDescargasMini">(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for thumbnail, title, cat_padres, cat_hijos, opciones in matches:
        # logger.debug(thumbnail + "\n" + title + "\n" + cat_padres + "\n" + cat_hijos + "\n" + opciones)

        # Get the year from the title and strip the leftovers
        patron = '\d{4}$'
        year = scrapertools.find_single_match(title, patron)
        if year:
            title = re.sub(patron, "", title)
            patron = '\s?-?\s?(line)?\s?-\s?$'
            regex = re.compile(patron, re.I)
            title = regex.sub("", title)

        # Use the 'b' image because it is bigger
        thumbnail = HOST + thumbnail[:-5] + 'b' + thumbnail[-4:]

        # Look for the watch-online option
        patron = '<a href="http://estrenosly.org/ver-online-([^"]+)'
        url_ver = scrapertools.find_single_match(opciones, patron)
        if url_ver:
            new_item = Item(channel=item.channel, action="findvideos", title=title,
                            thumbnail=thumbnail, url=url_ver,
                            infoLabels={"year": year}, text_color=color1)

            cat_padres = cat_padres.strip()
            if cat_padres in ["peliculas-dvdrip", "HDRIP", "cartelera"]:
                # if item.extra == 'movie':
                new_item.contentTitle = title
                new_item.extra = "movie"

                # Only accept values that look like a quality tag
                patron = ("rip|dvd|screener|hd|ts|Telesync")
                if re.search(patron, cat_hijos, flags=re.IGNORECASE):
                    new_item.contentQuality = cat_hijos
                    new_item.title = "%s [%s]" % (title, cat_hijos)
                elif cat_padres == "peliculas-dvdrip":
                    new_item.contentQuality = "DVDRIP"
                    new_item.title = "%s [DVDRIP]" % title
                elif cat_padres == "HDRIP":
                    new_item.contentQuality = "HDRIP"
                    new_item.title = "%s [HDRIP]" % title

            elif cat_padres == "series":
                new_item.contentSerieName = cat_hijos

                patron = re.compile('(\d+)x(\d+)')
                matches = patron.findall(title)
                if len(matches) == 1:
                    new_item.contentSeason = matches[0][0]
                    new_item.contentEpisodeNumber = matches[0][1].zfill(2)
                    new_item.extra = "episodie"
                else:
                    # matches == [('1', '01'), ('1', '02'), ('1', '03')]
                    new_item.extra = "multi-episodie"

            else:
                # Other categories we are not interested in for now
                continue

            ''' Optionally we could also get the torrent and direct-download links
            patron = '<a href="http://estrenosli.org/descarga-directa-([^"]+)'
            new_item.url_descarga = scrapertools.find_single_match(opciones, patron)
            patron = '<a href="http://estrenosli.org/descargar-torrent-([^"]+).*?'
            new_item.url_torrent = scrapertools.find_single_match(opciones, patron)'''

            itemlist.append(new_item)

    if itemlist:
        # Get the basic metadata for all the movies using multiple threads
        tmdb.set_infoLabels(itemlist)

        # Add pagination if needed
        patron = '<div class="sPages">.*?'
        patron += '<a href="([^"]+)">Siguiente'
        url_next_page = scrapertools.find_single_match(data, patron)
        if url_next_page:
            itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                                 thumbnail=thumbnail_host, url=HOST + url_next_page,
                                 folder=True, text_color=color3, text_blod=True))

    return itemlist

def listado(item):
    # import json
    logger.info("[pepecine.py] listado")
    itemlist = []

    try:
        data_dict = jsontools.load_json(scrapertools.cache_page(item.url))
    except:
        return itemlist  # Return an empty list

    offset = scrapertools.get_match(item.url, 'offset=(\d*)')
    limit = scrapertools.get_match(item.url, 'limit=(\d*)')

    for i in data_dict["result"]:
        infoLabels = {}
        idioma = ''

        if item.extra == "movie":
            action = "get_movie"
            title = i["title"] + ' (' + i['year'] + ')'
            url = urlparse.urljoin(__url_base__, "peliculas-online/" + str(i["id"]))  # +"-"+i["title"]).lower().replace(" ","-")))
        elif item.extra == "series":
            action = "get_temporadas"
            title = i["title"]
            infoLabels['tvshowtitle'] = i["title"]
            url = urlparse.urljoin(__url_base__, "series-online/" + str(i["id"]))  # +"-"+i["title"]).lower().replace(" ","-")))
        else:  # item.extra=="series_novedades":
            action = "get_only_episodio"
            infoLabels['season'] = i['season']
            infoLabels['episode'] = i['episode'].zfill(2)
            item.extra = infoLabels["season"] + "x" + infoLabels["episode"]
            infoLabels['tvshowtitle'] = i["title"]
            flag = scrapertools.find_single_match(i["label"], '(\s*\<img src=.*\>)')
            idioma = i["label"].replace(flag, "")
            title = i["title"] + ' ' + item.extra + ' (' + idioma + ')'
            url = urlparse.urljoin(__url_base__, "series-online/" + str(i["id"]))  # +"-"+i["title"]).lower().replace(" ","-")))

        if i.has_key("poster") and i["poster"]:
            thumbnail = re.compile("/w\d{3}/").sub("/w500/", i["poster"])
        else:
            thumbnail = item.thumbnail
        if i.has_key("background") and i["background"]:
            fanart = i["background"]
        else:
            fanart = item.fanart

        # Fill in the infoLabels dictionary
        infoLabels['title_id'] = i['id']  # title_id: movie/show identifier on pepecine.com
        infoLabels['titleraw'] = i["title"]  # titleraw: unformatted movie/show title
        if i['genre']: infoLabels['genre'] = i['genre']
        if i['year']: infoLabels['year'] = i['year']
        if i['tagline']: infoLabels['plotoutline'] = i['tagline']
        if i['plot']:
            infoLabels['plot'] = i['plot']
        else:
            infoLabels['plot'] = ""
        if i['runtime']: infoLabels['duration'] = int(i['runtime']) * 60
        if i['imdb_rating']:
            infoLabels['rating'] = i['imdb_rating']
        elif i['tmdb_rating']:
            infoLabels['rating'] = i['tmdb_rating']

        newItem = Item(channel=item.channel, action=action, title=title, url=url,
                       extra=item.extra, fanart=fanart, thumbnail=thumbnail,
                       viewmode="movie_with_plot", language=idioma,
                       text_color="0xFFFFCE9C", infoLabels=infoLabels)
        newItem.year = i['year']
        newItem.contentTitle = i['title']
        if 'season' in infoLabels and infoLabels['season']:
            newItem.contentSeason = infoLabels['season']
        if 'episode' in infoLabels and infoLabels['episode']:
            newItem.contentEpisodeNumber = infoLabels['episode']
        itemlist.append(newItem)

    # Pagination
    if int(data_dict["total_results"]) == int(limit):
        url = item.url.replace('offset=' + offset, 'offset=' + str(int(offset) + int(limit)))
        itemlist.append(Item(channel=item.channel, action="listado", text_color="0xFF994D00",
                             title=">> Pagina siguiente >>", thumbnail=item.thumbnail,
                             url=url, extra=item.extra, fanart=fanart_host))

    return itemlist

def listado(item):
    logger.info(item)
    itemlist = []

    data = downloadpage(item.url)

    patron = '<a class="extended" href=".([^"]+).*?'
    patron += '<img class="centeredPicFalse"([^>]+).*?'
    patron += '<span class="year">(\d{4})</span>.*?'
    patron += '<span class="title">(.*?)</span>'

    for url, pic, year, title in scrapertools.find_multiple_matches(data, patron):
        thumbnail = scrapertools.find_single_match(pic, 'src="([^"]+)')
        if not thumbnail:
            thumbnail = HOST + "/images/cover-notfound.png"

        new_item = Item(channel=__channel__, thumbnail=thumbnail, text_color=color2,
                        infoLabels={"year": year})

        if "galep.php" in item.url:
            # movie
            new_item.contentTitle = title
            new_item.action = "findvideos"
            new_item.url = HOST + url.replace('peli.php?id=', 'venlaces.php?npl=')
        elif "gales.php" in item.url:
            # tvshow
            title = title.replace(' - 0x0', '')
            new_item.contentSerieName = title
            new_item.action = "temporadas"
            new_item.url = HOST + url

            if "ord=reci" in item.url:
                # episode
                season_episode = scrapertools.get_season_and_episode(title)
                if season_episode:
                    new_item.contentSeason, new_item.contentEpisodeNumber = season_episode.split('x')
                    new_item.action = "get_episodio"
                    new_item.contentSerieName = title.split('-', 1)[1].strip()
            elif "gener=" in item.url and scrapertools.get_season_and_episode(title):
                # Genre-filtered series return both episodes and full shows
                title = title.split('-', 1)[1].strip()
                new_item.contentSerieName = title
        else:
            return []

        new_item.title = "%s (%s)" % (title, year)
        itemlist.append(new_item)

    if itemlist:
        # Get the basic metadata using multiple threads
        tmdb.set_infoLabels(itemlist)

        # Add pagination if needed
        if len(itemlist) == 35:
            empen = scrapertools.find_single_match(item.url, 'empen=(\d+)')
            url_next_page = item.url.replace('empen=%s' % empen, 'empen=%s' % (int(empen) + 35))
            itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                                 thumbnail=thumbnail_host, url=url_next_page, folder=True,
                                 text_color=color3, text_blod=True))

    return itemlist

def mainlist(item): logger.info("pelisalacarta.channels.descargas mainlist") itemlist = [] status_color = {0: "orange", 1: "orange", 2: "green", 3: "red"} item.url = config.get_setting("downloadlistpath") series = {} for file in sorted(filetools.listdir(item.url)): file = os.path.join(item.url, file) # Saltamos los archivos que no sean .json if not file.endswith(".json"): continue i = Item().fromjson(filetools.read(file)) i.path = file i.thumbnail = i.contentThumbnail #item.serie_name significa que estamos desntro de una serie if not item.serie_name: # Series if i.contentType == "episode": serie_name = "%s [%s]" % (i.contentSerieName, i.contentChannel) if not serie_name in series: title = serie_name series = { serie_name: { "downloadProgress": [i.downloadProgress], "downloadStatus": [i.downloadStatus] } } i.title = title i.contentSeason = "" i.contntEpisodeNumber = "" i.action = "mainlist" i.serie_name = serie_name itemlist.append(i) else: series[serie_name]["downloadProgress"].append( i.downloadProgress) series[serie_name]["downloadStatus"].append( i.downloadStatus) # Peliculas elif i.contentType == "movie": i.title = "[COLOR %s][%i%%][/COLOR] %s" % (status_color[ i.downloadStatus], i.downloadProgress, i.contentTitle) itemlist.append(i) else: if i.contentType == "episode": serie_name = "%s [%s]" % (i.contentSerieName, i.contentChannel) if serie_name == item.serie_name: i.title = "[COLOR %s][%i%%][/COLOR] %s" % (status_color[ i.downloadStatus], i.downloadProgress, i.contentTitle) itemlist.append(i) for i in itemlist: if i.title in series: if any(series[i.title]["downloadStatus"]) == 3: downloadStatus = 3 elif all(series[i.title]["downloadStatus"]) == 2: downloadStatus = 2 else: downloadStatus = 1 downloadProgress = sum(series[i.title]["downloadProgress"]) / len( series[i.title]["downloadProgress"]) i.title = "[COLOR %s][%i%%][/COLOR] %s [%s]" % ( status_color[downloadStatus], downloadProgress, i.contentSerieName, i.contentChannel) i.downloadStatus = downloadStatus estados = [i.downloadStatus for i in itemlist] # Si hay alguno completado if 2 in estados: itemlist.insert( 0, Item(channel=item.channel, action="clean_ready", title="Eliminar descargas completadas", url=config.get_setting("downloadlistpath"), serie_name=item.serie_name)) # Si hay alguno con error if 3 in estados: itemlist.insert( 0, Item(channel=item.channel, action="restart_error", title="Reiniciar descargas con error", url=config.get_setting("downloadlistpath"), serie_name=item.serie_name)) # Si hay alguno pendiente if 1 in estados or 0 in estados: itemlist.insert( 0, Item(channel=item.channel, action="download_all", title="Descargar todo", url=config.get_setting("downloadlistpath"), serie_name=item.serie_name)) if len(itemlist): itemlist.insert( 0, Item(channel=item.channel, action="clean_all", title="Eliminar todo", url=config.get_setting("downloadlistpath"), serie_name=item.serie_name)) return itemlist
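# --- Illustrative sketch (not part of the channel): the per-series aggregation
# in mainlist() above, with the intended semantics spelled out (statuses:
# 0/1 pending, 2 finished, 3 error). Any failed episode marks the whole series
# as failed; the series counts as finished only when every episode is.
def aggregate_status(statuses):
    """Collapse a list of episode download statuses into one series status."""
    if 3 in statuses:
        return 3
    if all(s == 2 for s in statuses):
        return 2
    return 1

# aggregate_status([2, 2, 3]) -> 3; aggregate_status([2, 2]) -> 2; aggregate_status([1, 2]) -> 1
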
def seasons(item):
    logger.info()
    itemlist = []
    seasons = []

    soup = get_source(item.url, soup=True, ignore_response_code=True)
    json = jsontools.load(soup.find('script', id='__NEXT_DATA__').text)

    # NOTE: the episode-search API returns no results, re-check later
    # NOTE[2]: that API is still down. I will probably drop it entirely
    # Look up the "content_id" required to query the site's API
    # This no longer works: the API changed and no longer allows lookup by ID
    # content_id = json['props']['pageProps'].get('id')
    # Fetch the episodes JSON from the API to classify seasons (they come in batches)
    # episodios = get_source('https://fapi.comamosramen.com/api/byUniqId/{}'.format(content_id), json=True)

    seriesdata = json['props']['pageProps']['data']
    seasons = seriesdata['seasons']

    # Walk the season list for processing
    for season in seasons:
        title, language = set_lang(seriesdata['title'])
        infoLabels = {'year': seriesdata['metadata'].get('year')}
        ogtitle = title

        if item.language:
            language = item.language
        elif seriesdata['metadata'].get('tags'):
            language = 'LAT' if 'Audio Latino' in seriesdata['metadata'].get('tags') else language

        contentType = 'movie' if seriesdata.get('type') and seriesdata['type'].lower() in ['pelicula'] else 'tvshow'

        it = Item(action='episodesxseason', channel=item.channel, contentType=contentType,
                  contentSeason=season['season'], infoLabels=infoLabels,
                  json_episodios=season['episodes'], language=language,
                  plot=seriesdata['description'], thumbnail=item.thumbnail,
                  title=(config.get_localized_string(60027) % str(season['season'])),
                  url=item.url, viewType='episodes')

        # Fill the item according to its contentType
        if contentType == 'movie':
            it.contentTitle = ogtitle
        else:
            it.contentSeason = season['season']
            it.contentSerieName = ogtitle

        itemlist.append(it)

    # Assign the infoLabels (if applicable)
    if not item.videolibrary:
        tmdb.set_infoLabels(itemlist, True, force_no_year=True)

    # If there is only one season, return the episodes directly
    if len(itemlist) == 1:
        itemlist = episodesxseason(itemlist[0])

    if not len(itemlist) > 0:
        return []

    # Add the "Add to videolibrary" entry
    if len(itemlist) > 0 and config.get_videolibrary_support() \
            and not itemlist[0].contentType == 'movie' and not item.videolibrary:
        itemlist.append(Item(action="add_serie_to_library", channel=item.channel,
                             contentType='tvshow', contentSerieName=item.contentSerieName,
                             extra="episodios",
                             title='[COLOR yellow]{}[/COLOR]'.format(config.get_localized_string(60352)),
                             url=item.url))

    return itemlist

def seasons(item):
    logger.info()
    itemlist = []
    seasons = []

    # Fetch the HTML and load the JSON
    soup = create_soup(item.url)
    json = jsontools.load(soup.find('script', id='__NEXT_DATA__').text)

    # Look up the "content_id" required to query the site's API
    content_id = json['props']['pageProps'].get('id')
    if not content_id:
        id_ = item.url.replace(host, '').split('/')[2].split('-', 1)
        content_id = '{}-{}'.format(id_[0], id_[1].replace('-', '%20'))

    # Fetch the episodes JSON from the API to classify seasons (they come in batches)
    episodios = httptools.downloadpage('https://fapi.comamosramen.com/api/byUniqId/{}'.format(content_id)).json

    # Walk the episode list and derive the seasons from changes between consecutive entries
    for episodio in episodios['temporadas']:
        if len(seasons) > 0 and seasons[-1]['temporada'] == int(episodio['temporada']):
            seasons[-1]['episodios'].append(episodio)
        else:
            seasons.append({'temporada': int(episodio['temporada']), 'episodios': []})
            seasons[-1]['episodios'].append(episodio)

    # Walk the season list for processing
    for season in seasons:
        title, language = set_lang(episodios.get('titulo'))
        infoLabels = {'year': episodios.get('año')}
        ogtitle = title

        # Language determination
        if item.language:
            language = item.language
        if episodios.get('categorias'):
            if 'Audio Latino' in episodios.get('categorias'):
                language = 'LAT'

        # Dynamic contentType determination
        if episodios.get('tipo'):
            if episodios.get('tipo') in ['pelicula']:
                contentType = 'movie'
            else:
                contentType = 'tvshow'
        else:
            contentType = ''

        it = Item(action='episodesxseason', channel=item.channel, contentType=contentType,
                  infoLabels=infoLabels, json_episodios=season['episodios'],
                  language=language, plot=episodios.get('descripcion'),
                  thumbnail=item.thumbnail,
                  title=unify.add_languages((config.get_localized_string(60027) % str(season['temporada'])),
                                            language),
                  url=item.url)

        # Fill the item according to its contentType
        if contentType == 'movie':
            it.contentTitle = ogtitle
        else:
            it.contentSeason = season['temporada']
            it.contentSerieName = ogtitle

        itemlist.append(it)

    # Assign the infoLabels (if applicable)
    if not item.videolibrary:
        tmdb.set_infoLabels(itemlist, True, force_no_year=True)

    # If there is only one season, return the episodes directly
    if len(itemlist) == 1:
        itemlist = episodesxseason(itemlist[0])

    # Add the "Add to videolibrary" entry
    if len(itemlist) > 0 and config.get_videolibrary_support() \
            and not itemlist[0].contentType == 'movie' and not item.videolibrary:
        itemlist.append(Item(action="add_serie_to_library", channel=item.channel,
                             contentType='tvshow', contentSerieName=item.contentSerieName,
                             extra="episodios",
                             title='[COLOR yellow]{}[/COLOR]'.format(config.get_localized_string(60352)),
                             url=item.url))

    return itemlist

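# --- Illustrative sketch (not part of the channel): the grouping step above
# folds the API's flat episode list into per-season batches by comparing each
# entry's 'temporada' key with that of the previous entry, so only consecutive
# episodes of the same season are merged. Standalone equivalent:
def group_by_season(episodios):
    """Group a flat list of episode dicts (with a 'temporada' key) into seasons."""
    seasons = []
    for episodio in episodios:
        if seasons and seasons[-1]['temporada'] == int(episodio['temporada']):
            seasons[-1]['episodios'].append(episodio)
        else:
            seasons.append({'temporada': int(episodio['temporada']), 'episodios': [episodio]})
    return seasons

# group_by_season([{'temporada': '1'}, {'temporada': '1'}, {'temporada': '2'}])
# -> [{'temporada': 1, 'episodios': [..., ...]}, {'temporada': 2, 'episodios': [...]}]
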
def save_library_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Saves in the given path all the episodes contained in episodelist.

    @type path: str
    @param path: path where the episodes are saved
    @type episodelist: list
    @param episodelist: list of items representing the episodes to be saved
    @type serie: item
    @param serie: show whose episodes are being saved
    @type silent: bool
    @param silent: whether the notification is shown
    @param overwrite: allows overwriting existing files
    @type overwrite: bool
    @rtype insertados: int
    @return: number of inserted episodes
    @rtype sobreescritos: int
    @return: number of overwritten episodes
    @rtype fallidos: int
    @return: number of failed episodes
    """
    logger.info()

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    if overwrite == "everything":
        overwrite = True
        overwrite_everything = True
    else:
        overwrite_everything = False

    # List every file of the show at once, so we do not have to check for each one individually
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]

    # Silent suppresses the progress dialog (used by library_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress('streamondemand', 'Aggiunta episodi...')
        p_dialog.update(0, 'Aggiunta episodio...')

    # fix float because division misbehaves in python 2.x
    t = float(100) / len(episodelist)

    for i, e in enumerate(episodelist):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Aggiunta episodio...', e.title)

        try:
            season_episode = scrapertools.get_season_and_episode(e.title.lower())
            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        except:
            continue

        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        strm_exists_before = True
        nfo_exists_before = True
        json_exists_before = True

        if not strm_exists or overwrite_everything:
            if not overwrite_everything:
                strm_exists_before = False
            # If season_episode.strm does not exist, create it
            item_strm = Item(action='play_from_library', channel='biblioteca',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_idiomas:
                # if tvshow.nfo has a filter, pass it on to the item_strm being generated
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if (not nfo_exists or overwrite_everything) and e.infoLabels.get("imdb_id"):
            if not overwrite_everything:
                nfo_exists_before = False
            # If season_episode.nfo does not exist, create it
            if e.infoLabels["tmdb_id"]:
                scraper.find_and_set_infoLabels(e)
                head_nfo = "https://www.themoviedb.org/tv/%s/season/%s/episode/%s\n" % (
                    e.infoLabels['tmdb_id'], e.contentSeason, e.contentEpisodeNumber)
            elif e.infoLabels["tvdb_id"]:
                head_nfo = e.url_scraper
            else:
                head_nfo = "Aqui ira el xml"  # TODO

            item_nfo = e.clone(channel="biblioteca", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))

            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())

        # Only continue if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Get the episode infoLabels
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)

                e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists or overwrite_everything:
                        if not overwrite_everything:
                            json_exists_before = False
                            logger.info("Insertado: %s" % json_path)
                        else:
                            logger.info("Sobreescritos todos los archivos!")
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentTitle] = 0

                        if (not overwrite_everything and not json_exists):
                            json_exists = True
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1
        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1

        if (not strm_exists_before or not nfo_exists_before or not json_exists_before):
            if (strm_exists and nfo_exists and json_exists):
                insertados += 1
            else:
                logger.error("El archivo strm, nfo o json no existe")

        if not silent and p_dialog.iscanceled():
            break

    if not silent:
        p_dialog.close()

    if news_in_playcounts:
        # If there are new episodes, mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            if tvshow_item.active == 30:
                tvshow_item.active = 1
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1

        # ... and refresh the Kodi library
        if config.is_xbmc() and not silent:
            from platformcode import xbmc_library
            xbmc_library.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos

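# --- Illustrative sketch (assumption): this variant accepts a third overwrite
# mode, the string "everything", which also forces existing .strm/.nfo files to
# be rewritten, while a plain True only rewrites the .json files:
def parse_overwrite(overwrite):
    """Return (overwrite_json, overwrite_everything) from the mixed-type flag."""
    if overwrite == "everything":
        return True, True
    return bool(overwrite), False

# parse_overwrite(True) -> (True, False); parse_overwrite("everything") -> (True, True)
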
def listado(item):
    logger.info()
    itemlist = []

    try:
        data_dict = jsontools.load_json(httptools.downloadpage(item.url).data)
    except:
        return itemlist  # Return an empty list

    # Filtering and search
    if item.filtro:
        for i in data_dict["result"][:]:
            if (item.filtro[0] == "genero" and item.filtro[1] not in i['genre'].lower()) or \
                    (item.filtro[0] == "search" and item.filtro[1] not in i['title'].lower()):
                data_dict["result"].remove(i)

    if not item.page:
        item.page = 0

    offset = int(item.page) * 30
    limit = offset + 30

    for i in data_dict["result"][offset:limit]:
        infoLabels = InfoLabels()
        idioma = ''

        if item.extra == "movie":
            action = "findvideos"
            # viewcontent = 'movies'
            infoLabels["title"] = i["title"]
            title = '%s (%s)' % (i["title"], i['year'])
            url = urlparse.urljoin(__url_base__, "ver-pelicula-online/" + str(i["id"]))
        elif item.extra == "series":
            action = "get_temporadas"
            # viewcontent = 'seasons'
            title = i["title"]
            infoLabels['tvshowtitle'] = i["title"]
            url = urlparse.urljoin(__url_base__, "episodio-online/" + str(i["id"]))
        else:  # item.extra=="series_novedades":
            action = "findvideos"
            # viewcontent = 'episodes'
            infoLabels['tvshowtitle'] = i["title"]
            infoLabels['season'] = i['season']
            infoLabels['episode'] = i['episode'].zfill(2)
            flag = scrapertools.find_single_match(i["label"], '(\s*\<img src=.*\>)')
            idioma = i["label"].replace(flag, "")
            title = '%s %sx%s (%s)' % (i["title"], infoLabels["season"], infoLabels["episode"], idioma)
            url = urlparse.urljoin(__url_base__, "episodio-online/" + str(i["id"]))

        if i.has_key("poster") and i["poster"]:
            thumbnail = re.compile("/w\d{3}/").sub("/w500/", i["poster"])
        else:
            thumbnail = item.thumbnail
        if i.has_key("background") and i["background"]:
            fanart = i["background"]
        else:
            fanart = item.fanart

        # Fill in the infoLabels dictionary
        infoLabels['title_id'] = i['id']  # title_id: movie/show identifier on pepecine.com
        if i['genre']: infoLabels['genre'] = i['genre']
        if i['year']: infoLabels['year'] = i['year']
        # if i['tagline']: infoLabels['plotoutline'] = i['tagline']
        if i['plot']:
            infoLabels['plot'] = i['plot']
        else:
            infoLabels['plot'] = ""
        if i['runtime']: infoLabels['duration'] = int(i['runtime']) * 60
        if i['imdb_rating']:
            infoLabels['rating'] = i['imdb_rating']
        elif i['tmdb_rating']:
            infoLabels['rating'] = i['tmdb_rating']
        if i['tmdb_id']: infoLabels['tmdb_id'] = i['tmdb_id']
        if i['imdb_id']: infoLabels['imdb_id'] = i['imdb_id']

        newItem = Item(channel=item.channel, action=action, title=title, url=url,
                       extra=item.extra, fanart=fanart, thumbnail=thumbnail,
                       viewmode="movie_with_plot",
                       # viewcontent=viewcontent,
                       language=idioma, text_color="0xFFFFCE9C", infoLabels=infoLabels)
        newItem.year = i['year']
        newItem.contentTitle = i['title']
        if 'season' in infoLabels and infoLabels['season']:
            newItem.contentSeason = infoLabels['season']
        if 'episode' in infoLabels and infoLabels['episode']:
            newItem.contentEpisodeNumber = infoLabels['episode']
        itemlist.append(newItem)

    # Get the basic metadata using multiple threads
    tmdb.set_infoLabels(itemlist)

    # Pagination
    if len(data_dict["result"]) > limit:
        itemlist.append(item.clone(text_color="0xFF994D00", title=">> Pagina siguiente >>",
                                   page=item.page + 1))

    return itemlist

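# --- Illustrative sketch (assumption): listado() above pages its results with a
# fixed window of 30 entries per page; the "next page" item simply carries page+1.
def page_window(page, page_size=30):
    """Return the (offset, limit) slice bounds for a 0-based page number."""
    offset = int(page) * page_size
    return offset, offset + page_size

# page_window(0) -> (0, 30); page_window(2) -> (60, 90)
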
def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Saves to the given path every episode contained in episodelist.
    @type path: str
    @param path: folder where the episodes are saved
    @type episodelist: list
    @param episodelist: list of items representing the episodes to save
    @type serie: item
    @param serie: show the episodes belong to
    @type silent: bool
    @param silent: whether the progress notification is shown
    @param overwrite: allows overwriting existing files
    @type overwrite: bool
    @rtype insertados: int
    @return: number of episodes inserted
    @rtype sobreescritos: int
    @return: number of episodes overwritten
    @rtype fallidos: int
    @return: number of episodes that failed
    """
    logger.info()

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # List every file of the show at once, so we avoid checking them one by one
    raiz, carpetas_series, ficheros = next(filetools.walk(path))
    ficheros = [filetools.join(path, f) for f in ficheros]

    # Collect SxEE tags that have media files but no matching .strm
    nostrm_episodelist = []
    for root, folders, files in filetools.walk(path):
        for file in files:
            season_episode = scrapertools.get_season_and_episode(file)
            if season_episode == "" or filetools.exists(filetools.join(path, "%s.strm" % season_episode)):
                continue
            nostrm_episodelist.append(season_episode)
    nostrm_episodelist = sorted(set(nostrm_episodelist))

    # silent hides the progress dialog (used by videolibrary_service)
    if not silent:
        p_dialog = platformtools.dialog_progress(config.get_localized_string(20000),
                                                 config.get_localized_string(60064))
        p_dialog.update(0, config.get_localized_string(60065))

    # Prepare the emergency-urls handling
    channel_alt = generictools.verify_channel(serie.channel)
    emergency_urls_stat = config.get_setting("emergency_urls", channel_alt)  # Does the channel want emergency urls?
    emergency_urls_succ = False
    channel = __import__('channels.%s' % channel_alt, fromlist=["channels.%s" % channel_alt])
    if serie.torrent_caching_fail:  # If the conversion process failed, nothing is cached
        emergency_urls_stat = 0
        del serie.torrent_caching_fail

    new_episodelist = []
    # Extract season and episode numbers and discard the items that have none
    tags = []
    if config.get_setting("enable_filter", "videolibrary"):
        tags = [x.strip() for x in config.get_setting("filters", "videolibrary").lower().split(",")]

    for e in episodelist:
        headers = {}
        if e.headers:
            headers = e.headers
        if tags and any(tag in e.title.lower() for tag in tags):
            continue

        try:
            season_episode = scrapertools.get_season_and_episode(e.title)
            if not season_episode:
                continue

            # If the emergency-url option is set, the urls are added to each
            # episode after running the channel's findvideos
            if e.emergency_urls and isinstance(e.emergency_urls, dict):
                del e.emergency_urls  # Drop leftovers from previous runs
            # Path of the episode's .json
            json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

            if emergency_urls_stat == 1 and not e.emergency_urls and e.contentType == 'episode':  # Save emergency urls?
                if not silent:
                    p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title)
                if json_path in ficheros:  # If the .json exists, take the urls from it
                    if overwrite:  # but only if the .json files are being overwritten
                        json_epi = Item().fromjson(filetools.read(json_path))  # Read the .json
                        if json_epi.emergency_urls:  # if the emergency urls exist...
                            e.emergency_urls = json_epi.emergency_urls  # ... copy them
                        else:  # and if not...
                            e = emergency_urls(e, channel, json_path, headers=headers)  # ... generate them
                else:
                    e = emergency_urls(e, channel, json_path, headers=headers)  # Episode not on disk: generate the urls
                if e.emergency_urls:  # If we now have urls...
                    emergency_urls_succ = True  # ... it is a success and the .nfo will be flagged
            elif emergency_urls_stat == 2 and e.contentType == 'episode':  # Delete emergency urls?
                if e.emergency_urls:
                    del e.emergency_urls
                emergency_urls_succ = True  # ... it is a success and the .nfo will be flagged
            elif emergency_urls_stat == 3 and e.contentType == 'episode':  # Update emergency urls?
                if not silent:
                    p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title)
                e = emergency_urls(e, channel, json_path, headers=headers)  # Generate the urls
                if e.emergency_urls:  # If we now have urls...
                    emergency_urls_succ = True  # ... it is a success and the .nfo will be flagged

            # On multi-channel shows the infoLabels of the current channel
            # prevail over the original one's
            if not e.infoLabels["tmdb_id"] or \
                    (serie.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != serie.infoLabels["tmdb_id"]):
                e.infoLabels = serie.infoLabels

            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")

            if e.videolibray_emergency_urls:
                del e.videolibray_emergency_urls
            if e.channel_redir:
                del e.channel_redir  # ... and clear the redirection marks

            new_episodelist.append(e)
        except:
            if e.contentType == 'episode':
                logger.error("No se ha podido guardar las urls de emergencia de %s en la videoteca" % e.contentTitle)
                logger.error(traceback.format_exc())
            continue

    # No episode list left, nothing to save
    if not len(new_episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    # float fix: integer division truncates in Python 2.x
    try:
        t = float(100) / len(new_episodelist)
    except:
        t = 0

    last_season_episode = ''
    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), config.get_localized_string(60064), e.title)

        high_sea = e.contentSeason
        high_epi = e.contentEpisodeNumber
        if scrapertools.find_single_match(e.title, '[aA][lL]\s*(\d+)'):
            high_epi = int(scrapertools.find_single_match(e.title, 'al\s*(\d+)'))
        max_sea = e.infoLabels["number_of_seasons"]
        max_epi = 0
        if e.infoLabels["number_of_seasons"] and \
                (e.infoLabels["temporada_num_episodios"] or e.infoLabels["number_of_seasons"] == 1):
            if e.infoLabels["number_of_seasons"] == 1 and e.infoLabels["number_of_episodes"]:
                max_epi = e.infoLabels["number_of_episodes"]
            else:
                max_epi = e.infoLabels["temporada_num_episodios"]

        season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())
        if season_episode in nostrm_episodelist:
            logger.error('Error en la estructura de la Videoteca: Serie ' + serie.contentSerieName + ' ' + season_episode)
            continue

        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        if not strm_exists:
            # season_episode.strm does not exist yet: create it
            item_strm = Item(action='play_from_library', channel='videolibrary',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_language:
                # if tvshow.nfo carries a filter, pass it on to the item_strm being generated
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # season_episode.nfo does not exist yet: create it
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)

            item_nfo = e.clone(channel="videolibrary", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))
            if item_nfo.emergency_urls:
                del item_nfo.emergency_urls  # Only kept in the episode's .json

            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())

        # Continue only if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Get the episode's infoLabels
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)

                # On multi-channel shows the infoLabels of the current channel prevail over the original one's
                if not e.infoLabels["tmdb_id"] or \
                        (item_nfo.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]):
                    e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Flag the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Flag the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Flag the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentSerieName] = 0
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1
        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    #logger.debug('high_sea x high_epi: %sx%s' % (str(high_sea), str(high_epi)))
    #logger.debug('max_sea x max_epi: %sx%s' % (str(max_sea), str(max_epi)))

    if not silent:
        p_dialog.close()

    if news_in_playcounts or emergency_urls_succ or serie.infoLabels["status"] == "Ended" \
            or serie.infoLabels["status"] == "Canceled":
        # If there are new episodes, flag them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            # If inserting/deleting emergency urls in the episodes' .json files succeeded, flag the .nfo
            if emergency_urls_succ:
                if tvshow_item.emergency_urls and not isinstance(tvshow_item.emergency_urls, dict):
                    del tvshow_item.emergency_urls
                if emergency_urls_stat in [1, 3]:  # Save/update links
                    if not tvshow_item.emergency_urls:
                        tvshow_item.emergency_urls = dict()
                    if tvshow_item.library_urls.get(serie.channel, False):
                        tvshow_item.emergency_urls.update({serie.channel: True})
                elif emergency_urls_stat == 2:  # Delete links
                    if tvshow_item.emergency_urls and tvshow_item.emergency_urls.get(serie.channel, False):
                        tvshow_item.emergency_urls.pop(serie.channel, None)  # Drop the entry from the .nfo

            if tvshow_item.active == 30:
                tvshow_item.active = 1
            if tvshow_item.infoLabels["tmdb_id"] == serie.infoLabels["tmdb_id"]:
                tvshow_item.infoLabels = serie.infoLabels
                tvshow_item.infoLabels["title"] = tvshow_item.infoLabels["tvshowtitle"]

            if max_sea == high_sea and max_epi == high_epi \
                    and (tvshow_item.infoLabels["status"] == "Ended" or tvshow_item.infoLabels["status"] == "Canceled") \
                    and insertados == 0 and fallidos == 0:
                tvshow_item.active = 0  # ... and stop updating the show
                logger.debug("%s [%s]: serie 'Terminada' o 'Cancelada'. Se desactiva la actualización periódica" %
                             (serie.contentSerieName, serie.channel))

            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            logger.error("No se ha podido guardar las urls de emergencia de %s en la videoteca" % tvshow_item.contentSerieName)
            logger.error(traceback.format_exc())
            fallidos = -1
    else:
        # ... and if everything went well, refresh the Kodi video library
        if config.is_xbmc() and not silent:
            from platformcode import xbmc_videolibrary
            xbmc_videolibrary.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))

    return insertados, sobreescritos, fallidos
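# Illustrative sketch, not part of the addon: the file-name scheme that
# save_episodes() uses for each episode, one .strm, one .nfo and one
# per-channel .json, all keyed by the "SxEE" tag. `build_episode_paths` is a
# hypothetical helper written only for this example; the real code goes
# through filetools.join, while this sketch uses os.path to stay standalone.
import os

def build_episode_paths(path, season, episode, channel):
    """Return the (strm, nfo, json) paths for one episode of a show."""
    season_episode = "%sx%s" % (season, str(episode).zfill(2))
    return (os.path.join(path, "%s.strm" % season_episode),
            os.path.join(path, "%s.nfo" % season_episode),
            os.path.join(path, ("%s [%s].json" % (season_episode, channel)).lower()))

# Example: season 2, episode 5 of a show scraped by a channel called "foo".
strm, nfo, json_file = build_episode_paths("/library/Show", 2, 5, "foo")
assert strm.endswith("2x05.strm") and json_file.endswith("2x05 [foo].json")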
def listado(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = '<div class="MiniFicha">.*?'
    patron += '<img src="([^"]+).*?'
    patron += '<div class="MiniF_TitleSpecial">[^>]+>([^<]+).*?'
    patron += '<b>Categoria:\s*</b>([^&]+)&raquo;\s*([^<]+).*?'
    patron += '<div class="OpcionesDescargasMini">(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for thumbnail, title, cat_padres, cat_hijos, opciones in matches:
        #logger.debug(thumbnail + "\n" + title + "\n" + cat_padres + "\n" + cat_hijos + "\n" + opciones)

        # Extract the year from the title and strip the leftovers
        patron = '\d{4}$'
        year = scrapertools.find_single_match(title, patron)
        if year:
            title = re.sub(patron, "", title)
            patron = '\s?-?\s?(line)?\s?-\s?$'
            title = re.sub(patron, "", title, flags=re.IGNORECASE)

        # Take the 'b' image because it is larger
        thumbnail = HOST + thumbnail[:-5] + 'b' + thumbnail[-4:]

        # Look for the watch-online option
        patron = '<a href="http://estrenosly.org/ver-online-([^"]+)'
        url_ver = scrapertools.find_single_match(opciones, patron)
        if url_ver:
            new_item = Item(channel=item.channel, action="findvideos", title=title,
                            thumbnail=thumbnail, url=url_ver,
                            infoLabels={"year": year}, text_color=color1)

            cat_padres = cat_padres.strip()
            if cat_padres in ["peliculas-dvdrip", "HDRIP", "cartelera"]:
                #if item.extra == 'movie':
                new_item.contentTitle = title
                new_item.extra = "movie"
                # Only accept valid names for the quality tag
                patron = "rip|dvd|screener|hd|ts|Telesync"
                if re.search(patron, cat_hijos, flags=re.IGNORECASE):
                    new_item.contentQuality = cat_hijos
                    new_item.title = "%s [%s]" % (title, cat_hijos)
                elif cat_padres == "peliculas-dvdrip":
                    new_item.contentQuality = "DVDRIP"
                    new_item.title = "%s [DVDRIP]" % title
                elif cat_padres == "HDRIP":
                    new_item.contentQuality = "HDRIP"
                    new_item.title = "%s [HDRIP]" % title

            elif cat_padres == "series":
                new_item.contentSerieName = cat_hijos
                patron = re.compile('(\d+)x(\d+)')
                matches = patron.findall(title)
                if len(matches) == 1:
                    new_item.contentSeason = matches[0][0]
                    new_item.contentEpisodeNumber = matches[0][1].zfill(2)
                    new_item.extra = "episodie"
                else:
                    # matches == [('1', '01'), ('1', '02'), ('1', '03')]
                    new_item.extra = "multi-episodie"

            else:
                # Other categories we are not interested in for now
                continue

            ''' Optionally we could also grab the torrent and direct-download links
            patron = '<a href="http://estrenosli.org/descarga-directa-([^"]+)'
            new_item.url_descarga = scrapertools.find_single_match(opciones, patron)
            patron = '<a href="http://estrenosli.org/descargar-torrent-([^"]+).*?'
            new_item.url_torrent = scrapertools.find_single_match(opciones, patron)'''

            itemlist.append(new_item)

    if itemlist:
        # Fetch the basic metadata of all the movies using multiple threads
        tmdb.set_infoLabels(itemlist)

        # Add pagination if needed
        patron = '<div class="sPages">.*?'
        patron += '<a href="([^"]+)">Siguiente'
        url_next_page = scrapertools.find_single_match(data, patron)
        if url_next_page:
            itemlist.append(Item(channel=item.channel, action="listado",
                                 title=">> Página siguiente", thumbnail=thumbnail_host,
                                 url=HOST + url_next_page, folder=True,
                                 text_color=color3, text_blod=True))

    return itemlist
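# Illustrative sketch, not part of the channel: how the '(\d+)x(\d+)' pattern
# in the "series" branch above separates single episodes ("1x03") from
# multi-episode packs ("1x01 al 1x03"). `demo_classify_title` is a
# hypothetical helper written only for this example; the extra tags mirror
# the strings the channel actually assigns.
import re

def demo_classify_title(title):
    """Return ('episodie', season, episode) or ('multi-episodie', None, None)."""
    matches = re.findall(r'(\d+)x(\d+)', title)
    if len(matches) == 1:
        return "episodie", matches[0][0], matches[0][1].zfill(2)
    return "multi-episodie", None, None

# Examples mirroring the branch above.
assert demo_classify_title("Serie 1x03") == ("episodie", "1", "03")
assert demo_classify_title("Serie 1x01 al 1x03")[0] == "multi-episodie"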