def get_episodios(item):
    """List the episodes stored in a local library show folder.

    Builds one Item per episode file found under ``item.path``: ``.strm``
    files are deserialized back into Items (action "findvideos"), while
    plain video files become directly playable local Items (action "play").

    @param item: folder item; ``item.path`` points to the show directory.
        When ``item.filtrar_season`` is set, only episodes whose season
        matches ``item.contentSeason`` are returned.
    @return: episode Items sorted with get_sort_temp_epi.
    """
    logger.info("pelisalacarta.channels.biblioteca get_episodios")
    itemlist = []
    # List the files of the show folder (Python 2 generator protocol)
    raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
    for i in ficheros:
        # strm files: they hold a serialized Item pointing to the channel
        if i.endswith(".strm"):
            # Fix: get_season_and_episode() returns "" when the filename has
            # no "SxE" pattern, and unpacking "".split("x") raises ValueError.
            # Skip such files instead of aborting the whole listing.
            try:
                season, episode = scrapertools.get_season_and_episode(i).split("x")
            except ValueError:
                continue
            # When filtering by season, skip episodes from other seasons
            if item.filtrar_season and int(season) != int(item.contentSeason):
                continue
            epi = Item().fromurl(filetools.read(filetools.join(raiz, i)))
            epi.contentChannel = item.contentChannel
            epi.path = filetools.join(raiz, i)
            epi.title = i
            epi.channel = "biblioteca"
            epi.action = "findvideos"
            epi.contentEpisodeNumber = episode
            epi.contentSeason = season
            # fix: overrides the text color if one came stored in the strm
            epi.text_color = ""
            # fix: keep the Kodi-library play method from being executed
            epi.strm = False
            itemlist.append(epi)
        # plain video files (anything that is not metadata or subtitles)
        elif not i.endswith(".nfo") and not i.endswith(".json") and not i.endswith(".srt"):
            try:
                season, episode = scrapertools.get_season_and_episode(i).split("x")
            except ValueError:
                continue
            # When filtering by season, skip episodes from other seasons
            if item.filtrar_season and int(season) != int(item.contentSeason):
                continue
            epi = Item()
            epi.contentChannel = "local"
            epi.path = filetools.join(raiz, i)
            epi.title = i
            epi.channel = "biblioteca"
            epi.action = "play"
            epi.contentEpisodeNumber = episode
            epi.contentSeason = season
            itemlist.append(epi)
    library.set_infolabels_from_library(itemlist, tipo="Episodes")
    return sorted(itemlist, key=get_sort_temp_epi)
def get_episodios(item):
    """List the episodes stored in a local library show folder.

    Builds one Item per episode file found under ``item.path``: ``.strm``
    files are deserialized back into Items (action "findvideos"), while
    plain video files become directly playable local Items (action "play").
    When ``item.filtrar_season`` is set, only episodes whose season matches
    ``item.contentSeason`` are returned.
    """
    logger.info("pelisalacarta.channels.biblioteca get_episodios")
    itemlist = []
    # Collect the episode files of the show (Python 2 generator .next())
    raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
    # Create one list entry per strm found
    for i in ficheros:
        # strm files: they hold a serialized Item pointing to the channel
        if i.endswith(".strm"):
            season, episode = scrapertools.get_season_and_episode(i).split("x")
            # When filtering by season, skip episodes from other seasons
            if item.filtrar_season and int(season) != int(item.contentSeason):
                continue
            epi = Item().fromurl(filetools.read(filetools.join(raiz, i)))
            epi.contentChannel = item.contentChannel
            epi.path = filetools.join(raiz, i)
            epi.title = i
            epi.channel = "biblioteca"
            epi.action = "findvideos"
            epi.contentEpisodeNumber = episode
            epi.contentSeason = season
            # fix: overrides the text color if one came stored in the strm
            epi.text_color = ""
            # fix: keep the Kodi-library play method from being executed
            epi.strm = False
            itemlist.append(epi)
        # plain video files (anything that is not metadata or subtitles)
        elif not i.endswith(".nfo") and not i.endswith(".json") and not i.endswith(".srt"):
            season, episode = scrapertools.get_season_and_episode(i).split("x")
            # When filtering by season, skip episodes from other seasons
            if item.filtrar_season and int(season) != int(item.contentSeason):
                continue
            epi = Item()
            epi.contentChannel = "local"
            epi.path = filetools.join(raiz, i)
            epi.title = i
            epi.channel = "biblioteca"
            epi.action = "play"
            epi.contentEpisodeNumber = episode
            epi.contentSeason = season
            itemlist.append(epi)
    library.set_infolabels_from_library(itemlist, tipo="Episodes")
    return sorted(itemlist, key=get_sort_temp_epi)
def episodesxseason(item):
    """Build the episode Items for one season.

    Episode data must arrive pre-loaded in ``item.json_episodios`` (a list
    of dicts with 'capitulo' and 'opciones' keys).  Returns the episode
    Items, or jumps straight to findvideos() for a single-entry movie.
    """
    logger.info()
    result = []
    for ep in item.json_episodios:
        # Language: the item's own value wins; otherwise fall back to VOSE
        # (the episode 'estado' flag can only confirm a subtitled release).
        if item.language:
            lang = item.language
        else:
            lang = 'VOSE'
            state = ep.get('estado')
            if state and 'SUBT' in state:
                lang = 'VOSE'
        ep_item = Item(action='findvideos',
                       channel=item.channel,
                       infoLabels=item.infoLabels,
                       language=lang,
                       thumbnail=item.thumbnail,
                       title=ep['capitulo'],
                       urls=ep['opciones'],
                       url=item.url)
        # Season/episode numbering only applies to series content
        if item.contentType != 'movie':
            ep_item.contentSeason = item.contentSeason
            ep_item.contentEpisodeNumber = ep['capitulo']
        result.append(ep_item)
    # Fetch infoLabels unless we are building for the video library
    if not item.videolibrary:
        tmdb.set_infoLabels(result, True)
    # Display titles become "SxE: title [language]"
    for ep_item in result:
        ep_item.title = unify.add_languages(
            '{}x{}: {}'.format(ep_item.contentSeason,
                               ep_item.contentEpisodeNumber,
                               ep_item.contentTitle),
            ep_item.language)
    # A single-entry movie goes straight to its video sources
    if item.contentType == 'movie' and len(result) == 1:
        return findvideos(result[0])
    return result
def novedades_episodios(item):
    """Scrape the 'new episodes' page and return one findvideos Item per entry."""
    logger.info("pelisalacarta.channels.animeflv novedades")
    data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS,
                                        host=CHANNEL_HOST)
    # Each entry in the page looks like:
    #   <div class="not">
    #     <a href="/ver/cyclops-shoujo-saipu-12.html" title="Cyclops Shoujo Saipu 12">
    #       <img class="imglstsr lazy" src="http://cdn.animeflv.net/img/mini/957.jpg" border="0">
    #       <span class="tit_ep"><span class="tit">Cyclops Shoujo Saipu 12</span></span>
    #     </a>
    #   </div>
    pattern = ('<div class="not"[^<]+<a href="([^"]+)" title="([^"]+)"[^<]+<img class="[^"]+" '
               'src="([^"]+)"[^<]+<span class="tit_ep"><span class="tit">([^<]+)<')
    itemlist = []
    for url_path, raw_name, thumb, raw_title in re.compile(pattern, re.DOTALL).findall(data):
        title = scrapertools.entityunescape(raw_title)
        episode_item = Item(channel=item.channel,
                            action="findvideos",
                            title=title,
                            url=urlparse.urljoin(item.url, url_path),
                            # the "portada" variant is the full-size artwork
                            thumbnail=urlparse.urljoin(item.url, thumb.replace("mini", "portada")),
                            plot="",
                            fulltitle=title)
        show_name = scrapertools.entityunescape(raw_name)
        if show_name:
            # The anchor title ends with the episode number: split it off
            episode = scrapertools.get_match(show_name, '\s+(\d+)$')
            show_name = show_name.replace(episode, '')
            season, episode = numbered_for_tratk(show_name, 1, episode)
            episode_item.hasContentDetails = "true"
            episode_item.contentTitle = show_name
            episode_item.contentSeason = season
            episode_item.contentEpisodeNumber = int(episode)
        itemlist.append(episode_item)
    return itemlist
def episodesxseason(item):
    """Build the episode Items for one season.

    Episode data must arrive pre-loaded in ``item.json_episodios`` (a list
    of dicts with 'capitulo' and 'opciones' keys).  Returns the episode
    Items, or jumps straight to findvideos() for a single-entry movie.
    """
    logger.info()
    itemlist = []
    # Episodes must come inside the item as a JSON list
    episodios = item.json_episodios
    for episodio in episodios:
        infoLabels = item.infoLabels
        language = 'VOSE'
        # Language assignment: the item's own language wins; otherwise the
        # episode state may flag a subtitled (SUBT) release
        if not item.language:
            if episodio.get('estado'):
                if 'SUBT' in episodio.get('estado'):
                    language = 'VOSE'
        else:
            language = item.language
        it = Item(action='findvideos',
                  channel=item.channel,
                  infoLabels=infoLabels,
                  language=language,
                  thumbnail=item.thumbnail,
                  title=episodio['capitulo'],
                  urls=episodio['opciones'],
                  url=item.url)
        # Season/episode numbering only applies to series content
        if not item.contentType == 'movie':
            it.contentSeason = item.contentSeason
            it.contentEpisodeNumber = episodio['capitulo']
        itemlist.append(it)
    # infoLabels lookup is skipped when building for the video library
    if not item.videolibrary:
        tmdb.set_infoLabels(itemlist, True)
    # Format the displayed title as "SxE: title [language]"
    for it in itemlist:
        it.title = unify.add_languages(
            '{}x{}: {}'.format(it.contentSeason, it.contentEpisodeNumber,
                               it.contentTitle), it.language)
    # A movie with a single entry goes straight to findvideos
    if len(itemlist) == 1 and item.contentType == 'movie':
        return findvideos(itemlist[0])
    else:
        return itemlist
def novedades_episodios(item):
    """Scrape the 'new episodes' page and return one findvideos Item per entry."""
    logger.info("pelisalacarta.channels.animeflv novedades")
    data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS,
                                        host=CHANNEL_HOST)
    '''
    <div class="not">
        <a href="/ver/cyclops-shoujo-saipu-12.html" title="Cyclops Shoujo Saipu 12">
            <img class="imglstsr lazy" src="http://cdn.animeflv.net/img/mini/957.jpg" border="0">
            <span class="tit_ep"><span class="tit">Cyclops Shoujo Saipu 12</span></span>
        </a>
    </div>
    '''
    patronvideos = '<div class="not"[^<]+<a href="([^"]+)" title="([^"]+)"[^<]+<img class="[^"]+" ' \
                   'src="([^"]+)"[^<]+<span class="tit_ep"><span class="tit">([^<]+)<'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    itemlist = []
    for match in matches:
        scrapedtitle = scrapertools.entityunescape(match[3])
        fulltitle = scrapedtitle
        # directory = match[1]
        scrapedurl = urlparse.urljoin(item.url, match[0])
        # the "portada" variant is the full-size artwork
        scrapedthumbnail = urlparse.urljoin(item.url, match[2].replace("mini", "portada"))
        scrapedplot = ""
        #if DEBUG: logger.info("title=[{0}], url=[{1}], thumbnail=[{2}]".format(scrapedtitle, scrapedurl, scrapedthumbnail))
        new_item = Item(channel=item.channel, action="findvideos", title=scrapedtitle,
                        url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
                        fulltitle=fulltitle)
        content_title = scrapertools.entityunescape(match[1])
        if content_title:
            # The anchor title ends with the episode number: split it off
            episode = scrapertools.get_match(content_title, '\s+(\d+)$')
            content_title = content_title.replace(episode, '')
            season, episode = numbered_for_tratk(content_title, 1, episode)
            new_item.hasContentDetails = "true"
            new_item.contentTitle = content_title
            new_item.contentSeason = season
            new_item.contentEpisodeNumber = int(episode)
        itemlist.append(new_item)
    return itemlist
def episodesxseason(item):
    """Build the episode Items of one season from ``item.json_episodios``.

    Each episode dict must carry 'players' (source list) and 'episode'
    (number).  A single-entry movie is forwarded directly to findvideos().
    """
    logger.info()
    lang = item.language if item.language else 'VOSE'
    result = []
    for ep in item.json_episodios:
        ep_item = Item(action='findvideos',
                       channel=item.channel,
                       contentType=item.contentType,
                       infoLabels=item.infoLabels,
                       language=lang,
                       thumbnail=item.thumbnail,
                       title=item.title,
                       urls=ep['players'],
                       url=item.url,
                       viewType='videos')
        # Series episodes get a numbered, localized title
        if item.contentType != 'movie':
            ep_item.title = (config.get_localized_string(60036) % ep['episode'])
            ep_item.contentEpisodeNumber = ep['episode']
        result.append(ep_item)
    tmdb.set_infoLabels(result, seekTmdb=True)
    # A movie with a single entry goes straight to its video sources
    if item.contentType == 'movie' and len(result) == 1:
        return findvideos(result[0])
    return result
def save_library_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """Save into `path` every episode contained in `episodelist`.

    @type path: str
    @param path: folder where the episodes are written
    @type episodelist: list
    @param episodelist: items representing the episodes to save
    @type serie: item
    @param serie: show the episodes belong to
    @type silent: bool
    @param silent: whether the progress notification is shown
    @param overwrite: allows overwriting existing files; the special value
        "everything" forces a rewrite of strm/nfo/json even when present
    @type overwrite: bool
    @rtype insertados: int
    @return: number of inserted episodes
    @rtype sobreescritos: int
    @return: number of overwritten episodes
    @rtype fallidos: int
    @return: number of failed episodes (-1 on tvshow.nfo update failure)
    """
    logger.info()
    # No episode list -> nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0
    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}
    # "everything" is a stronger overwrite mode: rewrite all three files
    if overwrite == "everything":
        overwrite = True
        overwrite_everything = True
    else:
        overwrite_everything = False
    # List every file of the show once, so we avoid per-file existence checks
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]
    # silent suppresses the progress dialog (used by library_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress('streamondemand', 'Aggiunta episodi...')
        p_dialog.update(0, 'Aggiunta episodio...')
    # fix: float because integer division misbehaves in Python 2.x
    t = float(100) / len(episodelist)
    for i, e in enumerate(episodelist):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Aggiunta episodio...', e.title)
        try:
            season_episode = scrapertools.get_season_and_episode(e.title.lower())
            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            season_episode = "%sx%s" % (e.contentSeason,
                                        str(e.contentEpisodeNumber).zfill(2))
        except:
            # title has no recognizable SxE pattern: skip it
            continue
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())
        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros
        # *_exists_before track whether each file pre-existed this run
        strm_exists_before = True
        nfo_exists_before = True
        json_exists_before = True
        if not strm_exists or overwrite_everything:
            if not overwrite_everything:
                strm_exists_before = False
            # season_episode.strm is missing: create it
            item_strm = Item(action='play_from_library', channel='biblioteca',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode
            # FILTERTOOLS
            if item_strm.list_idiomas:
                # if tvshow.nfo carries a filter, pass it on to the new item_strm
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show
                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")
            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path,
                                          '%s?%s' % (addon_name, item_strm.tourl()))
        item_nfo = None
        if (not nfo_exists or overwrite_everything) and e.infoLabels.get("imdb_id"):
            if not overwrite_everything:
                nfo_exists_before = False
            # season_episode.nfo is missing: create it
            if e.infoLabels["tmdb_id"]:
                scraper.find_and_set_infoLabels(e)
                head_nfo = "https://www.themoviedb.org/tv/%s/season/%s/episode/%s\n" % (
                    e.infoLabels['tmdb_id'], e.contentSeason, e.contentEpisodeNumber)
            elif e.infoLabels["tvdb_id"]:
                head_nfo = e.url_scraper
            else:
                head_nfo = "Aqui ira el xml"  # TODO
            item_nfo = e.clone(channel="biblioteca", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))
            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())
        # Continue only if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Read the episode's infoLabels back from the nfo
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)
                e.infoLabels = item_nfo.infoLabels
                if filetools.write(json_path, e.tojson()):
                    if not json_exists or overwrite_everything:
                        if not overwrite_everything:
                            json_exists_before = False
                            logger.info("Insertado: %s" % json_path)
                        else:
                            logger.info("Sobreescritos todos los archivos!")
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentTitle] = 0
                        if (not overwrite_everything and not json_exists):
                            json_exists = True
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1
        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1
        # An episode counts as inserted only if something was newly created
        # this run and all three files now exist
        if (not strm_exists_before or not nfo_exists_before or not json_exists_before):
            if (strm_exists and nfo_exists and json_exists):
                insertados += 1
            else:
                logger.error("El archivo strm, nfo o json no existe")
        if not silent and p_dialog.iscanceled():
            break
    if not silent:
        p_dialog.close()
    if news_in_playcounts:
        # New episodes found: mark them unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)
            # active == 30 is re-armed to daily updates
            if tvshow_item.active == 30:
                tvshow_item.active = 1
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(
                days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')
            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1
        # ... and refresh the Kodi library
        if config.is_xbmc() and not silent:
            from platformcode import xbmc_library
            xbmc_library.update(FOLDER_TVSHOWS, filetools.basename(path))
    # All episodes failing is reported as -1
    if fallidos == len(episodelist):
        fallidos = -1
    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
def listado(item):
    """Scrape a catalogue page and return movie/series Items plus pagination.

    Parses the "MiniFicha" cards of the page, keeps only entries with a
    watch-online link, classifies them by parent category (movies vs series)
    and appends a next-page Item when the site reports one.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Collapse whitespace/entities so the regexes can match across lines
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = '<div class="MiniFicha">.*?'
    patron += '<img src="([^"]+).*?'
    patron += '<div class="MiniF_TitleSpecial">[^>]+>([^<]+).*?'
    patron += '<b>Categoria:\s*</b>([^&]+)»\s*([^<]+).*?'
    patron += '<div class="OpcionesDescargasMini">(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for thumbnail, title, cat_padres, cat_hijos, opciones in matches:
        #logger.debug(thumbnail + "\n" + title + "\n" + cat_padres + "\n" + cat_hijos + "\n" + opciones)
        # Extract the year from the title and strip the leftovers
        patron = '\d{4}$'
        year = scrapertools.find_single_match(title, patron)
        if year:
            title = re.sub(patron, "", title)
            patron = '\s?-?\s?(line)?\s?-\s?$'
            regex = re.compile(patron, re.I)
            title = regex.sub("", title)
        # Use the 'b' image variant because it is larger
        thumbnail = HOST + thumbnail[:-5] + 'b' + thumbnail[-4:]
        # Look for a watch-online option
        patron = '<a href="http://estrenosly.org/ver-online-([^"]+)'
        url_ver = scrapertools.find_single_match(opciones, patron)
        if url_ver:
            new_item = Item(channel=item.channel, action="findvideos", title=title,
                            thumbnail=thumbnail, url=url_ver,
                            infoLabels={"year": year}, text_color=color1)
            cat_padres = cat_padres.strip()
            if cat_padres in ["peliculas-dvdrip", "HDRIP", "cartelera"]:
                #if item.extra == 'movie':
                new_item.contentTitle = title
                new_item.extra = "movie"
                # Accept only recognizable quality names
                patron = ("rip|dvd|screener|hd|ts|Telesync")
                if re.search(patron, cat_hijos, flags=re.IGNORECASE):
                    new_item.contentQuality = cat_hijos
                    new_item.title = "%s [%s]" % (title, cat_hijos)
                elif cat_padres == "peliculas-dvdrip":
                    new_item.contentQuality = "DVDRIP"
                    new_item.title = "%s [DVDRIP]" % title
                elif cat_padres == "HDRIP":
                    new_item.contentQuality = "HDRIP"
                    new_item.title = "%s [HDRIP]" % title
            elif cat_padres == "series":
                new_item.contentSerieName = cat_hijos
                patron = re.compile('(\d+)x(\d+)')
                matches = patron.findall(title)
                if len(matches) == 1:
                    new_item.contentSeason = matches[0][0]
                    new_item.contentEpisodeNumber = matches[0][1].zfill(2)
                    new_item.extra = "episodie"
                else:
                    # matches == [('1', '01'), ('1', '02'), ('1', '03')]
                    new_item.extra = "multi-episodie"
            else:
                # Other categories are not of interest for now
                continue
            ''' Optionally we could also grab the torrent and direct-download links
            patron = '<a href="http://estrenosli.org/descarga-directa-([^"]+)'
            new_item.url_descarga = scrapertools.find_single_match(opciones,patron)
            patron = '<a href="http://estrenosli.org/descargar-torrent-([^"]+).*?'
            new_item.url_torrent = scrapertools.find_single_match(opciones,patron)'''
            itemlist.append(new_item)
    if itemlist:
        # Fetch the basic data of all movies via multithreaded TMDb lookup
        tmdb.set_infoLabels(itemlist)
    # Append pagination if needed
    patron = '<div class="sPages">.*?'
    patron += '<a href="([^"]+)">Siguiente'
    url_next_page = scrapertools.find_single_match(data, patron)
    if url_next_page:
        itemlist.append(
            Item(channel=item.channel, action="listado", title=">> Página siguiente",
                 thumbnail=thumbnail_host, url=HOST + url_next_page,
                 folder=True, text_color=color3, text_blod=True))
    return itemlist
def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """Save into `path` every episode contained in `episodelist`.

    @type path: str
    @param path: folder where the episodes are written
    @type episodelist: list
    @param episodelist: items representing the episodes to save
    @type serie: item
    @param serie: show the episodes belong to
    @type silent: bool
    @param silent: whether the progress notification is shown
    @param overwrite: allows overwriting existing files
    @type overwrite: bool
    @rtype insertados: int
    @return: number of inserted episodes
    @rtype sobreescritos: int
    @return: number of overwritten episodes
    @rtype fallidos: int
    @return: number of failed episodes (-1 on tvshow.nfo update failure)
    """
    logger.info()
    # No episode list -> nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0
    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}
    # List every file of the show once, so we avoid per-file existence checks
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]
    # Episodes already present as plain files without a matching .strm:
    # those must not get a new strm created for them
    nostrm_episodelist = []
    for root, folders, files in filetools.walk(path):
        for file in files:
            season_episode = scrapertools.get_season_and_episode(file)
            if season_episode == "" or filetools.exists(
                    filetools.join(path, "%s.strm" % season_episode)):
                continue
            nostrm_episodelist.append(season_episode)
    nostrm_episodelist = sorted(set(nostrm_episodelist))
    # silent suppresses the progress dialog (used by videolibrary_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress(
            config.get_localized_string(20000), config.get_localized_string(60064))
        p_dialog.update(0, config.get_localized_string(60065))
    new_episodelist = []
    # Extract the season/episode number and discard titles that have none
    tags = []
    if config.get_setting("enable_filter", "videolibrary"):
        tags = [x.strip() for x in
                config.get_setting("filters", "videolibrary").lower().split(",")]
    for e in episodelist:
        # Skip episodes whose title matches a user-configured filter tag
        if tags != [] and tags != None and any(tag in e.title.lower() for tag in tags):
            continue
        try:
            season_episode = scrapertools.get_season_and_episode(e.title)
            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            new_episodelist.append(e)
        except:
            # title has no recognizable SxE pattern: skip it
            continue
    # No episode list left -> nothing to save
    if not len(new_episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0
    # fix: float because integer division misbehaves in Python 2.x
    t = float(100) / len(new_episodelist)
    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Añadiendo episodio...', e.title)
        season_episode = "%sx%s" % (e.contentSeason,
                                    str(e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())
        # Skip episodes already present as plain local files
        if season_episode in nostrm_episodelist:
            continue
        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros
        if not strm_exists:
            # season_episode.strm is missing: create it
            item_strm = Item(action='play_from_library', channel='videolibrary',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode
            # FILTERTOOLS
            if item_strm.list_language:
                # if tvshow.nfo carries a filter, pass it on to the new item_strm
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show
                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")
            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path,
                                          '%s?%s' % (addon_name, item_strm.tourl()))
        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # season_episode.nfo is missing: create it
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)
            item_nfo = e.clone(channel="videolibrary", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))
            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())
        # Continue only if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Read the episode's infoLabels back from the nfo
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)
                e.infoLabels = item_nfo.infoLabels
                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentSerieName] = 0
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1
        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1
        if not silent and p_dialog.iscanceled():
            break
    if not silent:
        p_dialog.close()
    if news_in_playcounts:
        # New episodes found: mark them unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)
            # active == 30 is re-armed to daily updates
            if tvshow_item.active == 30:
                tvshow_item.active = 1
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(
                days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')
            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1
        else:
            # ... and, if that went well, refresh the Kodi video library
            if config.is_xbmc() and not silent:
                from platformcode import xbmc_videolibrary
                xbmc_videolibrary.update(FOLDER_TVSHOWS, filetools.basename(path))
    # All episodes failing is reported as -1
    if fallidos == len(episodelist):
        fallidos = -1
    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
def listado(item):
    """List movies/series from the site's JSON API, with pagination.

    ``item.extra`` selects the mode: "movie", "series" or anything else
    (new series episodes).  The URL must carry offset=/limit= parameters,
    used both to parse the current page and to build the next-page link.
    """
    #import json
    logger.info("[pepecine.py] listado")
    itemlist = []
    try:
        data_dict = jsontools.load_json(scrapertools.cache_page(item.url))
    except:
        return itemlist  # Return an empty list on download/parse failure
    offset = scrapertools.get_match(item.url, 'offset=(\d*)')
    limit = scrapertools.get_match(item.url, 'limit=(\d*)')
    for i in data_dict["result"]:
        infoLabels = {}
        idioma = ''
        if item.extra == "movie":
            action = "get_movie"
            title = i["title"] + ' (' + i['year'] + ')'
            url = urlparse.urljoin(__url_base__,
                                   "peliculas-online/" + str(i["id"]))  #+"-"+i["title"]).lower().replace(" ","-")))
        elif item.extra == "series":
            action = "get_temporadas"
            title = i["title"]
            infoLabels['tvshowtitle'] = i["title"]
            url = urlparse.urljoin(__url_base__,
                                   "series-online/" + str(i["id"]))  #+"-"+i["title"]).lower().replace(" ","-")))
        else:  #item.extra=="series_novedades":
            action = "get_only_episodio"
            infoLabels['season'] = i['season']
            infoLabels['episode'] = i['episode'].zfill(2)
            item.extra = infoLabels["season"] + "x" + infoLabels["episode"]
            infoLabels['tvshowtitle'] = i["title"]
            # The label mixes a language name and a flag <img>; drop the img
            flag = scrapertools.find_single_match(i["label"], '(\s*\<img src=.*\>)')
            idioma = i["label"].replace(flag, "")
            title = i["title"] + ' ' + item.extra + ' (' + idioma + ')'
            url = urlparse.urljoin(__url_base__,
                                   "series-online/" + str(i["id"]))  #+"-"+i["title"]).lower().replace(" ","-")))
        # Upgrade the poster to the 500px-wide variant when available
        if i.has_key("poster") and i["poster"]:
            thumbnail = re.compile("/w\d{3}/").sub("/w500/", i["poster"])
        else:
            thumbnail = item.thumbnail
        if i.has_key("background") and i["background"]:
            fanart = i["background"]
        else:
            fanart = item.fanart
        # Fill the infoLabels dictionary
        infoLabels['title_id'] = i['id']  # title_id: movie/series id on pepecine.com
        infoLabels['titleraw'] = i["title"]  # titleraw: unformatted movie/series title
        if i['genre']: infoLabels['genre'] = i['genre']
        if i['year']: infoLabels['year'] = i['year']
        if i['tagline']: infoLabels['plotoutline'] = i['tagline']
        if i['plot']:
            infoLabels['plot'] = i['plot']
        else:
            infoLabels['plot'] = ""
        if i['runtime']: infoLabels['duration'] = int(i['runtime']) * 60
        # Prefer the IMDb rating, fall back to TMDb
        if i['imdb_rating']:
            infoLabels['rating'] = i['imdb_rating']
        elif i['tmdb_rating']:
            infoLabels['rating'] = i['tmdb_rating']
        newItem = Item(channel=item.channel, action=action, title=title, url=url,
                       extra=item.extra, fanart=fanart, thumbnail=thumbnail,
                       viewmode="movie_with_plot", language=idioma,
                       text_color="0xFFFFCE9C", infoLabels=infoLabels)
        newItem.year = i['year']
        newItem.contentTitle = i['title']
        if 'season' in infoLabels and infoLabels['season']:
            newItem.contentSeason = infoLabels['season']
        if 'episode' in infoLabels and infoLabels['episode']:
            newItem.contentEpisodeNumber = infoLabels['episode']
        itemlist.append(newItem)
    # Pagination: a full page means there may be more results
    if int(data_dict["total_results"]) == int(limit):
        url = item.url.replace('offset=' + offset,
                               'offset=' + str(int(offset) + int(limit)))
        itemlist.append(
            Item(channel=item.channel, action="listado", text_color="0xFF994D00",
                 title=">> Pagina siguiente >>", thumbnail=item.thumbnail, url=url,
                 extra=item.extra, fanart=fanart_host))
    return itemlist
def save_library_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """Save into `path` every episode contained in `episodelist`.

    @type path: str
    @param path: folder where the episodes are written
    @type episodelist: list
    @param episodelist: items representing the episodes to save
    @type serie: item
    @param serie: show the episodes belong to
    @type silent: bool
    @param silent: whether the progress notification is shown
    @param overwrite: allows overwriting existing files
    @type overwrite: bool
    @rtype insertados: int
    @return: number of inserted episodes
    @rtype sobreescritos: int
    @return: number of overwritten episodes
    @rtype fallidos: int
    @return: number of failed episodes (-1 on tvshow.nfo update failure)
    """
    logger.info()
    # No episode list -> nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0
    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}
    # List every file of the show once, so we avoid per-file existence checks
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]
    # silent suppresses the progress dialog (used by library_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress('streamondemand', 'Aggiunta episodi...')
        p_dialog.update(0, 'Aggiunta episodio...')
    new_episodelist = []
    # Extract the season/episode number and discard titles that have none
    for e in episodelist:
        try:
            season_episode = scrapertools.get_season_and_episode(e.title)
            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            new_episodelist.append(e)
        except:
            continue
    # No episode list left -> nothing to save
    if not len(new_episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0
    # fix: float because integer division misbehaves in Python 2.x
    t = float(100) / len(new_episodelist)
    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Aggiunta episodio...', e.title)
        season_episode = "%sx%s" % (e.contentSeason,
                                    str(e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())
        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros
        if not strm_exists:
            # season_episode.strm is missing: create it
            item_strm = Item(action='play_from_library', channel='biblioteca',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode
            # FILTERTOOLS
            if item_strm.list_idiomas:
                # if tvshow.nfo carries a filter, pass it on to the new item_strm
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show
                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")
            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path,
                                          '%s?%s' % (addon_name, item_strm.tourl()))
        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # season_episode.nfo is missing: create it
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)
            item_nfo = e.clone(channel="biblioteca", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))
            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())
        # Continue only if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Read the episode's infoLabels back from the nfo
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)
                e.infoLabels = item_nfo.infoLabels
                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentTitle] = 0
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1
        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1
        if not silent and p_dialog.iscanceled():
            break
    if not silent:
        p_dialog.close()
    if news_in_playcounts:
        # New episodes found: mark them unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)
            # active == 30 is re-armed to daily updates
            if tvshow_item.active == 30:
                tvshow_item.active = 1
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(
                days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')
            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1
        else:
            # ... and, if that went well, refresh the Kodi library
            if config.is_xbmc() and not silent:
                from platformcode import xbmc_library
                xbmc_library.update(FOLDER_TVSHOWS, filetools.basename(path))
    # All episodes failing is reported as -1
    if fallidos == len(episodelist):
        fallidos = -1
    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
def listado(item):
    """List movies / series / new episodes from the pepecine JSON API (legacy version).

    Downloads ``item.url`` (expected to return JSON with a ``result`` array and a
    ``total_results`` count) and builds one Item per entry.  The listing mode is
    selected by ``item.extra``:
      - ``"movie"``   -> movie entries   (action ``get_movie``)
      - ``"series"``  -> TV show entries (action ``get_temporadas``)
      - anything else -> single episodes (action ``get_only_episodio``)

    Returns a list of Item objects, plus a "next page" Item when the page is full;
    an empty list if the download or JSON parsing fails.
    """
    #import json
    logger.info("[pepecine.py] listado")
    itemlist = []
    try:
        data_dict = jsontools.load_json(scrapertools.cache_page(item.url))
    except:
        # Download or parse failed: return an empty list
        return itemlist

    # Current paging window, extracted from the request url itself
    offset= scrapertools.get_match(item.url,'offset=(\d*)')
    limit= scrapertools.get_match(item.url,'limit=(\d*)')

    for i in data_dict["result"]:
        infoLabels={}
        idioma =''

        if item.extra=="movie":
            action="get_movie"
            title= i["title"] + ' (' + i['year'] + ')'
            url= urlparse.urljoin(__url_base__,"peliculas-online/" + str(i["id"])) #+"-"+i["title"]).lower().replace(" ","-")))
        elif item.extra=="series":
            action="get_temporadas"
            title= i["title"]
            infoLabels['tvshowtitle']= i["title"]
            url= urlparse.urljoin(__url_base__,"series-online/" + str(i["id"])) #+"-"+i["title"]).lower().replace(" ","-")))
        else: #item.extra=="series_novedades":
            action="get_only_episodio"
            infoLabels['season']=i['season']
            infoLabels['episode']=i['episode'].zfill(2)
            # NOTE(review): item.extra is overwritten with the "SxEE" tag here, so
            # subsequent iterations and the pagination Item inherit the LAST
            # episode's tag instead of "series_novedades" — looks intentional for
            # newItem but side-effects the caller's item; confirm downstream use.
            item.extra=infoLabels["season"] + "x" + infoLabels["episode"]
            infoLabels['tvshowtitle']= i["title"]
            # The language is embedded in "label" next to a flag <img>; strip the tag
            flag= scrapertools.find_single_match(i["label"],'(\s*\<img src=.*\>)')
            idioma=i["label"].replace(flag,"")
            title=i["title"] + ' ' + item.extra + ' (' + idioma + ')'
            url= urlparse.urljoin(__url_base__,"series-online/" + str(i["id"])) #+"-"+i["title"]).lower().replace(" ","-")))

        # Prefer a larger poster (w500) when one is provided
        if i.has_key("poster") and i["poster"]:
            thumbnail=re.compile("/w\d{3}/").sub("/w500/",i["poster"])
        else:
            thumbnail= item.thumbnail
        if i.has_key("background") and i["background"]:
            fanart= i["background"]
        else:
            fanart= item.fanart

        # Fill in the infoLabels dictionary
        infoLabels['title_id']=i['id'] # title_id: id of the movie/show on pepecine.com
        infoLabels['titleraw']= i["title"] # titleraw: unformatted title of the movie/show
        if i['genre']: infoLabels['genre']=i['genre']
        if i['year']: infoLabels['year']=i['year']
        if i['tagline']: infoLabels['plotoutline']=i['tagline']
        if i['plot']:
            infoLabels['plot']=i['plot']
        else:
            infoLabels['plot']=""
        if i['runtime']: infoLabels['duration']=int(i['runtime'])*60  # minutes -> seconds
        if i['imdb_rating']:
            infoLabels['rating']=i['imdb_rating']
        elif i['tmdb_rating']:
            infoLabels['rating']=i['tmdb_rating']

        newItem = Item(channel=item.channel, action=action, title=title, url=url, extra=item.extra,
                       fanart=fanart, thumbnail=thumbnail, viewmode="movie_with_plot", language=idioma,
                       text_color="0xFFFFCE9C", infoLabels=infoLabels)
        newItem.year=i['year']
        newItem.contentTitle=i['title']
        if 'season' in infoLabels and infoLabels['season']:
            newItem.contentSeason = infoLabels['season']
        if 'episode' in infoLabels and infoLabels['episode']:
            newItem.contentEpisodeNumber = infoLabels['episode']
        itemlist.append(newItem)

    # Pagination: a full page (total_results == limit) means there may be more
    if int(data_dict["total_results"]) == int(limit):
        url=item.url.replace('offset='+offset,'offset='+ str(int(offset)+ int(limit)))
        itemlist.append( Item( channel=item.channel, action="listado", text_color="0xFF994D00",
                               title=">> Pagina siguiente >>", thumbnail=item.thumbnail, url=url,
                               extra=item.extra, fanart=fanart_host) )

    return itemlist
def listado(item):
    """List movies / series / episodes from the pepecine JSON API.

    Downloads ``item.url`` (JSON with a ``result`` array), optionally filters
    the array by genre or search text (``item.filtro`` = [kind, value]), pages
    it locally in slices of 30 (``item.page``) and returns one Item per entry.
    Basic metadata is then completed through tmdb using multiple threads.

    Returns a list of Item objects plus a "next page" Item when more filtered
    results remain; an empty list if the download or JSON parsing fails.
    """
    logger.info()
    itemlist = []
    try:
        data_dict = jsontools.load_json(httptools.downloadpage(item.url).data)
    except:
        # Download or parse failed: return an empty list
        return itemlist

    # Filtering and search: drop entries that do not match.
    # Iterate over a copy ([:]) because we remove from the original list.
    if item.filtro:
        for i in data_dict["result"][:]:
            if (item.filtro[0] == "genero" and item.filtro[1] not in i['genre'].lower()) or \
                    (item.filtro[0] == "search" and item.filtro[1] not in i['title'].lower()):
                data_dict["result"].remove(i)

    # Local pagination: 30 entries per page
    if not item.page:
        item.page = 0
    offset = int(item.page) * 30
    limit = offset + 30

    for i in data_dict["result"][offset:limit]:
        infoLabels = InfoLabels()
        idioma = ''

        if item.extra == "movie":
            action = "findvideos"
            #viewcontent = 'movies'
            infoLabels["title"] = i["title"]
            title = '%s (%s)' % (i["title"], i['year'])
            url = urlparse.urljoin(__url_base__, "ver-pelicula-online/" + str(i["id"]))
        elif item.extra == "series":
            action = "get_temporadas"
            #viewcontent = 'seasons'
            title = i["title"]
            infoLabels['tvshowtitle'] = i["title"]
            url = urlparse.urljoin(__url_base__, "episodio-online/" + str(i["id"]))
        else: #item.extra=="series_novedades":
            action = "findvideos"
            #viewcontent = 'episodes'
            infoLabels['tvshowtitle'] = i["title"]
            infoLabels['season'] = i['season']
            infoLabels['episode'] = i['episode'].zfill(2)
            # The language is embedded in "label" next to a flag <img>; strip the tag
            flag = scrapertools.find_single_match(i["label"], '(\s*\<img src=.*\>)')
            idioma = i["label"].replace(flag, "")
            title = '%s %sx%s (%s)' % (i["title"], infoLabels["season"], infoLabels["episode"], idioma)
            url = urlparse.urljoin(__url_base__, "episodio-online/" + str(i["id"]))

        # Prefer a larger poster (w500) when one is provided
        if i.has_key("poster") and i["poster"]:
            thumbnail = re.compile("/w\d{3}/").sub("/w500/", i["poster"])
        else:
            thumbnail = item.thumbnail
        if i.has_key("background") and i["background"]:
            fanart = i["background"]
        else:
            fanart = item.fanart

        # Fill in the infoLabels dictionary
        infoLabels['title_id'] = i[
            'id']  # title_id: id of the movie/show on pepecine.com
        if i['genre']: infoLabels['genre'] = i['genre']
        if i['year']: infoLabels['year'] = i['year']
        #if i['tagline']: infoLabels['plotoutline']=i['tagline']
        if i['plot']:
            infoLabels['plot'] = i['plot']
        else:
            infoLabels['plot'] = ""
        if i['runtime']: infoLabels['duration'] = int(i['runtime']) * 60  # minutes -> seconds
        if i['imdb_rating']:
            infoLabels['rating'] = i['imdb_rating']
        elif i['tmdb_rating']:
            infoLabels['rating'] = i['tmdb_rating']
        if i['tmdb_id']: infoLabels['tmdb_id'] = i['tmdb_id']
        if i['imdb_id']: infoLabels['imdb_id'] = i['imdb_id']

        newItem = Item(
            channel=item.channel,
            action=action,
            title=title,
            url=url,
            extra=item.extra,
            fanart=fanart,
            thumbnail=thumbnail,
            viewmode="movie_with_plot",
            #viewcontent=viewcontent,
            language=idioma,
            text_color="0xFFFFCE9C",
            infoLabels=infoLabels)
        newItem.year = i['year']
        newItem.contentTitle = i['title']
        if 'season' in infoLabels and infoLabels['season']:
            newItem.contentSeason = infoLabels['season']
        if 'episode' in infoLabels and infoLabels['episode']:
            newItem.contentEpisodeNumber = infoLabels['episode']
        itemlist.append(newItem)

    # Fetch basic metadata with multiple threads
    tmdb.set_infoLabels(itemlist)

    # Pagination: more filtered results remain beyond this slice.
    # NOTE(review): assumes item.page is numeric here (set to int 0 above when
    # falsy); if a caller passes it as a string, `item.page + 1` would fail.
    if len(data_dict["result"]) > limit:
        itemlist.append(
            item.clone(text_color="0xFF994D00",
                       title=">> Pagina siguiente >>",
                       page=item.page + 1))

    return itemlist
def listado(item):
    """Build the listing of movies / series / episodes from the pepecine JSON API.

    Downloads ``item.url`` as JSON, applies the optional genre/search filter
    (``item.filtro``), slices the filtered results in pages of 30 entries
    (``item.page``), creates one Item per entry and completes basic metadata
    through tmdb.  Returns an empty list when the download or the JSON
    parsing fails; appends a "next page" Item when more results remain.
    """
    logger.info()
    itemlist = []
    try:
        data_dict = jsontools.load_json(httptools.downloadpage(item.url).data)
    except:
        # Could not download/parse: empty listing
        return itemlist

    # Filter by genre or search text; walk a copy so in-place removal is safe
    if item.filtro:
        for entry in data_dict["result"][:]:
            if (item.filtro[0] == "genero" and item.filtro[1] not in entry['genre'].lower()) or \
                    (item.filtro[0] == "search" and item.filtro[1] not in entry['title'].lower()):
                data_dict["result"].remove(entry)

    # Local pagination, 30 entries per page
    if not item.page:
        item.page = 0
    page_start = int(item.page) * 30
    page_end = page_start + 30

    for entry in data_dict["result"][page_start:page_end]:
        infoLabels = InfoLabels()
        idioma = ''

        if item.extra == "movie":
            action = "findvideos"
            #viewcontent = 'movies'
            infoLabels["title"] = entry["title"]
            title = '%s (%s)' % (entry["title"], entry['year'])
            url = urlparse.urljoin(__url_base__, "ver-pelicula-online/" + str(entry["id"]))
        elif item.extra == "series":
            action = "get_temporadas"
            #viewcontent = 'seasons'
            title = entry["title"]
            infoLabels['tvshowtitle'] = entry["title"]
            url = urlparse.urljoin(__url_base__, "episodio-online/" + str(entry["id"]))
        else:  # item.extra == "series_novedades"
            action = "findvideos"
            #viewcontent = 'episodes'
            infoLabels['tvshowtitle'] = entry["title"]
            infoLabels['season'] = entry['season']
            infoLabels['episode'] = entry['episode'].zfill(2)
            # "label" carries the language next to a flag <img>; strip the tag
            flag = scrapertools.find_single_match(entry["label"], '(\s*\<img src=.*\>)')
            idioma = entry["label"].replace(flag, "")
            title = '%s %sx%s (%s)' % (entry["title"], infoLabels["season"], infoLabels["episode"], idioma)
            url = urlparse.urljoin(__url_base__, "episodio-online/" + str(entry["id"]))

        # Prefer a bigger poster (w500) when one is provided
        if entry.has_key("poster") and entry["poster"]:
            thumbnail = re.compile("/w\d{3}/").sub("/w500/", entry["poster"])
        else:
            thumbnail = item.thumbnail
        if entry.has_key("background") and entry["background"]:
            fanart = entry["background"]
        else:
            fanart = item.fanart

        # Complete the infoLabels dictionary.
        # title_id: identifier of the movie/show on pepecine.com
        infoLabels['title_id'] = entry['id']
        if entry['genre']:
            infoLabels['genre'] = entry['genre']
        if entry['year']:
            infoLabels['year'] = entry['year']
        #if entry['tagline']: infoLabels['plotoutline'] = entry['tagline']
        infoLabels['plot'] = entry['plot'] if entry['plot'] else ""
        if entry['runtime']:
            infoLabels['duration'] = int(entry['runtime']) * 60
        if entry['imdb_rating']:
            infoLabels['rating'] = entry['imdb_rating']
        elif entry['tmdb_rating']:
            infoLabels['rating'] = entry['tmdb_rating']
        if entry['tmdb_id']:
            infoLabels['tmdb_id'] = entry['tmdb_id']
        if entry['imdb_id']:
            infoLabels['imdb_id'] = entry['imdb_id']

        new_item = Item(channel=item.channel, action=action, title=title, url=url,
                        extra=item.extra, fanart=fanart, thumbnail=thumbnail,
                        viewmode="movie_with_plot",
                        #viewcontent=viewcontent,
                        language=idioma, text_color="0xFFFFCE9C", infoLabels=infoLabels)
        new_item.year = entry['year']
        new_item.contentTitle = entry['title']
        if 'season' in infoLabels and infoLabels['season']:
            new_item.contentSeason = infoLabels['season']
        if 'episode' in infoLabels and infoLabels['episode']:
            new_item.contentEpisodeNumber = infoLabels['episode']
        itemlist.append(new_item)

    # Fetch basic metadata with multiple threads
    tmdb.set_infoLabels(itemlist)

    # Pagination: more filtered results remain beyond this slice
    if len(data_dict["result"]) > page_end:
        itemlist.append(item.clone(text_color="0xFF994D00",
                                   title=">> Pagina siguiente >>",
                                   page=item.page + 1))

    return itemlist
def list_all(item): logger.info() itemlist = [] next = False data = get_source(item.url) patron = 'spotlight_container>.*?image lazy data-original=(.*?)>.*?<div class=spotlight_title>(.*?)<' patron += '(.*?) sres>(\d{4})<.*?playLink href=(.*?)>' matches = re.compile(patron, re.DOTALL).findall(data) first = item.first last = first+19 if last > len(matches): last = len(matches) next = True for scrapedthumbnail, scrapedtitle, type_data, year, scrapedurl in matches[first:last]: url = scrapedurl title = scrapedtitle season = scrapertools.find_single_match(type_data, 'class=title-season>Temporada<.*?> (\d+) <') episode = scrapertools.find_single_match(type_data, 'class=title-season>Episodio<.*?> (\d+) <') if season != '' or episode != '': item.type = 'tvshow' else: item.type = 'movie' new_item = Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail, type=item.type, infoLabels={'year': year}) if item.type == 'tvshow': new_item.action = 'episodios' new_item.contentSerieName = scrapedtitle season = season.strip() episode = episode.strip() if season == '': if 'Anime' in item.title: season = 1 else: season = scrapertools.find_single_match(url, '.*?temp-(\d+)') new_item.contentSeasonNumber = season else: new_item.contentSeasonNumber = season if episode != '': new_item.contentEpisodeNumber = episode if season != '' and episode != '': new_item.title = '%s %sx%s' % (new_item.title, season, episode) elif episode == '': new_item.title = '%s Temporada %s' % (new_item.title, season) else: new_item.action = 'findvideos' new_item.contentTitle = scrapedtitle itemlist.append(new_item) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) # Paginación if not next: url_next_page = item.url first = last else: url_next_page = scrapertools.find_single_match(data, "<a href=([^ ]+) class=page-link aria-label=Next>") first = 0 if url_next_page: itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all', first=first)) return itemlist
def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Save to the given path every episode contained in episodelist.

    For each episode it creates (or updates) three sibling files:
    ``SxEE.strm`` (Kodi playable stub), ``SxEE.nfo`` (scraped metadata) and
    ``sxee [channel].json`` (serialized Item), then marks new episodes as
    unwatched in ``tvshow.nfo`` and, on success, triggers a Kodi library
    update.  Optionally caches/deletes/refreshes "emergency urls" per the
    channel's ``emergency_urls`` setting.

    @type path: str
    @param path: directory where the episodes are saved
    @type episodelist: list
    @param episodelist: list of Items representing the episodes to save
    @type serie: item
    @param serie: the show whose episodes are being saved
    @type silent: bool
    @param silent: whether the progress notification is shown
    @param overwrite: allow overwriting existing files
    @type overwrite: bool
    @rtype insertados: int
    @return: number of inserted episodes
    @rtype sobreescritos: int
    @return: number of overwritten episodes
    @rtype fallidos: int
    @return: number of failed episodes (-1 signals total failure)
    """
    logger.info()

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # List every file of the show once, so we avoid per-file existence checks
    raiz, carpetas_series, ficheros = next(filetools.walk(path))
    ficheros = [filetools.join(path, f) for f in ficheros]

    # Collect SxEE tags that have sibling files but no .strm: these denote a
    # broken library layout and are skipped later
    nostrm_episodelist = []
    for root, folders, files in filetools.walk(path):
        for file in files:
            season_episode = scrapertools.get_season_and_episode(file)
            if season_episode == "" or filetools.exists(filetools.join(path, "%s.strm" % season_episode)):
                continue
            nostrm_episodelist.append(season_episode)
    nostrm_episodelist = sorted(set(nostrm_episodelist))

    # silent suppresses the progress dialog (used by videolibrary_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress(config.get_localized_string(20000), config.get_localized_string(60064))
        p_dialog.update(0, config.get_localized_string(60065))

    # Prepare the emergency-urls handling for this channel
    channel_alt = generictools.verify_channel(serie.channel)
    emergency_urls_stat = config.get_setting("emergency_urls", channel_alt)  # does the channel want emergency urls?
    emergency_urls_succ = False
    channel = __import__('channels.%s' % channel_alt, fromlist=["channels.%s" % channel_alt])
    if serie.torrent_caching_fail:  # if the conversion process failed, do not cache
        emergency_urls_stat = 0
        del serie.torrent_caching_fail

    new_episodelist = []
    # Extract season and episode numbers, discarding items that have none
    tags = []
    if config.get_setting("enable_filter", "videolibrary"):
        tags = [x.strip() for x in config.get_setting("filters", "videolibrary").lower().split(",")]

    for e in episodelist:
        headers = {}
        if e.headers:
            headers = e.headers
        # Skip episodes whose title matches a user-configured filter tag
        if tags != [] and tags != None and any(tag in e.title.lower() for tag in tags):
            continue
        try:
            season_episode = scrapertools.get_season_and_episode(e.title)
            if not season_episode:
                continue

            # If the emergency-url option is set, attach the urls to each
            # episode after having run the channel's findvideos
            if e.emergency_urls and isinstance(e.emergency_urls, dict): del e.emergency_urls  # clear previous traces
            json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())  # episode .json path
            if emergency_urls_stat == 1 and not e.emergency_urls and e.contentType == 'episode':  # save emergency urls?
                if not silent:
                    p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title)  # progress dialog
                if json_path in ficheros:  # if the .json exists, take the urls from it
                    if overwrite:  # but only if .json files are being overwritten
                        json_epi = Item().fromjson(filetools.read(json_path))  # read the .json
                        if json_epi.emergency_urls:  # if emergency urls exist...
                            e.emergency_urls = json_epi.emergency_urls  # ... copy them
                        else:  # and if not...
                            e = emergency_urls(e, channel, json_path, headers=headers)  # ... generate them
                else:
                    e = emergency_urls(e, channel, json_path, headers=headers)  # episode does not exist: generate the urls
                if e.emergency_urls:  # if we now have urls...
                    emergency_urls_succ = True  # ... it's a success and we will mark the .nfo
            elif emergency_urls_stat == 2 and e.contentType == 'episode':  # delete emergency urls?
                if e.emergency_urls: del e.emergency_urls
                emergency_urls_succ = True  # ... it's a success and we will mark the .nfo
            elif emergency_urls_stat == 3 and e.contentType == 'episode':  # refresh emergency urls?
                if not silent:
                    p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title)  # progress dialog
                e = emergency_urls(e, channel, json_path, headers=headers)  # generate the urls
                if e.emergency_urls:  # if we now have urls...
                    emergency_urls_succ = True  # ... it's a success and we will mark the .nfo

            # In multi-channel shows the current channel's infolabels prevail
            # over the original channel's
            if not e.infoLabels["tmdb_id"] or (serie.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != serie.infoLabels["tmdb_id"]):
                e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            # Drop redirection markers before serializing
            if e.videolibray_emergency_urls:
                del e.videolibray_emergency_urls
            if e.channel_redir:
                del e.channel_redir
            new_episodelist.append(e)
        except:
            if e.contentType == 'episode':
                logger.error("No se ha podido guardar las urls de emergencia de %s en la videoteca" % e.contentTitle)
                logger.error(traceback.format_exc())
            continue

    # No episode list, nothing to save
    if not len(new_episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    # float fix: integer division would truncate on python 2.x
    try:
        t = float(100) / len(new_episodelist)
    except:
        t = 0

    # NOTE(review): last_season_episode appears unused below — leftover?
    last_season_episode = ''
    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), config.get_localized_string(60064), e.title)

        # Track the highest season/episode seen; "al NN" titles denote a range
        # ending at NN
        high_sea = e.contentSeason
        high_epi = e.contentEpisodeNumber
        if scrapertools.find_single_match(e.title, '[a|A][l|L]\s*(\d+)'):
            high_epi = int(scrapertools.find_single_match(e.title, 'al\s*(\d+)'))
        max_sea = e.infoLabels["number_of_seasons"]
        max_epi = 0
        if e.infoLabels["number_of_seasons"] and (e.infoLabels["temporada_num_episodios"] or e.infoLabels["number_of_seasons"] == 1):
            if e.infoLabels["number_of_seasons"] == 1 and e.infoLabels["number_of_episodes"]:
                max_epi = e.infoLabels["number_of_episodes"]
            else:
                max_epi = e.infoLabels["temporada_num_episodios"]

        season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

        # Skip episodes whose on-disk layout is broken (files without .strm)
        if season_episode in nostrm_episodelist:
            logger.error('Error en la estructura de la Videoteca: Serie ' + serie.contentSerieName + ' ' + season_episode)
            continue

        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        if not strm_exists:
            # season_episode.strm does not exist: add it
            item_strm = Item(action='play_from_library', channel='videolibrary',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_language:
                # if tvshow.nfo carries a filter, pass it on to the generated item_strm
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # season_episode.nfo does not exist: add it
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)
            item_nfo = e.clone(channel="videolibrary", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))
            if item_nfo.emergency_urls:
                del item_nfo.emergency_urls  # kept only in the episode's .json

            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())

        # Only continue if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Obtain the episode's infoLabels
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)

                # In multi-channel shows the current channel's infolabels
                # prevail over the original channel's
                if not e.infoLabels["tmdb_id"] or (item_nfo.infoLabels["tmdb_id"] \
                            and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]):
                    e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentSerieName] = 0
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1
            else:
                logger.info("Fallido: %s" % json_path)
                fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    #logger.debug('high_sea x high_epi: %sx%s' % (str(high_sea), str(high_epi)))
    #logger.debug('max_sea x max_epi: %sx%s' % (str(max_sea), str(max_epi)))

    if not silent:
        p_dialog.close()

    # NOTE(review): max_sea/high_sea/max_epi/high_epi below carry the values
    # from the LAST loop iteration (or the iteration where the dialog was
    # cancelled) — confirm this is the intended "series finished" heuristic.
    if news_in_playcounts or emergency_urls_succ or serie.infoLabels["status"] == "Ended" or serie.infoLabels["status"] == "Canceled":
        # If there are new episodes, mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            # If inserting/deleting emergency urls in the episode .jsons
            # succeeded, mark the .nfo accordingly
            if emergency_urls_succ:
                if tvshow_item.emergency_urls and not isinstance(tvshow_item.emergency_urls, dict):
                    del tvshow_item.emergency_urls
                if emergency_urls_stat in [1, 3]:  # save/refresh links operation
                    if not tvshow_item.emergency_urls:
                        tvshow_item.emergency_urls = dict()
                    if tvshow_item.library_urls.get(serie.channel, False):
                        tvshow_item.emergency_urls.update({serie.channel: True})
                elif emergency_urls_stat == 2:  # delete links operation
                    if tvshow_item.emergency_urls and tvshow_item.emergency_urls.get(serie.channel, False):
                        tvshow_item.emergency_urls.pop(serie.channel, None)  # drop the entry from the .nfo

            if tvshow_item.active == 30:
                tvshow_item.active = 1
            if tvshow_item.infoLabels["tmdb_id"] == serie.infoLabels["tmdb_id"]:
                tvshow_item.infoLabels = serie.infoLabels
                tvshow_item.infoLabels["title"] = tvshow_item.infoLabels["tvshowtitle"]

            # A finished/cancelled show whose last known episode we already
            # have stops being refreshed periodically
            if max_sea == high_sea and max_epi == high_epi and (tvshow_item.infoLabels["status"] == "Ended" or
                            tvshow_item.infoLabels["status"] == "Canceled") and insertados == 0 and fallidos == 0:
                tvshow_item.active = 0  # ... we will not refresh it any more
                logger.debug("%s [%s]: serie 'Terminada' o 'Cancelada'. Se desactiva la actualización periódica" % \
                            (serie.contentSerieName, serie.channel))

            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            logger.error("No se ha podido guardar las urls de emergencia de %s en la videoteca" % tvshow_item.contentSerieName)
            logger.error(traceback.format_exc())
            fallidos = -1
        else:
            # ... and if everything went well, refresh the Kodi video library
            if config.is_xbmc() and not silent:
                from platformcode import xbmc_videolibrary
                xbmc_videolibrary.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))

    return insertados, sobreescritos, fallidos
def listado(item):
    """Scrape the estrenosli/estrenosly listing page into movie/series Items.

    Downloads ``item.url``, extracts one "MiniFicha" card per title and keeps
    only entries that offer a watch-online link.  Movies get quality tags
    parsed from the child category; series get season/episode parsed from the
    title.  Other categories are skipped.  Appends a next-page Item when the
    page links one.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ","",data)

    patron = '<div class="MiniFicha">.*?'
    patron += '<img src="([^"]+).*?'
    patron += '<div class="MiniF_TitleSpecial">[^>]+>([^<]+).*?'
    patron += '<b>Categoria:\s*</b>([^&]+)»\s*([^<]+).*?'
    patron += '<div class="OpcionesDescargasMini">(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)

    for thumbnail, title, cat_padres, cat_hijos, opciones in matches:
        #logger.debug(thumbnail + "\n" + title + "\n" + cat_padres + "\n" + cat_hijos + "\n" + opciones)
        # Extract the year from the title and trim the leftover separators
        patron = '\d{4}$'
        year = scrapertools.find_single_match(title,patron)
        if year:
            title = re.sub(patron, "", title)
            patron = '\s?-?\s?(line)?\s?-\s?$'
            title = re.sub(patron, "", title,flags=re.IGNORECASE)

        # Use the 'b' image variant since it is larger
        thumbnail = HOST + thumbnail[:-5] + 'b' + thumbnail[-4:]

        # Look for the watch-online option
        patron = '<a href="http://estrenosly.org/ver-online-([^"]+)'
        url_ver = scrapertools.find_single_match(opciones, patron)
        if url_ver:
            new_item = Item(channel=item.channel, action="findvideos", title=title,
                            thumbnail=thumbnail, url=url_ver,
                            infoLabels={"year":year}, text_color = color1)

            cat_padres = cat_padres.strip()
            if cat_padres in ["peliculas-dvdrip", "HDRIP", "cartelera"]:
                #if item.extra == 'movie':
                new_item.contentTitle = title
                new_item.extra = "movie"
                # Keep the child category as quality only when it looks like one
                patron = ("rip|dvd|screener|hd|ts|Telesync")
                if re.search(patron,cat_hijos,flags=re.IGNORECASE):
                    new_item.contentQuality = cat_hijos
                    new_item.title = "%s [%s]" % (title, cat_hijos)
                elif cat_padres == "peliculas-dvdrip":
                    new_item.contentQuality = "DVDRIP"
                    new_item.title = "%s [DVDRIP]" % title
                elif cat_padres == "HDRIP":
                    new_item.contentQuality = "HDRIP"
                    new_item.title = "%s [HDRIP]" % title
            elif cat_padres == "series":
                new_item.contentSerieName = cat_hijos
                patron = re.compile('(\d+)x(\d+)')
                # NOTE(review): this rebinds the outer `matches` name; harmless
                # because the for-loop already holds its iterator, but fragile.
                matches = patron.findall(title)
                if len(matches) == 1:
                    new_item.contentSeason = matches[0][0]
                    new_item.contentEpisodeNumber = matches[0][1].zfill(2)
                    new_item.extra = "episodie"
                else:
                    # matches == [('1', '01'), ('1', '02'), ('1', '03')]
                    new_item.extra = "multi-episodie"
            else:
                # Other categories we are not interested in for now
                continue

            ''' Opcionalmente podriamos obtener los enlaces torrent y descargas directas
            patron = '<a href="http://estrenosli.org/descarga-directa-([^"]+)'
            new_item.url_descarga = scrapertools.find_single_match(opciones,patron)
            patron = '<a href="http://estrenosli.org/descargar-torrent-([^"]+).*?'
            new_item.url_torrent = scrapertools.find_single_match(opciones,patron)'''
            itemlist.append(new_item)

    if itemlist:
        # Fetch basic metadata for every movie with multiple threads
        tmdb.set_infoLabels(itemlist)

        # Add pagination if needed
        patron = '<div class="sPages">.*?'
        patron += '<a href="([^"]+)">Siguiente'
        url_next_page = scrapertools.find_single_match(data,patron)
        if url_next_page:
            itemlist.append(Item(channel=item.channel, action="listado",
                                 title=">> Página siguiente",
                                 thumbnail=thumbnail_host, url=HOST + url_next_page,
                                 folder=True, text_color = color3, text_blod=True))

    return itemlist