def get_episodios(item):
    """Build the episode list for a library series folder.

    Scans item.path for episode files: ``.strm`` entries are deserialized into
    playable items (action "findvideos"), any other media file becomes a local
    "play" item. Files whose name carries no "NxM" season/episode tag are
    skipped instead of crashing (the original unpacked the empty string).
    Returns the items sorted by (season, episode) via get_sort_temp_epi.
    """
    logger.info("pelisalacarta.channels.biblioteca get_episodios")
    itemlist = []

    # Get the episode files (Python 2 generator protocol, as in the rest of the file)
    raiz, carpetas_series, ficheros = filetools.walk(item.path).next()

    for i in ficheros:
        # metadata/companion files are never episodes
        if i.endswith(".nfo") or i.endswith(".json") or i.endswith(".srt"):
            continue

        season_episode = scrapertools.get_season_and_episode(i)
        if not season_episode:
            # file name carries no season/episode tag — skip instead of crashing
            continue
        season, episode = season_episode.split("x")

        # When filtering by season, ignore chapters from other seasons
        if item.filtrar_season and int(season) != int(item.contentSeason):
            continue

        if i.endswith(".strm"):
            # the strm stores a serialized Item
            epi = Item().fromurl(filetools.read(filetools.join(raiz, i)))
            epi.contentChannel = item.contentChannel
            epi.action = "findvideos"
            # fix: override any text color carried inside the strm
            epi.text_color = ""
            # fix: prevent the Kodi-library play path from triggering
            epi.strm = False
        else:
            # plain local video file
            epi = Item()
            epi.contentChannel = "local"
            epi.action = "play"

        epi.path = filetools.join(raiz, i)
        epi.title = i
        epi.channel = "biblioteca"
        epi.contentEpisodeNumber = episode
        epi.contentSeason = season
        itemlist.append(epi)

    library.set_infolabels_from_library(itemlist, tipo="Episodes")
    return sorted(itemlist, key=get_sort_temp_epi)
def get_title(item):
    """Build the '[B]...[/B]' display title for a newest-content item.

    Series titles are prefixed with "SxEE - " when the name itself lacks an
    NxM tag; language and quality badges are appended via support.typo.
    """
    # item.contentSerieName is present even for movies, hence the contentType check
    if item.contentSerieName and item.contentType != 'movie':
        title = item.contentSerieName
        if not scrapertools.get_season_and_episode(title) and item.contentEpisodeNumber:
            # contentSeason is missing in support — default to season 1
            if not item.contentSeason:
                item.contentSeason = '1'
            title = "%sx%s - %s" % (item.contentSeason,
                                    str(item.contentEpisodeNumber).zfill(2), title)
        else:
            seas = scrapertools.get_season_and_episode(item.title)
            if seas:
                title = "%s - %s" % (seas, title)
    elif item.contentTitle:
        # movie title (the original repeated this elif; the duplicate was dead code)
        title = item.contentTitle
    else:
        # last resort
        title = item.title

    # Strip formatting labels left over from earlier rendering
    title = re.compile(r"\[/*COLO.*?\]", re.DOTALL).sub("", title)
    title = re.compile(r"\[/*B\]", re.DOTALL).sub("", title)
    title = re.compile(r"\[/*I\]", re.DOTALL).sub("", title)
    title = '[B]' + title + '[/B]'

    # Language badge. The original compared type() against '' (always true),
    # which made the multi-element list branch unreachable and could have
    # crashed on str += list; use isinstance instead.
    if item.contentLanguage:
        if isinstance(item.contentLanguage, list):
            langs = item.contentLanguage[0] if len(item.contentLanguage) == 1 \
                else ', '.join(item.contentLanguage)
            title += support.typo(langs, '_ [] color kod')
        else:
            title += support.typo(item.contentLanguage, '_ [] color kod')

    if item.quality:
        title += support.typo(item.quality, '_ [] color kod')
    return title
def episodesxseasons(item):
    """List the episodes of the season block identified by item.id.

    Skips rows whose title carries no "NxM" tag (the original crashed on the
    unpack) and sorts numerically — the original sorted episode *strings*,
    which put "10" before "2".
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url, canonical=canonical).data
    bloque = scrapertools.find_single_match(data, 'season-%s.*?</ul>' % item.id)

    patron = '(?is)<a href="([^"]+)'
    patron += '.*?src="([^"]+)'
    patron += '.*?"Title">([^<]+)'
    patron += '.*?<p>([^<]+)'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapeddate in matches:
        season_episode = scrapertools.get_season_and_episode(scrapedtitle)
        if not season_episode:
            # title without an NxM marker — would raise ValueError on split
            continue
        season, episode = season_episode.split("x")
        infoLabels = {"episode": episode, "season": season}
        contentSerieName = scrapertools.find_single_match(scrapedtitle, '(.*?) \d')
        itemlist.append(
            item.clone(channel=item.channel,
                       action="findvideos",
                       episode=episode,
                       infoLabels=infoLabels,
                       contentSerieName=contentSerieName,
                       title=scrapedtitle,
                       thumbnail=scrapedthumbnail,
                       url=urlparse.urljoin(host, scrapedurl)))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    # numeric sort: the string sort placed "10" before "2"
    itemlist = sorted(itemlist, key=lambda i: int(i.episode))
    return itemlist
def Guardar(item):
    """Persist *item* as a ``.strm`` file in the video library.

    Movies go to MOVIES_PATH; episodes go to a per-show folder under
    SERIES_PATH (created on demand), named after the episode's "NxM" tag.
    Returns True on success.
    """
    logger.info("[library.py] Guardar")

    # Anything that is not a series is stored as a movie
    if item.category != "Series":
        item.category = "Cine"

    if item.category == "Cine":
        Archivo = os.path.join(MOVIES_PATH, LimpiarNombre(item.title) + ".strm")
    else:
        if item.show == "":
            CarpetaSerie = os.path.join(SERIES_PATH, "Serie_sin_titulo")
        else:
            CarpetaSerie = os.path.join(SERIES_PATH, LimpiarNombre(item.show))
        if not os.path.exists(CarpetaSerie.decode("utf8")):
            os.mkdir(CarpetaSerie.decode("utf8"))
        from core import scrapertools
        Archivo = os.path.join(
            CarpetaSerie,
            scrapertools.get_season_and_episode(LimpiarNombre(item.title)) + ".strm")

    if item.folder == False:
        item.channel = "library"
    item.file = Archivo

    logger.info("-----------------------------------------------------------------------")
    logger.info("Guardando en la Libreria: " + Archivo)
    logger.info(item.tostring())
    logger.info("-----------------------------------------------------------------------")

    import guitools
    # context manager guarantees the handle is closed even if the write fails
    # (the original leaked the descriptor on any exception)
    with open(Archivo.decode("utf8"), "w") as library_file:
        library_file.write(guitools.ConstruirStrm(item))
    return True
def get_title(item):
    """Return a clean display title for *item*.

    Series get a " - SxEE" suffix (plus the language tag) when their name
    lacks an NxM marker; otherwise fall back to contentTitle, fulltitle or
    title. Any leftover [COLOR]/[B]/[I] formatting labels are stripped.
    """
    if item.contentSerieName:
        # Series: append season/episode when the name itself has no NxM tag
        title = item.contentSerieName
        if item.contentEpisodeNumber and not scrapertools.get_season_and_episode(title):
            if not item.contentSeason:
                item.contentSeason = '1'
            title = "%s - %sx%s" % (title, item.contentSeason,
                                    str(item.contentEpisodeNumber).zfill(2))
            # 4l3x87 - fix to add Sub-ITA in newest
            if item.contentLanguage:
                title = "%s %s" % (title, item.contentLanguage)
    elif item.contentTitle:
        # movie with an adapted channel
        title = item.contentTitle
    elif item.fulltitle:
        # channel not adapted
        title = item.fulltitle
    else:
        # last resort
        title = item.title

    # Strip formatting labels left over from earlier rendering
    for mark in ("\[/*COLO.*?\]", "\[/*B\]", "\[/*I\]"):
        title = re.sub(mark, "", title, flags=re.DOTALL)
    return title
def AddCapitulos(itemlist):
    """Add to the library every episode of a series that is not stored yet.

    ``itemlist`` contains all the episodes of one series. Returns the number
    of newly saved episodes (0 when the series folder does not exist).
    """
    logger.info("[library.py] AddCapitulos")
    nuevos = 0
    CarpetaSerie = os.path.join(SERIES_PATH, LimpiarNombre(itemlist[0].show))
    if os.path.exists(CarpetaSerie.decode("utf8")):
        # collect the episodes already saved (.strm files in the series folder)
        lista_capitulos = os.listdir(CarpetaSerie)
        lista_capitulos = [
            os.path.basename(c) for c in lista_capitulos if c.endswith('.strm')
        ]
        # walk the available episodes and save the ones not stored yet
        for item in itemlist:
            # skip the synthetic menu entries mixed into the listing
            if item.action != "add_serie_to_library" and item.action != "download_all_episodes":
                # expected file name is the "NxM" tag plus the .strm extension
                capitulo = scrapertools.get_season_and_episode(
                    LimpiarNombre(item.title)) + ".strm"
                if capitulo not in lista_capitulos:
                    item.category = 'Series'
                    item.action = 'play_from_library'
                    nuevos += 1
                    Guardar(item)
    else:
        logger.info(
            "[library.py] AddCapitulos Error: No existe el directorio "
            + CarpetaSerie)
    return nuevos
def episodesxseasons(item):
    """List the episodes of the season stored in item.infoLabels["season"].

    Fixes two defects of the original: every Item received the *same* mutable
    infoLabels dict (so all episodes could end up with the number of the last
    one), and urls without an "NxM" tag raised IndexError on the split.
    """
    logger.info()
    itemlist = list()

    soup = create_soup(item.url).find("div", class_="season-content")
    matches = soup.find_all("div", class_="season")
    season = item.infoLabels["season"]

    for elem in matches:
        # only process the block whose header mentions the requested season
        if "temporada %s" % season not in elem.h2.text.lower():
            continue
        for epi in elem.find_all("li"):
            try:
                url = epi.a["href"]
            except Exception:
                continue
            epi_data = scrapertools.get_season_and_episode(url)
            if not epi_data:
                # url without an NxM marker — split("x")[1] would raise
                continue
            epi_num = epi_data.split("x")[1]
            # fresh dict per episode: sharing one mutable dict across items
            # would leave every episode with the number of the last one
            infoLabels = dict(item.infoLabels)
            infoLabels["episode"] = epi_num
            itemlist.append(Item(channel=item.channel,
                                 title=epi_data,
                                 url=url,
                                 action='findvideos',
                                 infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, True)
    return itemlist
def Guardar(item):
    """Persist *item* as a ``.strm`` file in the video library.

    Episodes go to a per-show folder under SERIES_PATH (created on demand),
    named after the episode's "NxM" tag; everything else is stored under
    MOVIES_PATH as a movie. Returns True on success.
    """
    logger.info("[library.py] Guardar")

    if item.category == "Series":
        if item.show == "":
            CarpetaSerie = os.path.join(SERIES_PATH, "Serie_sin_titulo")
        else:
            CarpetaSerie = os.path.join(SERIES_PATH, LimpiarNombre(item.show))
        if not os.path.exists(CarpetaSerie.decode("utf8")):
            os.mkdir(CarpetaSerie.decode("utf8"))
        Archivo = os.path.join(
            CarpetaSerie,
            scrapertools.get_season_and_episode(LimpiarNombre(item.title)) + ".strm")
    else:
        # the original assigned a dead local ``category``; normalize the item
        # itself, as the sibling Guardar implementations do
        item.category = "Cine"
        Archivo = os.path.join(MOVIES_PATH, LimpiarNombre(item.title) + ".strm")

    if item.action == "play":
        item.channel = "library"
    item.extra = Archivo

    logger.info("-----------------------------------------------------------------------")
    logger.info("Guardando en la Libreria: " + Archivo)
    logger.info(item.tostring())
    logger.info("-----------------------------------------------------------------------")

    from platformcode.xbmc import launcher
    # context manager guarantees the handle is closed even if the write fails
    with open(Archivo.decode("utf8"), "w") as library_file:
        library_file.write(launcher.ConstruirURL(item))
    return True
def last_episodes(item):
    """List the site's "Ultimos Episodios" block as findvideos items.

    Entries whose title carries no "NxM" tag are skipped (the original
    crashed unpacking the empty string).
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, 'Ultimos Episodios.*?</ul>')

    patron = '(?is)<a href="([^"]+)'
    patron += '.*?src="([^"]+)'
    patron += '.*?"Title">([^<]+)'
    patron += '.*?<p>([^<]+)'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapeddate in matches:
        season_episode = scrapertools.get_season_and_episode(scrapedtitle)
        if not season_episode:
            # title without an NxM marker — would raise ValueError on unpack
            continue
        season, episode = season_episode.split("x")
        infoLabels = {"episode": episode, "season": season}
        # series name = everything before the first digit of the title
        contentSerieName = scrapertools.find_single_match(scrapedtitle, '(.*?) \d')
        itemlist.append(
            item.clone(
                action="findvideos",
                channel=item.channel,
                contentSerieName=contentSerieName,
                infoLabels=infoLabels,
                # assumes scraped thumbnails are scheme-less — TODO confirm
                thumbnail="https://" + scrapedthumbnail,
                title=scrapedtitle + " %s" % scrapeddate,
                url=urlparse.urljoin(host, scrapedurl),
            ))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    return itemlist
def episodios(item):
    """List all episodes of the series at item.url with language badges.

    Parses the "NxM" tag out of each row title, filters the list through
    filtertools and appends the "add to videolibrary" entry when supported.
    """
    logger.info("url: %s" % item.url)
    infoLabels = {}
    data = httptools.downloadpage(item.url).data
    episodes = re.findall(
        'visco.*?href="(?P<url>[^"]+).+?nbsp; (?P<title>.*?)</a>.+?ucapaudio.?>(?P<langs>.*?)</div>',
        data, re.MULTILINE | re.DOTALL)
    itemlist = []
    for url, title, langs in episodes:
        # "NxM" tag parsed from the episode title
        s_e = scrapertools.get_season_and_episode(title)
        # NOTE(review): infoLabels aliases item.infoLabels and is mutated on
        # every iteration — presumably item.clone() copies it; verify.
        infoLabels = item.infoLabels
        infoLabels["season"] = s_e.split("x")[0]
        infoLabels["episode"] = s_e.split("x")[1]
        # flag icons rendered as "[lang]" labels
        languages = " ".join([
            "[%s]" % IDIOMAS.get(lang, lang)
            for lang in re.findall('images/s-([^\.]+)', langs)
        ])
        filter_lang = languages.replace("[", "").replace("]", "").split(" ")
        itemlist.append(
            item.clone(action="findvideos",
                       infoLabels=infoLabels,
                       language=filter_lang,
                       title="%s %s %s" % (item.title, title, languages),
                       url=urlparse.urljoin(HOST, url)))
    itemlist = filtertools.get_links(itemlist, item, list_idiomas, list_quality)
    tmdb.set_infoLabels(itemlist, True)
    # Option "Add this series to the KODI videolibrary"
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="Añadir esta serie a la videoteca",
                       action="add_serie_to_library",
                       extra="episodios"))
    return itemlist
def get_title(item):
    """Return a cleaned display title, preserving the -Serie-/-Varios-/[TERM]
    section markers carried in item.title.
    """
    if item.contentSerieName:
        # Series: append season/episode when the name itself has no NxM tag
        title = item.contentSerieName
        if not scrapertools.get_season_and_episode(title) and item.contentEpisodeNumber:
            if not item.contentSeason:
                item.contentSeason = '1'
            title = "%s - %sx%s" % (title, item.contentSeason,
                                    str(item.contentEpisodeNumber).zfill(2))
    elif item.contentTitle:
        # movie title (the original repeated this elif; the duplicate was dead code)
        title = item.contentTitle
    else:
        # last resort
        title = item.title

    # Strip formatting labels left over from earlier rendering
    title = re.compile(r"\[/*COLO.*?\]", re.DOTALL).sub("", title)
    title = re.compile(r"\[/*B\]", re.DOTALL).sub("", title)
    title = re.compile(r"\[/*I\]", re.DOTALL).sub("", title)

    # Propagate the section markers from the original title
    if '-Serie-' in item.title:
        title = '%s -Serie-' % title
    elif '-Varios-' in item.title:
        title = '%s -Varios-' % title
    if '[TERM]' in item.title:
        title = '%s [TERM]' % title
    return title
def get_sort_temp_epi(item):
    """Sort key: (season, episode) as ints, taken from infoLabels when both
    values are present and non-empty, otherwise parsed out of the lowercased
    title's "NxM" tag.
    """
    labels = item.infoLabels
    if labels and labels.get("season", "1") != "" and labels.get("episode", "1") != "":
        return int(labels.get("season", "1")), int(labels.get("episode", "1"))
    temporada, capitulo = scrapertools.get_season_and_episode(item.title.lower()).split("x")
    return int(temporada), int(capitulo)
def capitulos(item):
    """List the chapters found in the "Lista de Temporadas" block of item.url
    and append the "add to videolibrary" entries when supported.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(
        data, 'Lista de Temporadas.*?Content principal')
    patron = '<a href="([^"]+).*?'
    patron += '<span>(.*?)</span>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.strip()
        # the "NxM" tag is encoded in the url; dashes are stripped first
        s_e = scrapertools.get_season_and_episode(scrapedurl.replace("-", ""))
        if s_e != "":
            season = s_e.split("x")[0]
            episode = s_e.split("x")[1]
        else:
            season = episode = ""
        scrapedtitle = s_e + " - " + scrapedtitle
        # NOTE(review): item.infoLabels is mutated in place and shared by all
        # clones — presumably Item.clone() copies it; verify.
        item.infoLabels["episode"] = episode
        item.infoLabels["season"] = season
        itemlist.append(
            item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl))
    tmdb.set_infoLabels(itemlist)
    if config.get_videolibrary_support():
        # blank separator row before the videolibrary entry
        itemlist.append(Item(channel=item.channel, title=""))
        itemlist.append(
            item.clone(
                action="add_serie_to_library",
                channel=item.channel,
                extra="episodios",
                title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                url=item.url))
    return itemlist
def nuevos_episodios(item):
    """List newly released episodes plus a "next page" entry when present."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '(?s)<td class="bb">.*?'  # base
    patron += '<a href="(.*?)">'  # url
    patron += '(.*?)</a>.*?'  # series name
    patron += '<img src="(.*?)>.*?'  # image
    patron += '<h2>(.*?)</h2>'  # title
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedseriename, scrapedthumbnail, scrapedtitle in matches:
        # "NxM" tag taken from the url; "_" placeholders when it is missing
        numero_capitulo = scrapertools.get_season_and_episode(scrapedurl)
        if numero_capitulo != "":
            temporada = numero_capitulo.split("x")[0]
            capitulo = numero_capitulo.split("x")[1]
        else:
            temporada = "_"
            capitulo = "_"
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             title=scrapedseriename + ": " + numero_capitulo + " - " + scrapedtitle.strip(),
                             url=scrapedurl,
                             thumbnail=scrapedthumbnail,
                             contentSerieName=scrapedseriename,
                             contentSeason=temporada,
                             contentEpisodeNumber=capitulo))
    # InfoLabels:
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Next page
    patron_siguiente = 'class="pag_b"><a href="([^"]+)'
    url_pagina_siguiente = scrapertools.find_single_match(data, patron_siguiente)
    if url_pagina_siguiente != "":
        pagina = ">>> Pagina: " + scrapertools.find_single_match(url_pagina_siguiente, '\d+')
        itemlist.append(Item(channel=item.channel,
                             action="nuevos_episodios",
                             title=pagina,
                             url=url_pagina_siguiente))
    return itemlist
def get_episodes(item):
    """Resolve *item* into the flat list of episode items to download.

    An "episode" item is returned as-is (cloned); a "tvshow"/"season" item is
    expanded by calling its channel's listing action, recursing into nested
    seasons. Each resulting episode gets its tmdb id, season/episode numbers
    and download file name filled in.
    """
    logger.info("contentAction: %s | contentChannel: %s | contentType: %s" % (
        item.contentAction, item.contentChannel, item.contentType))

    # The item we intend to download IS already an episode
    if item.contentType == "episode":
        episodes = [item.clone()]
    # The item is a series or a season
    elif item.contentType in ["tvshow", "season"]:
        # import the source channel
        channel = __import__('channels.%s' % item.contentChannel, None, None,
                             ["channels.%s" % item.contentChannel])
        # fetch its episode listing
        episodes = getattr(channel, item.contentAction)(item)

    itemlist = []
    # We have the list; now validate every entry
    for episode in episodes:
        # If we started from an item that was already an episode these fields
        # are already correct and must not be touched
        if item.contentType != "episode":
            episode.contentAction = episode.action
            episode.contentChannel = episode.channel

        # A season is not usable directly: recurse to get its episodes
        if episode.contentType == "season":
            itemlist.extend(get_episodes(episode))

        # An episode is what we need; prepare it for the download queue
        if episode.contentType == "episode":
            # Propagate the tmdb id to the episode
            if not episode.infoLabels["tmdb_id"]:
                episode.infoLabels["tmdb_id"] = item.infoLabels["tmdb_id"]

            # Episode, season and title
            if not episode.contentSeason or not episode.contentEpisodeNumber:
                season_and_episode = scrapertools.get_season_and_episode(episode.title)
                if season_and_episode:
                    episode.contentSeason = season_and_episode.split("x")[0]
                    episode.contentEpisodeNumber = season_and_episode.split("x")[1]

            # Look it up on tmdb
            if item.infoLabels["tmdb_id"]:
                scraper.find_and_set_infoLabels(episode)

            # Fall back to a title derived from the raw title string
            if not episode.contentTitle:
                episode.contentTitle = re.sub("\[[^\]]+\]|\([^\)]+\)|\d*x\d*\s*-", "",
                                              episode.title).strip()

            # NOTE(review): "%dx%0.2d" needs int season/episode — presumably
            # the scraper sets them as ints by this point; verify.
            episode.downloadFilename = filetools.validate_path(os.path.join(
                item.downloadFilename, "%dx%0.2d - %s" % (
                    episode.contentSeason,
                    episode.contentEpisodeNumber,
                    episode.contentTitle.strip())))
            itemlist.append(episode)
        # Any other result is of no use; ignore it
        else:
            logger.info("Omitiendo item no válido: %s" % episode.tostring())
    return itemlist
def get_episodios(item): logger.info() # logger.debug("item:\n" + item.tostring('\n')) itemlist = [] # Obtenemos los archivos de los episodios raiz, carpetas_series, ficheros = filetools.walk(item.path).next() # Menu contextual: Releer tvshow.nfo head_nfo, item_nfo = library.read_nfo(item.nfo) # Crear un item en la lista para cada strm encontrado for i in ficheros: if i.endswith('.strm'): season_episode = scrapertools.get_season_and_episode(i) if not season_episode: # El fichero no incluye el numero de temporada y episodio continue season, episode = season_episode.split("x") # Si hay q filtrar por temporada, ignoramos los capitulos de otras temporadas if item.filtrar_season and int(season) != int(item.contentSeason): continue # Obtener los datos del season_episode.nfo nfo_path = filetools.join(raiz, i).replace('.strm', '.nfo') head_nfo, epi = library.read_nfo(nfo_path) # Fijar el titulo del capitulo si es posible if epi.contentTitle: title_episodie = epi.contentTitle.strip() else: title_episodie = "Stagione %s Episodio %s" % \ (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2)) epi.contentTitle = "%sx%s" % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2)) epi.title = "%sx%s - %s" % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2), title_episodie) if item_nfo.library_filter_show: epi.library_filter_show = item_nfo.library_filter_show # Menu contextual: Marcar episodio como visto o no visto = item_nfo.library_playcounts.get(season_episode, 0) epi.infoLabels["playcount"] = visto if visto > 0: texto = "Segna episodio come non visto" value = 0 else: texto = "Segna episodio come visto" value = 1 epi.context = [{"title": texto, "action": "mark_content_as_watched", "channel": "biblioteca", "playcount": value, "nfo": item.nfo}] # logger.debug("epi:\n" + epi.tostring('\n')) itemlist.append(epi) return sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
def episodios(item):
    """List the episodes at item.url, filtered by item.contentSeason when set.

    Rows whose title has no "NxM" tag are skipped — the original crashed in
    int()/split on the empty string returned by get_season_and_episode.
    """
    logger.info("url: %s" % item.url)
    itemlist = []

    color_lang = config.get_setting('list_languages_color', default='red')
    data = do_downloadpage(item.url)

    patron = 'visco.*?href="(?P<url>[^"]+).+?nbsp; (?P<title>.*?)</a>.+?ucapaudio.?>(?P<langs>.*?)</div>'
    episodes = re.findall(patron, data, re.MULTILINE | re.DOTALL)

    for url, title, langs in episodes:
        s_e = scrapertools.get_season_and_episode(title)
        if not s_e:
            # no NxM tag — int()/split below would raise
            continue
        season = int(s_e.split("x")[0])
        episode = s_e.split("x")[1]

        # skip chapters from other seasons when one is requested
        if item.contentSeason and item.contentSeason != season:
            continue

        # flag icons rendered as a comma-separated language list
        languages = ', '.join([
            IDIOMAS.get(lang, lang)
            for lang in re.findall('images/s-([^\.]+)', langs)
        ])
        titulo = '%s [COLOR %s][%s][/COLOR]' % (title, color_lang, languages)

        itemlist.append(
            item.clone(action='findvideos',
                       url=urlparse.urljoin(HOST, url),
                       title=titulo,
                       contentType='episode',
                       contentSeason=season,
                       contentEpisodeNumber=episode))

    tmdb.set_infoLabels(itemlist)
    return itemlist
def episodios(item):
    """List episodes, optionally restricted to item.contentSeasonNumber.

    When a season is selected (and we are not building the library), the page
    is narrowed to that season's HTML block before matching.
    """
    infoLabels = {}
    data = httptools.downloadpage(item.url).data
    if item.contentSeasonNumber and item.extra2 != 'library':
        prevtitle = item.extra1
        # narrow the page to the requested season block
        data = scrapertools.find_single_match(
            data,
            r'<div style="cursor:pointer">→ Temporada %s(.*?)</div>\n</div>\n' % item.contentSeasonNumber)
    else:
        prevtitle = item.title
    patron = 'visco.*?href="(?P<url>[^"]+).+?nbsp; (?P<title>.*?)</a>.+?ucapaudio.?>(?P<langs>.*?)</div>'
    episodes = re.findall(patron, data, re.MULTILINE | re.DOTALL)
    itemlist = []
    for url, title, langs in episodes:
        s_e = scrapertools.get_season_and_episode(title)
        # NOTE(review): infoLabels aliases item.infoLabels and is mutated on
        # every iteration — presumably item.clone() copies it; verify.
        infoLabels = item.infoLabels
        if item.contentSeasonNumber:
            infoLabels["season"] = item.contentSeasonNumber
        else:
            infoLabels["season"] = s_e.split("x")[0]
        infoLabels["episode"] = s_e.split("x")[1]
        # flag icons rendered as "[lang]" labels
        languages = " ".join(
            ["[%s]" % IDIOMAS.get(lang, lang) for lang in re.findall('images/s-([^\.]+)', langs)])
        filter_lang = languages.replace("[", "").replace("]", "").split(" ")
        itemlist.append(item.clone(action="findvideos",
                                   infoLabels=infoLabels,
                                   language=filter_lang,
                                   title="%s %s %s" % (prevtitle, title, languages),
                                   url=urlparse.urljoin(HOST, url)))
    itemlist = filtertools.get_links(itemlist, item, list_idiomas, list_quality)
    tmdb.set_infoLabels(itemlist, True)
    # Option "Add this series to the KODI videolibrary"
    if config.get_videolibrary_support() and len(itemlist) > 0 and not item.contentSeasonNumber:
        itemlist.append(
            item.clone(title="Añadir esta serie a la videoteca",
                       action="add_serie_to_library",
                       extra="episodios",
                       thumbnail=thumb_videolibrary))
    return itemlist
def Guardar(item):
    """Persist *item* as a ``.strm`` file in the video library and notify the
    user with a confirmation dialog. Returns an empty itemlist.
    """
    logger.info("[library.py] Guardar")

    # Anything that is not a series is stored as a movie
    if item.category != "Series":
        item.category = "Cine"

    if item.category == "Cine":
        Archivo = os.path.join(MOVIES_PATH, LimpiarNombre(item.title) + ".strm")
    else:
        if item.show == "":
            CarpetaSerie = os.path.join(SERIES_PATH, "Serie_sin_titulo")
        else:
            CarpetaSerie = os.path.join(SERIES_PATH, LimpiarNombre(item.show))
        if not os.path.exists(CarpetaSerie.decode("utf8")):
            os.mkdir(CarpetaSerie.decode("utf8"))
        from core import scrapertools
        # The ".strm" extension must be appended AFTER extracting the NxM tag:
        # the original fed 'title.strm' through get_season_and_episode, which
        # returns only "NxM" and so produced files with no extension at all.
        Archivo = os.path.join(
            CarpetaSerie,
            scrapertools.get_season_and_episode(LimpiarNombre(item.title)) + ".strm")

    item.channel = "library"
    item.extra = Archivo

    logger.info("-----------------------------------------------------------------------")
    logger.info("Guardando en la Libreria: " + Archivo)
    logger.info(item.tostring())
    logger.info("-----------------------------------------------------------------------")

    # context manager guarantees the handle is closed even if the write fails
    with open(Archivo.decode("utf8"), "w") as library_file:
        library_file.write(item.serialize())

    itemlist = []
    cliente.Dialogo().MostrarOK(
        config.get_localized_string(30101),
        item.title + "<br/>" + config.get_localized_string(30135))
    return itemlist
def get_title(item):
    """Build the '[B]...[/B]' display title for a newest-content item,
    appending language, quality and season-completed badges.
    """
    # support.log("ITEM NEWEST ->", item)
    # item.contentSerieName is present even for movies, hence the contentType check
    if item.contentSerieName and item.contentType != 'movie':
        title = item.contentSerieName
        if not scrapertools.get_season_and_episode(title) and item.contentEpisodeNumber:
            # contentSeason is missing in support — default to season 1
            if not item.contentSeason:
                item.contentSeason = '1'
            title = "%sx%s - %s" % (item.contentSeason,
                                    str(item.contentEpisodeNumber).zfill(2), title)
        else:
            seas = scrapertools.get_season_and_episode(item.title)
            if seas:
                title = "%s - %s" % (seas, title)
    elif item.contentTitle:
        # movie title (the original repeated this elif; the duplicate was dead code)
        title = item.contentTitle
    else:
        # last resort
        title = item.title

    # Strip formatting labels left over from earlier rendering
    title = re.compile(r"\[/*COLO.*?\]", re.DOTALL).sub("", title)
    title = re.compile(r"\[/*B\]", re.DOTALL).sub("", title)
    title = re.compile(r"\[/*I\]", re.DOTALL).sub("", title)
    title = '[B]' + title + '[/B]'

    # Language badge. The original compared type() against '' (always true),
    # which made the multi-element list branch unreachable and could have
    # crashed on str += list; use isinstance instead.
    if item.contentLanguage:
        if isinstance(item.contentLanguage, list):
            langs = item.contentLanguage[0] if len(item.contentLanguage) == 1 \
                else ', '.join(item.contentLanguage)
            title += support.typo(langs, '_ [] color kod')
        else:
            title += support.typo(item.contentLanguage, '_ [] color kod')

    if item.quality:
        title += support.typo(item.quality, '_ [] color kod')

    # "season completed" badge
    season_ = support.typo(config.get_localized_string(70736), '_ [] color white bold') \
        if (type(item.args) != bool and 'season_completed' in item.news and not item.episode) else ''
    if season_:
        title += season_
    return title
def episodios(item):
    """List the episodes at item.url with simple client-side pagination.

    The page size is derived from the 'episodios_x_pag' setting; a
    ">>> Pagina siguiente" entry is appended while more episodes remain.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '(?s)class="episodiotitle">.*?a href="(.*?)">(.*?)</a>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle in matches:
        # Extract the chapter number from the url
        numero_capitulo = scrapertools.get_season_and_episode(scrapedurl)
        if numero_capitulo != "":
            temporada = numero_capitulo.split("x")[0]
            capitulo = numero_capitulo.split("x")[1]
        else:
            temporada = "_"
            capitulo = "_"
        # skip chapters from other seasons when one is requested
        if item.contentSeason and str(item.contentSeason) != temporada:
            continue
        itemlist.append(
            item.clone(action="findvideos",
                       title=numero_capitulo + " - " + scrapedtitle.strip(),
                       url=scrapedurl,
                       contentSeason=temporada,
                       contentEpisodeNumber=capitulo))
    # if item.contentTitle.startswith('Temporada'):
    #     if str(item.contentSeason) == temporada:
    #         itemlist.append(item.clone(action = "findvideos", title = numero_capitulo + " - " + scrapedtitle.strip(),
    #                                    url = scrapedurl, contentSeason=temporada, contentEpisodeNumber=capitulo))
    # else:
    #     itemlist.append(item.clone(action = "findvideos", title = numero_capitulo + " - " + scrapedtitle.strip(),
    #                                url = scrapedurl, contentSeason=temporada, contentEpisodeNumber=capitulo))
    # episodios_por_pagina=20
    # if config.get_setting('episodios_x_pag', 'pelisultra').isdigit():
    #     episodios_por_pagina=int(config.get_setting('episodios_x_pag', 'pelisultra'))
    # else:
    #     episodios_por_pagina=20
    #     config.set_setting('episodios_x_pag', '20', 'pelisultra')

    # page size derived from the user setting
    episodios_por_pagina = int(
        config.get_setting('episodios_x_pag', 'pelisultra')) * 5 + 10
    if not item.page:
        item.page = 0
    itemlist_page = itemlist[item.page:item.page + episodios_por_pagina]
    if len(itemlist) > item.page + episodios_por_pagina:
        itemlist_page.append(
            item.clone(title=">>> Pagina siguiente",
                       page=item.page + episodios_por_pagina))
    # InfoLabels:
    tmdb.set_infoLabels_itemlist(itemlist_page, seekTmdb=True)
    return itemlist_page
def get_episodes(item): logger.info("pelisalacarta.channels.descargas get_episodes") #El item YA es un episodio, no cal buscar if item.contentType == "episode": episodes = [item.clone()] #El item es uma serie o temporada elif item.contentType in ["tvshow", "season"]: # importamos el canal channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel]) # Obtenemos el listado de episodios episodes = getattr(channel, item.contentAction)(item) itemlist = [] #Tenemos las lista, ahora vamos a comprobar for episode in episodes: #Si partiamos de un item que ya era episodio estos datos ya están bien, no hay que modificarlos if item.contentType != "episode": episode.contentAction = episode.action episode.contentChannel = episode.channel #Si el resultado es una temporada, no nos vale, tenemos que descargar los episodios de cada temporada if episode.contentType == "season": itemlist.extend(get_episodes(episode)) #Si el resultado es un episodio ya es lo que necesitamos, lo preparamos para añadirlo a la descarga if episode.contentType == "episode": episode.infoLabels = item.infoLabels tmdb.find_and_set_infoLabels_tmdb(episode) episode.action = "menu" episode.channel = "descargas" episode.downloadStatus = 0 episode.downloadProgress = 0 episode.downloadSize = 0 episode.downloadCompleted = 0 if episode.text_color: del episode.text_color if episode.text_bold: del episode.text_bold if episode.text_italic: del episode.text_italic season_and_episode = scrapertools.get_season_and_episode(episode.title) if season_and_episode and episode.contentTitle: episode.contentSeason, episode.contentEpisodeNumber = season_and_episode.split("x") episode.downloadFilename = os.path.join(item.downloadFilename,"%s - %s" % (season_and_episode, episode.contentTitle.strip())) else: episode.contentTitle = re.sub("\[[^\]]+\]|\([^\)]+\)","",episode.title).strip() episode.downloadFilename = os.path.join(item.downloadFilename, episode.contentTitle) 
itemlist.append(episode) #Cualquier otro resultado no nos vale, lo ignoramos else: logger.info("Omitiendo item no válido: %s" % episode.tostring()) return itemlist
def get_sort_temp_epi(item):
    """Return an (int season, int episode) tuple usable as a sort key.

    Prefers infoLabels when both values are present and non-empty; otherwise
    the numbers are parsed from the "NxM" tag in the lowercased title.
    """
    labels = item.infoLabels or {}
    season = labels.get('season', "1")
    episode = labels.get('episode', "1")
    if item.infoLabels and season != "" and episode != "":
        return int(season), int(episode)
    parsed = scrapertools.get_season_and_episode(item.title.lower())
    temporada, capitulo = parsed.split('x')
    return int(temporada), int(capitulo)
def get_ep(s):
    """Group server entry *s* into the enclosing ``episodes`` dict, keyed by
    the "NxM" tag parsed from the server-reported file name. Entries whose
    server module has no get_filename, or whose name has no tag, are ignored.
    """
    module_name = 'servers.%s' % s.server
    srv_mod = __import__(module_name, None, None, [module_name])
    if not hasattr(srv_mod, 'get_filename'):
        return
    ep = scrapertools.get_season_and_episode(srv_mod.get_filename(s.url))
    if ep:
        episodes.setdefault(ep, []).append(s)
def filter_ep(s):
    """Append server entry *s* to the enclosing ``servers`` list when its
    server-reported file name matches the episode selected in ``item``.
    """
    module_name = 'servers.%s' % s.server
    srv_mod = __import__(module_name, None, None, [module_name])
    if not hasattr(srv_mod, 'get_filename'):
        return
    title = srv_mod.get_filename(s.url)
    # support.dbg()
    wanted = str(item.contentSeason) + "x" + str(item.contentEpisodeNumber).zfill(2)
    if scrapertools.get_season_and_episode(title) == wanted:
        servers.append(s)
def mark_season_as_watched(item):
    """Mark every episode of item.contentSeason (all seasons when it is -1)
    as watched/unwatched (item.playcount) in the series' tvshow.nfo.

    Side effects: rewrites tvshow.nfo, updates the Kodi database when running
    under XBMC/Kodi and refreshes the current listing.
    """
    logger.info()
    # logger.debug("item:\n" + item.tostring('\n'))

    # Get the dictionary of marked episodes
    f = filetools.join(item.path, 'tvshow.nfo')
    head_nfo, it = videolibrarytools.read_nfo(f)
    if not hasattr(it, 'library_playcounts'):
        it.library_playcounts = {}

    # Get the episode files
    raiz, carpetas_series, ficheros = filetools.walk(item.path).next()

    # Mark every episode found for this season
    episodios_marcados = 0
    for i in ficheros:
        if i.endswith(".strm"):
            season_episode = scrapertools.get_season_and_episode(i)
            if not season_episode:
                # the file name carries no season/episode numbers
                continue
            season, episode = season_episode.split("x")
            if int(item.contentSeason) == -1 or int(season) == int(item.contentSeason):
                # Key on the EPISODE file name. The original used `f`
                # (tvshow.nfo), so every episode wrote the same "tvshow" key
                # and no episode was actually marked (the Py3 version of this
                # function correctly uses the episode file).
                name_file = os.path.splitext(os.path.basename(i))[0]
                it.library_playcounts[name_file] = item.playcount
                episodios_marcados += 1

    if episodios_marcados:
        if int(item.contentSeason) == -1:
            # Propagate the playcount to every "season N" entry
            for k in it.library_playcounts.keys():
                if k.startswith("season"):
                    it.library_playcounts[k] = item.playcount
        else:
            # Add this season to the item.library_playcounts dictionary
            it.library_playcounts["season %s" % item.contentSeason] = item.playcount

        # If every season is watched, mark the whole show as watched
        it = check_tvshow_playcount(it, item.contentSeason)
        # Persist the changes back to tvshow.nfo
        filetools.write(f, head_nfo + it.tojson())
        item.infoLabels['playcount'] = item.playcount

        if config.is_xbmc():
            # Update the Kodi database
            from platformcode import xbmc_videolibrary
            xbmc_videolibrary.mark_season_as_watched_on_kodi(item, item.playcount)

    platformtools.itemlist_refresh()
def episodios(item):
    """List the episodes at item.url with language badges.

    The raw HTML is heavily pre-processed: whitespace/<br> noise is removed
    and the language-flag <img> tags are rewritten into <idioma>...</idioma>
    markers so a single regex can capture url, title and languages per row.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # normalize whitespace and strip comments/line-break tags
    data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    data = re.sub(r"<!--.*?-->", "", data)
    # rewrite the flag images into <idioma>...</idioma> markers ("|"-separated)
    data = re.sub(r"a> <img src=/assets/img/banderas/", "a><idioma>", data)
    data = re.sub(r"<img src=/assets/img/banderas/", "|", data)
    data = re.sub(r"\.png border='\d+' height='\d+' width='\d+'[^>]+>\s+<", "</idioma><", data)
    data = re.sub(r"\.png border='\d+' height='\d+' width='\d+'[^>]+>", "", data)
    # series thumbnail from the first tab, falling back to the item's own
    patron = '<div id="T1".*?'
    patron += "<img src='([^']+)'"
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) > 0:
        thumbnail = matches[0]
    else:
        thumbnail = item.thumbnail
    patron = "<a href='([^']+)'>(.*?)</a><idioma>(.*?)</idioma>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    # NOTE(review): infoLabels aliases item.infoLabels and is mutated on every
    # iteration while being passed to each new Item — presumably the Item
    # constructor copies it; verify.
    infoLabels = item.infoLabels
    for scrapedurl, scrapedtitle, scrapedidioma in matches:
        idioma = ""
        filter_langs = []
        # "OVOS" is the fallback label for unknown flags
        for i in scrapedidioma.split("|"):
            idioma += " [" + IDIOMAS.get(i, "OVOS") + "]"
            filter_langs.append(IDIOMAS.get(i, "OVOS"))
        season_episode = scrapertools.get_season_and_episode(scrapedtitle)
        title = '%s %s %s' % (season_episode, scrapedtitle, idioma)
        season_episode = season_episode.split('x')
        infoLabels['season'] = season_episode[0]
        infoLabels['episode'] = season_episode[1]
        itemlist.append(Item(channel=item.channel, title=title,
                             url=urlparse.urljoin(HOST, scrapedurl),
                             action="findvideos", show=item.show,
                             thumbnail=thumbnail, plot="",
                             language=filter_langs, infoLabels=infoLabels))
    itemlist = filtertools.get_links(itemlist, item, list_idiomas, list_quality)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Option "Add this series to the XBMC videolibrary"
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(item.clone(title="Añadir esta serie a la videoteca",
                                   action="add_serie_to_library",
                                   extra="episodios"))
    return itemlist
def episodios(item):
    """Scrape the episode rows of a show page and prefix each title with the
    "SxEE" tag extracted from the episode URL."""
    pattern = r'<td class="MvTbTtl"><a href="([^"]+)">(.*?)<\/a>.*?>\d{4}<'
    episodes = support.scrape(item, pattern, ["url", "title", "year"])

    for episode in episodes:
        tag = scrapertools.get_season_and_episode(episode.url)
        prefix = tag + " - "
        episode.title = prefix + episode.title
        episode.fulltitle = prefix + episode.fulltitle

    return episodes
def mark_season_as_watched(item): logger.info() # logger.debug("item:\n" + item.tostring('\n')) # Get dictionary of marked episodes f = filetools.join(item.path, 'tvshow.nfo') head_nfo, it = videolibrarytools.read_nfo(f) if not hasattr(it, 'library_playcounts'): it.library_playcounts = {} # We get the archives of the episodes raiz, carpetas_series, ficheros = next(filetools.walk(item.path)) # We mark each of the episodes found this season episodios_marcados = 0 for i in ficheros: if i.endswith(".strm"): season_episode = scrapertools.get_season_and_episode(i) if not season_episode: # The file does not include the season and episode number continue season, episode = season_episode.split("x") if int(item.contentSeason) == -1 or int(season) == int(item.contentSeason): name_file = os.path.splitext(filetools.basename(i))[0] it.library_playcounts[name_file] = item.playcount episodios_marcados += 1 if episodios_marcados: if int(item.contentSeason) == -1: # We add all seasons to the dictionary item.library_playcounts for k in list(it.library_playcounts.keys()): if k.startswith("season"): it.library_playcounts[k] = item.playcount else: # Add season to dictionary item.library_playcounts it.library_playcounts["season %s" % item.contentSeason] = item.playcount # it is verified that if all the seasons are seen, the series is marked as view it = check_tvshow_playcount(it, item.contentSeason) # We save the changes to tvshow.nfo filetools.write(f, head_nfo + it.tojson()) item.infoLabels['playcount'] = item.playcount if config.is_xbmc(): # We update the Kodi database from platformcode import xbmc_videolibrary xbmc_videolibrary.mark_season_as_watched_on_kodi(item, item.playcount) platformtools.itemlist_refresh()
def mark_season_as_watched(item): logger.info("pelisalacarta.channels.biblioteca mark_season_as_watched") # logger.debug("item:\n" + item.tostring('\n')) # Obtener el diccionario de episodios marcados f = filetools.join(item.path, "tvshow.nfo") url_scraper = filetools.read(f, 0, 1) it = Item().fromjson(filetools.read(f, 1)) if not hasattr(it, "library_playcounts"): it.library_playcounts = {} # Obtenemos los archivos de los episodios raiz, carpetas_series, ficheros = filetools.walk(item.path).next() # Marcamos cada uno de los episodios encontrados de esta temporada episodios_marcados = 0 for i in ficheros: if i.endswith(".strm"): season_episode = scrapertools.get_season_and_episode(i) if not season_episode: # El fichero no incluye el numero de temporada y episodio continue season, episode = season_episode.split("x") if int(item.contentSeason) == -1 or int(season) == int(item.contentSeason): name_file = os.path.splitext(os.path.basename(i))[0] it.library_playcounts[name_file] = item.playcount episodios_marcados += 1 if episodios_marcados: if int(item.contentSeason) == -1: # Añadimos todas las temporadas al diccionario item.library_playcounts for k in it.library_playcounts.keys(): if k.startswith("season"): it.library_playcounts[k] = item.playcount else: # Añadimos la temporada al diccionario item.library_playcounts it.library_playcounts["season %s" % item.contentSeason] = item.playcount # se comprueba que si todas las temporadas están vistas, se marque la serie como vista it = check_tvshow_playcount(it, item.contentSeason) # Guardamos los cambios en tvshow.nfo filetools.write(f, url_scraper + it.tojson()) item.infoLabels["playcount"] = item.playcount if config.is_xbmc(): # Actualizamos la BBDD de Kodi library.mark_season_as_watched_on_kodi(item, item.playcount) platformtools.itemlist_refresh()
def episodesxseasons(item):
    """List the episodes of one season of a show.

    Extracts the HTML block for item.contentSeasonNumber, scrapes
    url/title/language-flags per episode, applies the language filter and
    TMDB metadata, and optionally appends the "add to video library" entry.
    """
    itemlist = []
    infoLabels = item.infoLabels
    data = httptools.downloadpage(item.url).data

    # Isolate the HTML block of the requested season
    bloq = r'<div style="cursor:pointer">→ Temporada %s(.*?)</div>\n</div>\n'
    data = scrapertools.find_single_match(data, bloq % item.contentSeasonNumber)

    patron = 'visco.*?href="(?P<url>[^"]+).+?nbsp; (?P<title>.*?)</a>.+?ucapaudio.?>(?P<langs>.*?)</div>'
    episodes = re.findall(patron, data, re.MULTILINE | re.DOTALL)
    for url, title, langs in episodes:
        s_e = scrapertools.get_season_and_episode(title)
        if item.contentSeasonNumber:
            infoLabels["season"] = item.contentSeasonNumber
        else:
            # Fall back to the season number parsed from the episode title
            infoLabels["season"] = s_e.split("x")[0]
        infoLabels["episode"] = s_e.split("x")[1]
        # Flag image names (images/s-xx.png) are mapped to language labels
        languages = " ".join([
            "[%s]" % IDIOMAS.get(lang, lang)
            for lang in re.findall('images/s-([^\.]+)', langs)
        ])
        filter_lang = languages.replace("[", "").replace("]", "").split(" ")
        # logger.error(filter_lang)
        itemlist.append(
            item.clone(action="findvideos", infoLabels=infoLabels, language=filter_lang,
                       title="%s %s" % (title, languages), url=urlparse.urljoin(HOST, url)))

    itemlist = filtertools.get_links(itemlist, item, list_idiomas, list_quality)
    if len(itemlist) > 1:
        # Sort by episode number
        itemlist.sort(key=lambda it: int(it.infoLabels["episode"]))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # "Add this show to the KODI video library" option
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.season1:
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                 url=item.url, action="add_serie_to_library", extra="episodios",
                 contentSerieName=item.contentSerieName, thumbnail=thumb_videolibrary))

    return itemlist
def episodios(item):
    """List the episodes of a show organised in "su-spoiler" season blocks.

    When item.args carries a specific season/episode, only that season block
    (and episode) is scraped; otherwise every episode of every season is
    listed and the video-library entry is appended.
    """
    support.log()
    patron = r'<div class="su-spoiler.*?</i>(.*?)</div>\s+<div class="su-spoiler-content"(.*?)="clearfix">'
    data = httptools.downloadpage(item.url).data
    matches = re.compile(patron, re.S).findall(data)

    if item.args:
        # Restrict to the requested season block; fall back to s-1 when the
        # page index turns out to be 0-based
        s = (int(item.args["season"]))
        try:
            matches = [matches[s]]
        except:
            matches = [matches[(s - 1)]]

    itemlist = []
    for season, block in matches:
        patron = r'<div class="su-link-ep">\s+<a.*?href="([^"]+)".*?strong>(.*?)</'
        if item.args:
            # Narrow the pattern down to the requested episode number
            ep = int(item.args["episode"])
            patron = r'<div class="su-link-ep">\s+<a.*?href="([^"]+)".*?strong>\s(Episodio ' + str(
                ep) + r') .*?</'
        episodes = re.compile(patron, re.MULTILINE).findall(block)
        for scrapedurl, scrapedtitle in episodes:
            # "<season heading> Episodio N" -> normalized "SxNN" tag
            fixedtitle = scrapertools.get_season_and_episode(season + " " + scrapedtitle)
            eptitle = re.sub(r"Episodio\s+\d+", "", scrapedtitle).strip()
            itemlist.append(
                Item(
                    channel=item.channel,
                    action="findvideos",
                    contentType=item.contentType,
                    title="[B]" + fixedtitle + " " + eptitle + "[/B]",
                    fulltitle=fixedtitle + " " + eptitle,
                    show=fixedtitle + " " + eptitle,
                    url=scrapedurl,
                    extra=item.extra,
                ))
    if not item.args:
        # Append the "add to video library" entry for full listings only
        support.videolibrary(itemlist, item)
    return itemlist
def get_title(item):
    """Return a display title for the item.

    Series titles get a "<name> - SxEE" suffix when no season/episode tag is
    present; movies fall back through contentTitle, fulltitle and title.
    Any previous [COLOR]/[B]/[I] formatting tags are stripped.
    """
    if item.contentSerieName:
        # It is a TV show
        title = item.contentSerieName
        has_tag = scrapertools.get_season_and_episode(title)
        if not has_tag and item.contentEpisodeNumber:
            if not item.contentSeason:
                item.contentSeason = '1'
            episode = "{:0>2d}".format(int(item.contentEpisodeNumber))
            title = "%s - %sx%s" % (title, item.contentSeason, episode)
    elif item.contentTitle:
        # A movie from an adapted channel
        title = item.contentTitle
    elif item.fulltitle:
        # The channel is not adapted
        title = item.fulltitle
    else:
        # Last resort
        title = item.title

    # Strip formatting labels left over from previous listings
    for tag_pattern in ("\[/*COLO.*?\]", "\[/*B\]", "\[/*I\]"):
        title = re.compile(tag_pattern, re.DOTALL).sub("", title)

    return title
def get_title(item):
    """Build the formatted display title for an item in the "newest" listing.

    Series get a "<name> - SxEE" suffix when the title lacks one; the result
    is wrapped in [B]...[/B] and decorated with language and quality tags.
    """
    support.log("ITEM NEWEST ->", item)
    if item.contentSerieName:  # It is a TV show
        title = item.contentSerieName
        if not scrapertools.get_season_and_episode(title) and item.contentEpisodeNumber:
            if not item.contentSeason:
                item.contentSeason = '1'
            title = "%s - %sx%s" % (title, item.contentSeason,
                                    str(item.contentEpisodeNumber).zfill(2))
    elif item.contentTitle:  # A movie from an adapted channel
        title = item.contentTitle
    elif item.fulltitle:  # The channel is not adapted
        title = item.fulltitle
    else:  # Last resort
        title = item.title

    title = '[B]' + title + '[/B]'

    if item.contentLanguage == '':
        pass
    elif type(item.contentLanguage) == list and len(item.contentLanguage) == 1:
        title += support.typo(item.contentLanguage[0], '_ [] color kod')
    else:
        # FIX: this branch used to be guarded by "type(item.contentLanguage) != ''",
        # which compares a type object with a string and is therefore always True,
        # leaving a later "type(...) == list" branch unreachable. The always-true
        # guard and the dead branch have been removed; behaviour is unchanged.
        title += support.typo(item.contentLanguage, '_ [] color kod')

    if item.quality:
        title += support.typo(item.quality, '_ [] color kod')

    return title
def Guardar(item):
    """Save one item in the library as a .strm file.

    Series go to SERIES_PATH/<show>/<SxEE>.strm; everything else goes to
    MOVIES_PATH/<title>.strm. Items with action "play" are rewritten so they
    are launched back through the "library" channel. Returns True.
    """
    logger.info("[library.py] Guardar")
    if item.category == "Series":
        if item.show == "":
            CarpetaSerie = os.path.join(SERIES_PATH, "Serie_sin_titulo")
        else:
            CarpetaSerie = os.path.join(SERIES_PATH, LimpiarNombre(item.show))
        if not os.path.exists(CarpetaSerie.decode("utf8")):
            os.mkdir(CarpetaSerie.decode("utf8"))
        # The episode file is named after its "SxEE" tag
        Archivo = os.path.join(
            CarpetaSerie,
            scrapertools.get_season_and_episode(LimpiarNombre(item.title)) + ".strm")
    else:
        category = "Cine"
        Archivo = os.path.join(MOVIES_PATH, LimpiarNombre(item.title) + ".strm")

    if item.action == "play":
        # Redirect playback through the library channel
        item.channel = "library"
        item.extra = Archivo

    logger.info(
        "-----------------------------------------------------------------------"
    )
    logger.info("Guardando en la Libreria: " + Archivo)
    logger.info(item.tostring())
    logger.info(
        "-----------------------------------------------------------------------"
    )

    # The .strm content is the plugin URL that replays this item
    LIBRARYfile = open(Archivo.decode("utf8"), "w")
    from platformcode.xbmc import launcher
    LIBRARYfile.write(launcher.ConstruirURL(item))
    LIBRARYfile.flush()
    LIBRARYfile.close()
    return True
def AddCapitulos(itemlist):
    """Save every episode of a show that is not yet in the library.

    itemlist contains all the episodes of one show; the show folder must
    already exist. Returns the number of newly saved episodes.
    """
    # itemlist holds every episode of one show
    logger.info("[library.py] AddCapitulos")
    nuevos = 0
    CarpetaSerie = os.path.join(SERIES_PATH, LimpiarNombre(itemlist[0].show))
    if os.path.exists(CarpetaSerie.decode("utf8")):
        # Collect the episodes already saved (.strm files)
        lista_capitulos = os.listdir(CarpetaSerie)
        lista_capitulos = [os.path.basename(c) for c in lista_capitulos if c.endswith('.strm')]
        # Save every available episode that is not stored yet
        for item in itemlist:
            if item.action != "add_serie_to_library" and item.action != "download_all_episodes":
                capitulo = scrapertools.get_season_and_episode(LimpiarNombre(item.title)) + ".strm"
                if capitulo not in lista_capitulos:
                    item.category = 'Series'
                    item.action = 'play_from_library'
                    nuevos += 1
                    Guardar(item)
    else:
        logger.info("[library.py] AddCapitulos Error: No existe el directorio " + CarpetaSerie)
    return nuevos
def save_library_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Saves in the given path every episode included in the episodelist.

    @type path: str
    @param path: folder where the episodes are saved
    @type episodelist: list
    @param episodelist: items representing the episodes to save
    @type serie: item
    @param serie: show the episodes belong to
    @type silent: bool
    @param silent: whether the progress notification is shown
    @param overwrite: allows overwriting existing files
    @type overwrite: bool
    @rtype insertados: int
    @return: number of inserted episodes
    @rtype sobreescritos: int
    @return: number of overwritten episodes
    @rtype fallidos: int
    @return: number of failed episodes (-1 when updating tvshow.nfo failed)
    """
    logger.info()

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # List every file of the show once, so existence checks are simple lookups
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]

    # silent disables the progress dialog (used by library_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress('streamondemand', 'Aggiunta episodi...')
        p_dialog.update(0, 'Aggiunta episodio...')

    new_episodelist = []
    # Extract season and episode numbers, discarding items without them
    for e in episodelist:
        try:
            season_episode = scrapertools.get_season_and_episode(e.title)
            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            new_episodelist.append(e)
        except:
            continue

    # No valid episodes, nothing to save
    if not len(new_episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    # fix: float because the division is done wrong in python 2.x
    t = float(100) / len(new_episodelist)

    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Aggiunta episodio...', e.title)

        season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        if not strm_exists:
            # If season_episode.strm does not exist, create it
            item_strm = Item(action='play_from_library', channel='biblioteca',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_idiomas:
                # if tvshow.nfo carries a filter, pass it on to the generated item_strm
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # If season_episode.nfo does not exist, create it
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)

            item_nfo = e.clone(channel="biblioteca", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))

            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())

        # Continue only when both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Get the infoLabels of the episode
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)
                e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentTitle] = 0
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1
        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    if not silent:
        p_dialog.close()

    if news_in_playcounts:
        # If there are new episodes, mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            # active == 30 is reset to daily updates
            if tvshow_item.active == 30:
                tvshow_item.active = 1
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1
        else:
            # ... and if that went well, refresh the Kodi library
            if config.is_xbmc() and not silent:
                from platformcode import xbmc_library
                xbmc_library.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
if c.endswith("[%s]" % _id): path = filetools.join(raiz, c) break if not path: path = filetools.join(TVSHOWS_PATH, ("%s [%s]" % (base_name, _id)).strip()) logger.info("Creando directorio serie: " + path) try: filetools.mkdir(path) except OSError, exception: if exception.errno != errno.EEXIST: raise # Eliminamos de la lista lo que no sean episodios for it in episodelist: if not scrapertools.get_season_and_episode(it.title): episodelist.remove(it) tvshow_path = filetools.join(path, "tvshow.nfo") if not filetools.exists(tvshow_path): # Creamos tvshow.nfo, si no existe, con la head_nfo, info de la serie y marcas de episodios vistos logger.info("Creando tvshow.nfo: " + tvshow_path) if item.infoLabels['url_scraper']: # head_nfo = "https://www.themoviedb.org/tv/%s\n" % item.infoLabels['tmdb_id'] head_nfo = item.infoLabels['url_scraper'] + "\n" else: head_nfo = "Aqui ira el xml" # TODO item_tvshow = Item(title=item.contentTitle, channel="biblioteca", action="get_temporadas", fanart=item.infoLabels['fanart'], thumbnail=item.infoLabels['thumbnail'], infoLabels=item.infoLabels, path=path.replace(TVSHOWS_PATH, ""))
def add_serie_to_library(item, channel=None): """ Guarda contenido en la libreria de series. Este contenido puede ser uno de estos dos: - La serie con todos los capitulos incluidos en la lista episodelist. - Un solo capitulo descargado previamente en local. Para añadir episodios descargados en local, el item debe tener exclusivamente: - contentSerieName (o show): Titulo de la serie - contentTitle: titulo del episodio para extraer season_and_episode ("1x01 Piloto") - title: titulo a mostrar junto al listado de enlaces -findvideos- ("Reproducir video local") - infoLabels["tmdb_id"] o infoLabels["imdb_id"] - contentType != "movie" - channel = "descargas" - url : ruta local al video @type item: item @param item: item que representa la serie a guardar @type channel: modulo @param channel: canal desde el que se guardara la serie. Por defecto se importara item.from_channel o item.channel """ logger.info("show=#" + item.show + "#") # logger.debug(item.tostring('\n')) if item.channel == "descargas": itemlist = [item.clone()] else: # Esta marca es porque el item tiene algo más aparte en el atributo "extra" item.action = item.extra if isinstance(item.extra, str) and "###" in item.extra: item.action = item.extra.split("###")[0] item.extra = item.extra.split("###")[1] if item.from_action: item.__dict__["action"] = item.__dict__.pop("from_action") if item.from_channel: item.__dict__["channel"] = item.__dict__.pop("from_channel") if not channel: try: channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel]) except ImportError: exec "import channels." 
+ item.channel + " as channel" # Obtiene el listado de episodios itemlist = getattr(channel, item.action)(item) # Eliminamos de la lista lo q no sean episodios for it in itemlist: if not scrapertools.get_season_and_episode(it.title): itemlist.remove(it) if not itemlist: platformtools.dialog_ok("Libreria", "ERRORE, la serie non è stata aggiunta alla libreria", "Impossibile ottenere qualsiasi episodio") logger.error("La serie %s no se ha podido añadir a la biblioteca. No se ha podido obtener ningun episodio" % item.show) return insertados, sobreescritos, fallidos = save_library_tvshow(item, itemlist) if fallidos == -1: platformtools.dialog_ok("Libreria", "ERRORE, la serie non è stata aggiunta alla libreria") logger.error("La serie %s no se ha podido añadir a la biblioteca" % item.show) elif fallidos > 0: platformtools.dialog_ok("Libreria", "ERRORE, la serie non è stata aggiunta completamente alla libreria") logger.error("No se han podido añadir %s episodios de la serie %s a la biblioteca" % (fallidos, item.show)) else: platformtools.dialog_ok("Libreria", "La serie è stata aggiunta alla libreria") logger.info("[launcher.py] Se han añadido %s episodios de la serie %s a la biblioteca" % (insertados, item.show))
def get_episodios(item):
    """List the episodes of a show from a newpct1 search page.

    Parses the <ul class="buscar-list"> block. Episode info comes either in
    styled <span> fragments (season / chapter / language) or as a plain title
    that is parsed with get_season_and_episode. Pagination is followed via a
    ">> Página siguiente" item.
    """
    logger.info("[newpct1.py] get_episodios")
    itemlist = []
    logger.info("[newpct1.py] get_episodios url=" + item.url)
    data = re.sub(r'\n|\r|\t|\s{2}|<!--.*?-->|<i class="icon[^>]+"></i>', "", scrapertools.cache_page(item.url))
    # The page is iso-8859-1; normalise it to utf-8
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    logger.info("[newpct1.py] data=" + data)

    patron = '<ul class="buscar-list">(.*?)</ul>'
    # logger.info("[newpct1.py] patron=" + patron)
    fichas = scrapertools.get_match(data, patron)
    # logger.info("[newpct1.py] matches=" + str(len(fichas)))

    # Sample card markup:
    # <li><a href="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"><img src="http://www.newpct1.com/pictures/c/minis/1880_forever.jpg" alt="Serie Forever 1x01"></a> <div class="info"> ... </div></li>
    # logger.info("[newpct1.py] get_episodios: " + fichas)

    patron = '<li><a href="([^"]+).*?'  # url
    patron += '<img src="([^"]+)".*?'  # thumbnail
    patron += '<h2 style="padding(.*?)/h2>'  # title, language and quality

    matches = re.compile(patron, re.DOTALL).findall(fichas)
    # logger.info("[newpct1.py] get_episodios matches: " + str(len(matches)))
    for scrapedurl, scrapedthumbnail, scrapedinfo in matches:
        try:
            url = scrapedurl
            if '</span>' in scrapedinfo:
                # Styled variant: season/chapter/language come in <span> blocks
                # logger.info("[newpct1.py] get_episodios: scrapedinfo="+scrapedinfo)
                patron = '<span style=".*?">\[\s*(.*?)\]</span>.*?'  # season
                patron += '<span style=".*?">\[\s*(.*?)\].*?'  # chapter
                patron += ';([^/]+)'  # language
                info_extra = re.compile(patron, re.DOTALL).findall(scrapedinfo)
                (temporada, capitulo, idioma) = info_extra[0]
                # logger.info("[newpct1.py] get_episodios: temporada=" + temporada)
                # logger.info("[newpct1.py] get_episodios: capitulo=" + capitulo)
                # logger.info("[newpct1.py] get_episodios: idioma=" + idioma)
                if '">' in idioma:
                    idioma = " [" + scrapertools.find_single_match(idioma, '">([^<]+)').strip() + "]"
                elif ' ' in idioma:
                    idioma = " [" + scrapertools.find_single_match(idioma, ' ([^<]+)').strip() + "]"
                else:
                    idioma = ""
                title = item.title + " (" + temporada.strip() + "x" + capitulo.strip() + ")" + idioma
            else:
                # Plain variants, e.g.:
                # <h2 style="padding:0;">The Big Bang Theory - Temporada 6 [HDTV][Cap.602][Español Castellano]</h2>
                # <h2 style="padding:0;">The Beast - Temporada 1 [HDTV] [Capítulo 13] [Español]</h2>
                # <h2 style="padding:0;">The Beast - Temp.1 [DVD-DVB][Cap.103][Spanish]</h2>
                try:
                    temp, cap = scrapertools.get_season_and_episode(scrapedinfo).split('x')
                except:
                    # Derive "season x episode" by hand
                    patron = re.compile('Cap.*?\s*([\d]+)', re.IGNORECASE)
                    info_extra = patron.search(scrapedinfo)
                    if len(str(info_extra.group(1))) >= 3:
                        # e.g. "602" -> season 6, episode 02
                        cap = info_extra.group(1)[-2:]
                        temp = info_extra.group(1)[:-2]
                    else:
                        cap = info_extra.group(1)
                        patron = 'Temp.*?\s*([\d]+)'
                        temp = re.compile(patron, re.IGNORECASE).search(scrapedinfo).group(1)
                title = item.title + " (" + temp + 'x' + cap + ")"
            # logger.info("[newpct1.py] get_episodios: fanart= " + item.fanart)
            itemlist.append(Item(channel=__channel__, action="findvideos", title=title, url=url,
                                 thumbnail=item.thumbnail, show=item.show, fanart=item.fanart))
        except:
            logger.info("[newpct1.py] ERROR al añadir un episodio")

    if "pagination" in data:
        patron = '<ul class="pagination">(.*?)</ul>'
        paginacion = scrapertools.get_match(data, patron)
        # logger.info("[newpct1.py] get_episodios: paginacion= " + paginacion)
        if "Next" in paginacion:
            url_next_page = scrapertools.get_match(paginacion, '<a href="([^>]+)>Next</a>')[:-1]
            url_next_page = url_next_page.replace(" ", "%20")
            # logger.info("[newpct1.py] get_episodios: url_next_page= " + url_next_page)
            itemlist.append(Item(channel=__channel__, action="get_episodios",
                                 title=">> Página siguiente", url=url_next_page))

    return itemlist
def save_library_episodes(path, episodelist, serie, silent=False):
    """
    Saves in the given path every episode included in the episodelist.

    @type path: str
    @param path: folder where the episodes are saved
    @type episodelist: list
    @param episodelist: items representing the episodes to save
    @type serie: item
    @param serie: show the episodes belong to
    @type silent: bool
    @param silent: whether the progress notification is shown
    @rtype insertados: int
    @return: number of inserted episodes
    @rtype sobreescritos: int
    @return: number of overwritten episodes
    @rtype fallidos: int
    @return: number of failed episodes
    """
    logger.info("streamondemand.platformcode.library save_library_episodes")

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("streamondemand.platformcode.library save_library_episodes No hay lista de episodios, "
                    "salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0

    # silent disables the progress dialog (used by library_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress('streamondemand', 'Aggiunta episodi...')
        p_dialog.update(0, 'Aggiunta episodio...')

    # fix: float because the division is done wrong in python 2.x
    t = float(100) / len(episodelist)

    addon_name = sys.argv[0].strip()
    if not addon_name or addon_name.startswith("default.py"):
        addon_name = "plugin://plugin.video.streamondemand/"

    for i, e in enumerate(episodelist):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Aggiunta episodio...', e.title)

        # Add everything except the "Add this show..." / "Download this show..." pseudo entries
        if e.action == "add_serie_to_library" or e.action == "download_all_episodes":
            continue

        season_episode = scrapertools.get_season_and_episode(e.title.lower())

        e.infoLabels = serie.infoLabels
        e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")

        filename = "{0}.strm".format(season_episode)
        fullfilename = filetools.join(path, filename)
        nuevo = not filetools.exists(fullfilename)

        if e.infoLabels.get("tmdb_id"):
            tmdb.find_and_set_infoLabels_tmdb(e, config.get_setting("scrap_ask_name") == "true")
        e.strm = True
        # For debugging we write a .json next to the .strm to inspect the saved parameters
        filetools.write(fullfilename + ".json", e.tojson())

        # TODO temporary fix: sometimes playback from the Kodi library fails when this is set,
        # e.g. show "doctor who" on seriesblanco
        e.infoLabels['tmdb_id'] = ""
        if filetools.write(fullfilename, '{addon}?{url}'.format(addon=addon_name, url=e.tourl())):
            if nuevo:
                insertados += 1
            else:
                sobreescritos += 1
        else:
            fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    if not silent:
        p_dialog.close()

    # If episodes were added, refresh the show in the Kodi library
    # NOTE(review): "fallidos >= 0" is always true here — presumably intended
    # to mean "no fatal failure"; confirm before changing.
    if fallidos >= 0:
        # TODO work out why the special:// path is needed here
        ruta = "special://home/userdata/addon_data/plugin.video.streamondemand/library/SERIES/" + \
               "{0} [{1}]".format(serie.contentSerieName.strip().lower(), serie.channel).lower() + "/"
        update(ruta)

    logger.debug("insertados= {0}, sobreescritos={1}, fallidos={2}".format(insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
def savelibrary(titulo="", url="", thumbnail="", server="", plot="", canal="", category="Cine", Serie="",
                verbose=True, accion="play_from_library", pedirnombre=True, subtitle="", extra=""):
    """Save one entry in the library as a .strm file.

    Movies go to MOVIES_PATH/<title>.strm; series episodes go to
    SERIES_PATH/<Serie>/<SxEE>.strm. The .strm content is the plugin URL that
    replays the item. Returns 1 when a new file was created, 0 otherwise.
    Raises IOError when the file cannot be written.
    """
    logger.info("[library.py] savelibrary titulo=" + titulo + ", url=" + url + ", server=" + server +
                ", canal=" + canal + ", category=" + category + ", serie=" + Serie + ", accion=" + accion +
                ", subtitle=" + subtitle)

    if category != "Series":
        # JUR - internal debugging for 2.14
        category = "Cine"

    if category == "Cine":
        # Strip disallowed filename characters from the title
        filename = string.translate(titulo, allchars, deletechars) + ".strm"
        fullfilename = os.path.join(MOVIES_PATH, filename)
    elif category == "Series":
        if Serie == "":
            # TODO: add a proper len>0 check
            logger.info('[library.py] savelibrary ERROR: intentando añadir una serie y serie=""')
            pathserie = SERIES_PATH
        else:
            # Strip characters not allowed in folder names from the show name
            Serie = title_to_folder_name(Serie)
            pathserie = xbmc.translatePath(os.path.join(SERIES_PATH, Serie))
        if not os.path.exists(pathserie):
            logger.info("[library.py] savelibrary Creando directorio serie:" + pathserie)
            try:
                os.mkdir(pathserie)
            except:
                os.mkdir(pathserie)
        # Clean the title to use it as the file name ("SxEE.strm")
        from core import scrapertools
        filename = scrapertools.get_season_and_episode(titulo) + ".strm"
        # filename=string.translate(titulo,allchars,deletechars)+".strm"
        fullfilename = os.path.join(pathserie, filename)
    else:
        # Any other category goes to the library root for now
        fullfilename = os.path.join(LIBRARY_PATH, filename)

    if os.path.exists(fullfilename):
        logger.info("[library.py] savelibrary el fichero existe. Se sobreescribe")
        nuevo = 0
    else:
        nuevo = 1

    try:
        LIBRARYfile = open(fullfilename, "w")
    except IOError:
        logger.info("[library.py] savelibrary Error al grabar el archivo " + fullfilename)
        nuevo = 0
        raise

    # itemurl = '%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s' % ( sys.argv[ 0 ] , canal , "strm" , urllib.quote_plus( category ) , urllib.quote_plus( titulo ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server )
    # plot and thumbnail removed from the URL
    addon_name = sys.argv[0]
    if addon_name.strip() == "":
        addon_name = "plugin://plugin.video.pelisalacarta/"
    itemurl = '%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&subtitle=%s&extra=%s' % (
        addon_name, canal, accion, urllib.quote_plus(category), urllib.quote_plus(titulo),
        urllib.quote_plus(url), "", "", server, Serie, urllib.quote_plus(subtitle),
        urllib.quote_plus(extra))
    logger.info("[library.py] savelibrary fullfilename=%s , itemurl=%s" % (fullfilename, itemurl))
    LIBRARYfile.write(itemurl)
    # LIBRARYfile.write(urllib.quote_plus(url)+'\n')
    # LIBRARYfile.write(urllib.quote_plus(thumbnail)+'\n')
    # LIBRARYfile.write(urllib.quote_plus(server)+'\n')
    # LIBRARYfile.write(urllib.quote_plus(downloadtools.limpia_nombre_excepto_1(plot))+'\n')
    LIBRARYfile.flush(); LIBRARYfile.close()
    logger.info("[library.py] savelibrary acaba")
    return nuevo
def download_all_episodes(item):
    """
    Download every episode of the series carried by `item`.

    Re-runs the channel action that produced the episode list, then for each
    episode resolves its mirrors and downloads the first one that works.
    Shows a progress dialog; returns early on user cancel or aborted download.
    """
    from servers import servertools
    from core import downloadtools
    from core import scrapertools
    # This is not very elegant...
    # This marker means the item carries extra data in its "extra" attribute
    if item.extra:
        action = item.extra
    if item.refered_action:
        action = item.refered_action
    # NOTE(review): if neither item.extra nor item.refered_action is set,
    # `action` is undefined here and the next line raises NameError — TODO confirm
    # callers always set one of them.
    if "###" in action:
        item.extra = action.split("###")[1]
        action = action.split("###")[0]
    # Import the channel module
    channel = ImportarCanal(item.channel)
    # Run the channel function that lists the episodes (Python 2 exec statement)
    exec "itemlist = channel."+action+"(item)"
    # Drop these two pseudo-entries from the list (if present)
    # NOTE(review): removing while iterating can skip the element right after a
    # removed one — left as-is to preserve behavior.
    for episodio in itemlist:
        if episodio.action=="add_serie_to_library" or episodio.action=="download_all_episodes":
            itemlist.remove(episodio)
    # Open the progress dialog
    pDialog = guitools.Dialog_Progress('pelisalacarta', 'Descargando ' + item.show)
    for x, episodio in enumerate(itemlist):
        # If cancel was pressed, abort everything
        if pDialog.iscanceled():
            return
        # Reduce the title to its season/episode tag (e.g. "1x05")
        episodio.title = scrapertools.get_season_and_episode(episodio.title)
        # Update the progress bar
        pDialog.Actualizar(((x)*100)/len(itemlist), 'Descargando ' + item.show, 'Descargando episodio: ' + episodio.title)
        # Extract the mirrors for this episode
        if hasattr(channel, 'findvideos'):
            mirrors_itemlist = channel.findvideos(episodio)
        else:
            mirrors_itemlist = findvideos(episodio, episodio.channel)
        descargado = False
        # Download the first mirror that works
        for mirror_item in mirrors_itemlist:
            if hasattr(channel, 'play'):
                video_items = channel.play(mirror_item)
            else:
                video_items = [mirror_item]
            if len(video_items)>0:
                video_item = video_items[0]
                # Check the video is actually available (no dialog shown)
                video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing( video_item.server , video_item.url , video_password="" , muestra_dialogo=False)
                # Download it
                if puedes:
                    # The highest-quality video is the last of video_urls
                    devuelve = downloadtools.downloadbest(video_urls, item.show+" "+episodio.title+" ["+video_item.server+"]", continuar=False)
                    if devuelve==0:
                        logger.info("[launcher.py] download_all_episodes - Archivo Descargado")
                        descargado = True
                        break
                    elif devuelve==-1:
                        # User aborted the download: close dialog and bail out
                        pDialog.Cerrar()
                        logger.info("[launcher.py] download_all_episodes - Descarga abortada")
                        guitools.Dialog_OK("pelisalacarta" , "La descarga ha sido cancelada")
                        return
                    else:
                        # This mirror failed; try the next one
                        continue
    pDialog.Cerrar()
def series_library(item):
    """
    Build the list of episode Items used to add/update a tv-vip series in
    the library.

    @param item: series item; item.url points to the series index.json
    @return: list of Item, one per episode, with action="findvideos"
    """
    logger.info("pelisalacarta.channels.tvvip series_library")
    # Function used only to add/update series in the library
    lista_episodios = []
    show = item.show.strip()
    data_serie = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data_serie = jsontools.load_json(data_serie)

    def _make_episode(fulltitle, url, fallback_suffix):
        # If the title has no recognizable season/episode tag, append the
        # fallback ("<season>x00"). fallback_suffix is a zero-arg callable so
        # the season number is only read when actually needed (as before).
        try:
            scrapertools.get_season_and_episode(fulltitle)
        except:
            fulltitle += fallback_suffix()
        return Item(channel=item.channel, action="findvideos", server="", title=fulltitle,
                    extra=url, url=item.url, fulltitle=fulltitle, contentTitle=fulltitle,
                    show=show)

    def _season_episodes(data, fallback_suffix):
        # One season payload -> list of episode Items. "sortedRepoChilds"
        # entries are dicts (keys 'id'/'name'); plain "repoChilds" entries are
        # bare id strings.
        episodios = []
        if data["sortedRepoChilds"]:
            for child in data["sortedRepoChilds"]:
                url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"]
                name = child['name']
                # Insert " - " before the trailing episode number; guard
                # against titles without any space (old code raised IndexError)
                if " " in name:
                    base, num = name.rsplit(" ", 1)
                    fulltitle = base + " - " + num
                else:
                    fulltitle = name
                episodios.append(_make_episode(fulltitle, url, fallback_suffix))
        else:
            for child in data["repoChilds"]:
                url = "http://tv-vip.com/json/repo/%s/index.json" % child
                fulltitle = child.capitalize().replace('_', ' ')
                episodios.append(_make_episode(fulltitle, url, fallback_suffix))
        return episodios

    # Series that the web lists split into seasons
    if data_serie["sortedPlaylistChilds"]:
        for season_name in data_serie["sortedPlaylistChilds"]:
            url_season = "http://tv-vip.com/json/playlist/%s/index.json" % season_name['id']
            data = scrapertools.anti_cloudflare(url_season, host=host, headers=headers)
            data = jsontools.load_json(data)
            lista_episodios.extend(
                _season_episodes(data, lambda d=data: " " + str(d['seasonNumber']) + "x00"))
    # Single-season series served directly
    else:
        lista_episodios.extend(_season_episodes(data_serie, lambda: " 1x00"))
    return lista_episodios
def series_library(item):
    """
    Add/update a tv-vip series in the library and report the outcome.

    Builds one Item per episode (cloned from `item`, action="findvideos"),
    hands the list to platformcode.library, and returns a one-element
    itemlist with a success or error message for the UI.
    """
    logger.info("pelisalacarta.channels.tvvip series_library")
    # Function used only to add/update series in the library
    itemlist = []
    lista_episodios = []
    show = item.show.strip()
    data_serie = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data_serie = jsontools.load_json(data_serie)
    # Series that the web lists split into seasons
    if data_serie["sortedPlaylistChilds"]:
        for season_name in data_serie["sortedPlaylistChilds"]:
            url_season = "http://tv-vip.com/json/playlist/%s/index.json" % season_name['id']
            data = scrapertools.anti_cloudflare(url_season, host=host, headers=headers)
            data = jsontools.load_json(data)
            if data["sortedRepoChilds"]:
                for child in data["sortedRepoChilds"]:
                    url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"]
                    # Insert " - " before the trailing token (the episode number)
                    fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
                    try:
                        check_filename = scrapertools.get_season_and_episode(fulltitle)
                    except:
                        # No season/episode tag in the title: append "<season>x00"
                        fulltitle += " " + str(data['seasonNumber']) + "x00"
                    lista_episodios.append(
                        item.clone(action="findvideos", server="", title=fulltitle, extra=url,
                                   url=item.url, fulltitle=fulltitle, show=show))
            else:
                # "repoChilds" entries are bare id strings
                for child in data["repoChilds"]:
                    url = "http://tv-vip.com/json/repo/%s/index.json" % child
                    fulltitle = child.capitalize().replace('_', ' ')
                    try:
                        check_filename = scrapertools.get_season_and_episode(fulltitle)
                    except:
                        fulltitle += " " + str(data['seasonNumber']) + "x00"
                    lista_episodios.append(
                        item.clone(action="findvideos", server="", title=fulltitle, extra=url,
                                   url=item.url, fulltitle=fulltitle, show=show))
    # Single-season series served directly
    else:
        data = data_serie
        if data["sortedRepoChilds"]:
            for child in data["sortedRepoChilds"]:
                url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"]
                fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
                try:
                    check_filename = scrapertools.get_season_and_episode(fulltitle)
                except:
                    # Single season: default to season 1
                    fulltitle += " 1x00"
                lista_episodios.append(item.clone(action="findvideos", server="", title=fulltitle,
                                                  url=item.url, extra=url, fulltitle=fulltitle,
                                                  show=show))
        else:
            for child in data["repoChilds"]:
                url = "http://tv-vip.com/json/repo/%s/index.json" % child
                fulltitle = child.capitalize().replace('_', ' ')
                try:
                    check_filename = scrapertools.get_season_and_episode(fulltitle)
                except:
                    fulltitle += " 1x00"
                lista_episodios.append(item.clone(action="findvideos", server="", title=fulltitle,
                                                  url=item.url, extra=url, fulltitle=fulltitle,
                                                  show=show))
    try:
        from platformcode import library
        library.save_library_tvshow(item, lista_episodios)
        # Only persist the series entry itself when the user explicitly added it
        if "Añadir esta serie" in item.title:
            library.save_tvshow_in_file(item)
        error = False
    except:
        import traceback
        logger.info(traceback.format_exc())
        error = True
    if not error:
        itemlist.append(Item(channel=item.channel, title='Serie añadida correctamente a la biblioteca',
                             action="", folder=False))
    else:
        itemlist.append(Item(channel=item.channel, title='ERROR. Han ocurrido uno o varios errores en el proceso',
                             action="", folder=False))
    return itemlist
def verify_playcount_series(item, path):
    """
    Review and repair the PlayCount of a series that has become out of sync
    with the actual list of episodes in its folder. Entries for missing
    episodes, seasons, or series are created with the "not seen" mark.
    Afterwards the Season/Series counters are re-verified.

    Returns (it, True) if the .nfo was updated, (item, False) otherwise
    (usually on error). The caller uses this status to update the
    "verify_playcount" option in "videolibrary.py": the intention is to give
    one repairing pass and then deactivate the option (it can be reactivated
    from the Video Library menu).
    """
    # Fix: the docstring used to sit AFTER this call, making it a dead string.
    logger.info()
    #logger.debug("item:\n" + item.tostring('\n'))

    # If verification has never been done, we force it
    # (equivalent to the old `if not estado or estado == False: ... else: ...`)
    estado = config.get_setting("verify_playcount", "videolibrary")
    estado = not estado

    if item.contentType == 'movie':
        # This is only for Series
        return (item, False)

    if filetools.exists(path):
        nfo_path = filetools.join(path, "tvshow.nfo")
        # We get the .nfo of the Series
        head_nfo, it = videolibrarytools.read_nfo(nfo_path)
        if not hasattr(it, 'library_playcounts') or not it.library_playcounts:
            # If the .nfo does not have library_playcounts we create it
            logger.error('** It does not have PlayCount')
            it.library_playcounts = {}

        # We get the archives of the episodes
        raiz, carpetas_series, ficheros = next(filetools.walk(path))

        # Create an entry for each strm found
        estado_update = False
        for i in ficheros:
            if i.endswith('.strm'):
                season_episode = scrapertools.get_season_and_episode(i)
                if not season_episode:
                    # The file does not include the season and episode number
                    continue
                season, episode = season_episode.split("x")
                if season_episode not in it.library_playcounts:
                    # The episode is not included: add it as "not seen"
                    it.library_playcounts.update({season_episode: 0})
                    estado_update = True
                if 'season %s' % season not in it.library_playcounts:
                    # Season is not included: add it as "not seen"
                    it.library_playcounts.update({'season %s' % season: 0})
                    estado_update = True
                if it.contentSerieName not in it.library_playcounts:
                    # Series not included: add it as "not seen".
                    # Fix: the old code checked `it.contentSerieName` but
                    # inserted `item.contentSerieName`, so a mismatched name
                    # was re-inserted on every pass; now the same key is used
                    # for both check and insert.
                    it.library_playcounts.update({it.contentSerieName: 0})
                    estado_update = True
        if estado_update:
            # We mark that we have updated something
            logger.error('** Update status: ' + str(estado) + ' / PlayCount: ' + str(it.library_playcounts))
            estado = estado_update

        # Verify that if all episodes of a season are marked, the season is marked too.
        # Iterate over a snapshot of the keys: check_season_playcount may mutate the dict.
        for key in list(it.library_playcounts):
            if key.startswith("season"):
                season = scrapertools.find_single_match(key, r'season (\d+)')  # season number
                it = check_season_playcount(it, season)

        # We save the changes to item.nfo
        if filetools.write(nfo_path, head_nfo + it.tojson()):
            return (it, estado)
    return (item, False)
def save_library_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Saves to the given path all episodes included in episodelist.

    @type path: str
    @param path: folder where the episodes are saved
    @type episodelist: list
    @param episodelist: items representing the episodes to save
    @type serie: item
    @param serie: series the episodes belong to
    @type silent: bool
    @param silent: whether the progress notification is shown
    @param overwrite: allow overwriting existing files
    @type overwrite: bool
    @rtype insertados: int
    @return: number of episodes inserted
    @rtype sobreescritos: int
    @return: number of episodes overwritten
    @rtype fallidos: int
    @return: number of episodes that failed (-1 if tvshow.nfo update failed
             or every episode failed)
    """
    logger.info("pelisalacarta.platformcode.library save_library_episodes")

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("pelisalacarta.platformcode.library save_library_episodes No hay lista de episodios, "
                    "salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # Silent means no progress dialog (used by library_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress('pelisalacarta', 'Añadiendo episodios...')
        p_dialog.update(0, 'Añadiendo episodio...')

    # fix float because integer division truncates in python 2.x
    t = float(100) / len(episodelist)

    for i, e in enumerate(episodelist):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Añadiendo episodio...', e.title)

        # Add everything except the "Añadir esta serie..."/"Descargar esta serie..." pseudo-items
        if e.action == "add_serie_to_library" or e.action == "download_all_episodes":
            continue
        try:
            # "descargas" items keep the real title in contentTitle
            if e.channel == "descargas":
                season_episode = scrapertools.get_season_and_episode(e.contentTitle.lower())
            else:
                season_episode = scrapertools.get_season_and_episode(e.title.lower())
            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            # Normalize to "<season>x<NN>" with zero-padded episode
            season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        except:
            # No recognizable season/episode: skip this item
            continue

        strm_path = filetools.join(path, "%s.strm" % season_episode)
        if not filetools.exists(strm_path):
            # If season_episode.strm does not exist, create it
            item_strm = e.clone(action='play_from_library', channel='biblioteca',
                                strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # If the channel has a filter, pass the stored name so it filters correctly
            if item_strm.list_idiomas:
                # coming from library_service it is read from tvshow.nfo, property "library_filter_show"
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show.get(serie.channel, "")
                # if the series was just added we take it from the title.
                else:
                    item_strm.library_filter_show = serie.title
                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        item_nfo = None
        if not filetools.exists(nfo_path) and e.infoLabels.get("tmdb_id"):
            # If season_episode.nfo does not exist, create it
            tmdb.find_and_set_infoLabels_tmdb(e)
            item_nfo = e.clone(channel="biblioteca", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))
            # First line of the .nfo is the scraper URL for this episode
            url_scraper = "https://www.themoviedb.org/tv/%s/season/%s/episode/%s\n" % (item_nfo.infoLabels['tmdb_id'], item_nfo.contentSeason, item_nfo.contentEpisodeNumber)
            filetools.write(nfo_path, url_scraper + item_nfo.tojson())

        # Only if both season_episode.nfo and season_episode.strm exist do we continue
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())
        if filetools.exists(nfo_path) and filetools.exists(strm_path):
            nuevo = not filetools.exists(json_path)
            if nuevo or overwrite:
                # Get the episode infoLabels from the .nfo
                if not item_nfo:
                    item_nfo = Item().fromjson(filetools.read(nfo_path, 1))
                e.infoLabels = item_nfo.infoLabels
                if filetools.write(json_path, e.tojson()):
                    if nuevo:
                        logger.info("pelisalacarta.platformcode.library savelibrary Insertado: %s" % json_path)
                        insertados += 1
                        # Mark episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the series as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentTitle] = 0
                    else:
                        logger.info("pelisalacarta.platformcode.library savelibrary Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("pelisalacarta.platformcode.library savelibrary Fallido: %s" % json_path)
                    fallidos += 1
        else:
            logger.info("pelisalacarta.platformcode.library savelibrary Fallido: %s" % json_path)
            fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    if not silent:
        p_dialog.close()

    if news_in_playcounts:
        # If there are new episodes, mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            url_scraper = filetools.read(tvshow_path, 0, 1)
            tvshow_item = Item().fromjson(filetools.read(tvshow_path, 1))
            tvshow_item.library_playcounts.update(news_in_playcounts)
            filetools.write(tvshow_path, url_scraper + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1
        # ... and refresh the Kodi library
        if config.is_xbmc():
            update(FOLDER_TVSHOWS, filetools.basename(path) + "/")

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Saves to the given path all episodes included in episodelist.

    @type path: str
    @param path: folder where the episodes are saved
    @type episodelist: list
    @param episodelist: items representing the episodes to save
    @type serie: item
    @param serie: series the episodes belong to
    @type silent: bool
    @param silent: whether the progress notification is shown
    @param overwrite: allow overwriting existing files
    @type overwrite: bool
    @rtype insertados: int
    @return: number of episodes inserted
    @rtype sobreescritos: int
    @return: number of episodes overwritten
    @rtype fallidos: int
    @return: number of episodes that failed (-1 on tvshow.nfo failure or
             when every episode failed)
    """
    logger.info()

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # List every file of the series once, so we don't have to check existence one by one
    raiz, carpetas_series, ficheros = next(filetools.walk(path))
    ficheros = [filetools.join(path, f) for f in ficheros]

    # Collect season/episode tags present in the folder that have NO .strm
    # (broken videolibrary structure); those episodes are skipped below.
    nostrm_episodelist = []
    for root, folders, files in filetools.walk(path):
        for file in files:
            season_episode = scrapertools.get_season_and_episode(file)
            if season_episode == "" or filetools.exists(filetools.join(path, "%s.strm" % season_episode)):
                continue
            nostrm_episodelist.append(season_episode)
    nostrm_episodelist = sorted(set(nostrm_episodelist))

    # Silent means no progress dialog (used by videolibrary_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress(config.get_localized_string(20000), config.get_localized_string(60064))
        p_dialog.update(0, config.get_localized_string(60065))

    channel_alt = generictools.verify_channel(serie.channel)
    # Prepare the emergency urls: does the channel want them cached?
    emergency_urls_stat = config.get_setting("emergency_urls", channel_alt)
    emergency_urls_succ = False
    channel = __import__('channels.%s' % channel_alt, fromlist=["channels.%s" % channel_alt])
    if serie.torrent_caching_fail:
        # If the torrent conversion process failed, do not cache
        emergency_urls_stat = 0
        del serie.torrent_caching_fail

    new_episodelist = []
    # Get the season and episode number and discard items that have none
    tags = []
    if config.get_setting("enable_filter", "videolibrary"):
        tags = [x.strip() for x in config.get_setting("filters", "videolibrary").lower().split(",")]
    for e in episodelist:
        headers = {}
        if e.headers:
            headers = e.headers
        # Skip episodes whose title matches a user-configured filter tag
        if tags != [] and tags != None and any(tag in e.title.lower() for tag in tags):
            continue
        try:
            season_episode = scrapertools.get_season_and_episode(e.title)
            if not season_episode:
                continue

            # If the emergency-url option is set, it is added to each episode after
            # the channel's Findvideos has run
            if e.emergency_urls and isinstance(e.emergency_urls, dict):
                del e.emergency_urls                                    # Clear previous traces
            json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())  # episode .json path
            if emergency_urls_stat == 1 and not e.emergency_urls and e.contentType == 'episode':
                # Save emergency urls?
                if not silent:
                    p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title)  # progress dialog
                if json_path in ficheros:                               # If the .json exists, take the urls from it
                    if overwrite:                                       # but only if .json files are being overwritten
                        json_epi = Item().fromjson(filetools.read(json_path))   # Read the .json
                        if json_epi.emergency_urls:                     # if the emergency urls exist...
                            e.emergency_urls = json_epi.emergency_urls  # ... copy them
                        else:                                           # and if not...
                            e = emergency_urls(e, channel, json_path, headers=headers)  # ... generate them
                else:
                    e = emergency_urls(e, channel, json_path, headers=headers)  # Episode does not exist: generate the urls
                if e.emergency_urls:                                    # If we now have urls...
                    emergency_urls_succ = True                          # ... success: the .nfo will be marked
            elif emergency_urls_stat == 2 and e.contentType == 'episode':
                # Delete emergency urls?
                if e.emergency_urls:
                    del e.emergency_urls
                emergency_urls_succ = True                              # ... success: the .nfo will be marked
            elif emergency_urls_stat == 3 and e.contentType == 'episode':
                # Refresh emergency urls?
                if not silent:
                    p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title)  # progress dialog
                e = emergency_urls(e, channel, json_path, headers=headers)  # generate the urls
                if e.emergency_urls:                                    # If we now have urls...
                    emergency_urls_succ = True                          # ... success: the .nfo will be marked

            # In multichannel series the current channel's infolabels prevail over the original's
            if not e.infoLabels["tmdb_id"] or (serie.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != serie.infoLabels["tmdb_id"]):
                e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            if e.videolibray_emergency_urls:
                del e.videolibray_emergency_urls
            if e.channel_redir:
                del e.channel_redir                                     # clear redirection marks
            new_episodelist.append(e)
        except:
            if e.contentType == 'episode':
                logger.error("No se ha podido guardar las urls de emergencia de %s en la videoteca" % e.contentTitle)
                logger.error(traceback.format_exc())
            continue

    # No episode list, nothing to save
    if not len(new_episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    # fix float because integer division truncates in python 2.x
    try:
        t = float(100) / len(new_episodelist)
    except:
        t = 0

    last_season_episode = ''
    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), config.get_localized_string(60064), e.title)

        # Track the highest season/episode seen, to detect finished series below
        high_sea = e.contentSeason
        high_epi = e.contentEpisodeNumber
        if scrapertools.find_single_match(e.title, '[a|A][l|L]\s*(\d+)'):
            # "... al NN" titles cover a range: take the final episode number
            high_epi = int(scrapertools.find_single_match(e.title, 'al\s*(\d+)'))
        max_sea = e.infoLabels["number_of_seasons"]
        max_epi = 0
        if e.infoLabels["number_of_seasons"] and (e.infoLabels["temporada_num_episodios"] or e.infoLabels["number_of_seasons"] == 1):
            if e.infoLabels["number_of_seasons"] == 1 and e.infoLabels["number_of_episodes"]:
                max_epi = e.infoLabels["number_of_episodes"]
            else:
                max_epi = e.infoLabels["temporada_num_episodios"]

        season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

        if season_episode in nostrm_episodelist:
            # Episode files exist without a .strm: broken videolibrary structure
            logger.error('Error en la estructura de la Videoteca: Serie ' + serie.contentSerieName + ' ' + season_episode)
            continue
        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        if not strm_exists:
            # If season_episode.strm does not exist, create it
            item_strm = Item(action='play_from_library', channel='videolibrary',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_language:
                # if tvshow.nfo has a filter, pass it to the generated item_strm
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show
                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # If season_episode.nfo does not exist, create it
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)
            item_nfo = e.clone(channel="videolibrary", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))
            if item_nfo.emergency_urls:
                del item_nfo.emergency_urls     # Only kept in the episode's .json
            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())

        # Only if both season_episode.nfo and season_episode.strm exist do we continue
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Get the episode infoLabels
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)

                # In multichannel series the current channel's infolabels prevail over the original's
                if not e.infoLabels["tmdb_id"] or (item_nfo.infoLabels["tmdb_id"] \
                            and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]):
                    e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Mark episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the series as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentSerieName] = 0
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1
        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    #logger.debug('high_sea x high_epi: %sx%s' % (str(high_sea), str(high_epi)))
    #logger.debug('max_sea x max_epi: %sx%s' % (str(max_sea), str(max_epi)))

    if not silent:
        p_dialog.close()

    if news_in_playcounts or emergency_urls_succ or serie.infoLabels["status"] == "Ended" or serie.infoLabels["status"] == "Canceled":
        # If there are new episodes, mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            # If inserting/deleting emergency urls in the episode .jsons succeeded, mark the .nfo
            if emergency_urls_succ:
                if tvshow_item.emergency_urls and not isinstance(tvshow_item.emergency_urls, dict):
                    del tvshow_item.emergency_urls
                if emergency_urls_stat in [1, 3]:
                    # Save/refresh links operation
                    if not tvshow_item.emergency_urls:
                        tvshow_item.emergency_urls = dict()
                    if tvshow_item.library_urls.get(serie.channel, False):
                        tvshow_item.emergency_urls.update({serie.channel: True})
                elif emergency_urls_stat == 2:
                    # Delete links operation
                    if tvshow_item.emergency_urls and tvshow_item.emergency_urls.get(serie.channel, False):
                        tvshow_item.emergency_urls.pop(serie.channel, None)  # remove the .nfo entry

            if tvshow_item.active == 30:
                tvshow_item.active = 1
            if tvshow_item.infoLabels["tmdb_id"] == serie.infoLabels["tmdb_id"]:
                tvshow_item.infoLabels = serie.infoLabels
                tvshow_item.infoLabels["title"] = tvshow_item.infoLabels["tvshowtitle"]

            if max_sea == high_sea and max_epi == high_epi and (tvshow_item.infoLabels["status"] == "Ended" or
                            tvshow_item.infoLabels["status"] == "Canceled") and insertados == 0 and fallidos == 0:
                # Series "Ended" or "Canceled" and fully downloaded: stop periodic updates
                tvshow_item.active = 0
                logger.debug("%s [%s]: serie 'Terminada' o 'Cancelada'. Se desactiva la actualización periódica" % \
                            (serie.contentSerieName, serie.channel))

            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            logger.error("No se ha podido guardar las urls de emergencia de %s en la videoteca" % tvshow_item.contentSerieName)
            logger.error(traceback.format_exc())
            fallidos = -1
        else:
            # ... and if everything went well, refresh the Kodi video library
            if config.is_xbmc() and not silent:
                from platformcode import xbmc_videolibrary
                xbmc_videolibrary.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
def save_library_episodes(path, episodelist):
    """
    Saves to the given path a .strm file for every episode in episodelist.

    @type path: str
    @param path: folder where the episode .strm files are written
    @type episodelist: list
    @param episodelist: items representing the episodes to save
    @rtype insertados: int
    @return: number of episodes inserted
    @rtype sobreescritos: int
    @return: number of episodes overwritten
    @rtype fallidos: int
    @return: number of episodes that failed
    """
    logger.info("pelisalacarta.platformcode.library savelibrary_episodes")
    insertados = 0
    sobreescritos = 0
    fallidos = 0

    # Fix: guard against an empty list — the old code divided by
    # len(episodelist) and raised ZeroDivisionError (the sibling
    # implementations already return early here).
    if not episodelist:
        logger.info("pelisalacarta.platformcode.library savelibrary_episodes lista de episodios vacia, salimos sin crear strm")
        return 0, 0, 0

    # progress dialog
    p_dialog = platformtools.dialog_progress('pelisalacarta', 'Añadiendo episodios...')
    p_dialog.update(0, 'Añadiendo episodio...')

    # fix float because integer division truncates in python 2.x
    t = float(100) / len(episodelist)

    addon_name = sys.argv[0].strip()
    if not addon_name:
        addon_name = "plugin://plugin.video.pelisalacarta/"

    for i, e in enumerate(episodelist):
        p_dialog.update(int(math.ceil(i * t)), 'Añadiendo episodio...', e.title)

        # Add everything except the "Añadir esta serie..."/"Descargar esta serie..." pseudo-items
        if e.action == "add_serie_to_library" or e.action == "download_all_episodes":
            continue

        e.action = "play_from_library"
        e.category = "Series"
        nuevo = False

        # Fix: an episode whose title has no recognizable season/episode is
        # counted as failed and skipped, instead of aborting the whole loop
        # (or producing a bogus ".strm" filename).
        try:
            season_episode = scrapertools.get_season_and_episode(e.title.lower())
        except:
            fallidos += 1
            continue
        if not season_episode:
            fallidos += 1
            continue

        filename = "{0}.strm".format(season_episode)
        fullfilename = join_path(path, filename)
        # logger.debug(fullfilename)

        if not path_exists(fullfilename):
            nuevo = True

        if save_file('{addon}?{url}'.format(addon=addon_name, url=e.tourl()), fullfilename):
            if nuevo:
                insertados += 1
            else:
                sobreescritos += 1
        else:
            fallidos += 1

        if p_dialog.iscanceled():
            break

    p_dialog.close()
    logger.debug("insertados= {0}, sobreescritos={1}, fallidos={2}".format(insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos