def _episode_item(item, url, title, thumb):
    """Build one episode Item from a scraped (url, title) pair.

    Extracts the trailing episode number from the title (falling back to
    1x01 when absent) and renumbers it for trakt via renumbertools.
    """
    title = title.strip()
    url = urlparse.urljoin(item.url, url)
    try:
        episode = int(scrapertools.find_single_match(title, "^.+?\s(\d+)$"))
    except ValueError:
        # No trailing number in the title: default to 1x01.
        season = 1
        episode = 1
    else:
        season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
    title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))
    return item.clone(action="findvideos", title=title, url=url, thumbnail=thumb,
                      fulltitle=title, fanart=item.thumbnail, contentType="episode")


def episodios(item):
    """List every episode of a series page.

    Tries the thumbnail-rich layout first and falls back to the plain
    "/ver/" link list when no match is found. The duplicated per-episode
    logic of the original was factored into _episode_item.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    # fix para renumbertools: the show name must match the site's title
    item.show = scrapertools.find_single_match(data, '<h1 class="Title">(.*?)</h1>')
    if item.plot == "":
        item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')
    matches = re.compile('href="([^"]+)"><figure><img class="[^"]+" data-original="([^"]+)".+?</h3>'
                         '\s*<p>(.*?)</p>', re.DOTALL).findall(data)
    if matches:
        for url, thumb, title in matches:
            itemlist.append(_episode_item(item, url, title, thumb))
    else:
        # Layout without thumbnails: reuse the series thumbnail.
        matches = re.compile('<a href="(/ver/[^"]+)"[^>]+>(.*?)<', re.DOTALL).findall(data)
        for url, title in matches:
            itemlist.append(_episode_item(item, url, title, item.thumbnail))
    return itemlist
def episodios(item):
    """List the chapters of a series, renumbering for trakt.

    A single link may hold several episodes separated by a show-specific
    delimiter; each gets its own "SxEE" label in the title.
    Removed: a leftover ``logger.debug`` that dumped the entire page HTML,
    and a hand-rolled loop counter (replaced by enumerate).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Running absolute episode counter consumed by renumbertools.
    total_episode = 0
    patron_caps = '<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>'
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    patron_info = '<img src="([^"]+)">.+?</span>(.*?)</p>.*?<h2>Reseña:</h2><p>(.*?)</p>'
    scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info)
    scrapedthumbnail = host + scrapedthumbnail
    for cap, link, name in matches:
        title = ""
        # Delimiter used when one link bundles several episodes.
        pat = "/"
        if "Mike, Lu & Og" == item.title:
            pat = "&/"
        if "KND" in item.title:
            pat = "-"
        parts = name.split(pat)
        if len(parts) > 1:
            # Several episodes share this link: "1x01_1x02 ..." label.
            for idx, _ in enumerate(parts, start=1):
                total_episode += 1
                season, episode = renumbertools.numbered_for_tratk(
                    item.channel, item.show, 1, total_episode)
                if len(parts) == idx:
                    title += "%sx%s " % (season, str(episode).zfill(2))
                else:
                    title += "%sx%s_" % (season, str(episode).zfill(2))
        else:
            total_episode += 1
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, total_episode)
            title += "%sx%s " % (season, str(episode).zfill(2))
        url = host + "/" + link
        if "disponible" in link:
            title += "No Disponible aún"
        else:
            title += name
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url,
                 show=show, plot=scrapedplot, thumbnail=scrapedthumbnail))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca",
                             url=item.url, action="add_serie_to_library", extra="episodios",
                             show=show))
    return itemlist
def episodios(item):
    """Build the chapter list of a series page, renumbering for trakt.

    One link may bundle several episodes separated by a show-specific
    delimiter; each bundled episode contributes its own "SxEE" label.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Absolute episode counter fed to renumbertools.
    total_episode = 0
    patron_caps = '<li><a href="(.*?)">(.*?)-(.*?)<\/a><\/li>'
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    patron_info = '<img src="([^"]+)"><div class="ds"><p>(.*?)<\/p>'
    scrapedthumbnail, scrapedplot = scrapertools.find_single_match(data, patron_info)
    show = item.title
    scrapedthumbnail = host + scrapedthumbnail
    for link, cap, name in matches:
        # Delimiter used when one link holds several episodes.
        if "Mike, Lu & Og" == item.title:
            pat = "&/"
        elif "KND" in item.title:
            pat = "-"
        else:
            pat = "/"
        parts = name.split(pat)
        title = ""
        if len(parts) > 1:
            for idx, _ in enumerate(parts, start=1):
                total_episode += 1
                season, episode = renumbertools.numbered_for_tratk(
                    item.channel, item.contentSerieName, 1, total_episode)
                sep = " " if idx == len(parts) else "_"
                title += "%sx%s%s" % (season, str(episode).zfill(2), sep)
        else:
            total_episode += 1
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.contentSerieName, 1, total_episode)
            title += "%sx%s " % (season, str(episode).zfill(2))
        url = host + "/" + link
        if "disponible" in link:
            title += "No Disponible aún"
        else:
            title += name
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url,
                 show=show, plot=scrapedplot, thumbnail=scrapedthumbnail))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
                             url=item.url, action="add_serie_to_library", extra="episodios",
                             show=show))
    return itemlist
def episodesxseason(item):
    """List the episodes contained in one season container (div id=item.dt).

    Titles like "Episodio N - a/b/c" may bundle several episodes; each
    bundled part gets its own renumbered "SxEE" label joined by "_".
    """
    logger.info()
    itemlist = []
    soup = create_soup(item.url)
    episodes = soup.find("div", {"id": item.dt})
    total_episode = 0
    infoLabels = item.infoLabels
    for episode in episodes.find_all("a"):
        scrapedurl = "/" + episode["href"]
        scrapedtitle = episode.find("li").text
        infoLabels['episode'] = scrapedtitle.split(" -")[0].split(" ")[1]
        # "KND" uses "-" between bundled episodes, everything else "/".
        pat = "/" if not "KND" in item.title else "-"
        name = scrapedtitle.split(" -")[1]
        parts = name.split(pat)
        title = ''
        if len(parts) > 1:
            for idx, _ in enumerate(parts):
                total_episode += 1
                season, episode_numb = renumbertools.numbered_for_tratk(
                    item.channel, item.contentSerieName, infoLabels['season'],
                    total_episode)
                if len(parts) == idx + 1:
                    title += "{}x{:02d}".format(season, episode_numb)
                else:
                    title += "{}x{:02d}_".format(season, episode_numb)
        else:
            total_episode += 1
            season, episode_numb = renumbertools.numbered_for_tratk(
                item.channel, item.contentSerieName, infoLabels['season'],
                total_episode)
            title += "{}x{:02d}".format(season, episode_numb)
        title = "{} - {}".format(title, scrapedtitle.split(" -")[1])
        itemlist.append(
            Item(channel=item.channel, title=title, contentSerieName=item.title,
                 url=urlparse.urljoin(host, scrapedurl), plot=item.plot,
                 thumbnail=item.thumbnail, action="findvideos",
                 context=item.context, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, True)
    return itemlist
def episodios(item):
    """Episode list built from the page's JS variables.

    Fix: the episode URL is now always built from the site's *absolute*
    episode number. The original overwrote ``episode`` with the renumbered
    value before building the URL, producing wrong links whenever
    renumbertools remapped the numbering (sibling functions in this file
    keep the two values separate).
    NOTE(review): ``eval`` runs page-provided JS literals — trusted-source
    assumption inherited from the original; consider jsontools instead.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    anime_info = eval(scrapertools.find_single_match(data, "var anime_info = ([^;]+);"))
    episodes = eval(scrapertools.find_single_match(data, "var episodes = ([^;]+);"))
    infoLabels = item.infoLabels
    for episode in episodes:
        lang = 'VOSE'
        site_episode = int(episode)  # absolute number as used in the URL
        if item.contentSerieName:
            season, episode_numb = renumbertools.numbered_for_tratk(
                item.channel, item.contentSerieName, 1, site_episode)
            title = "%sx%s - %s" % (season, str(episode_numb).zfill(2), item.contentSerieName)
            infoLabels['season'] = season
            infoLabels['episode'] = episode_numb
        else:
            title = item.contentTitle
        url = '%sver/%s-capitulo-%s' % (host, anime_info[0], site_episode)
        itemlist.append(Item(channel=item.channel, title=title,
                             contentSerieName=item.contentSerieName, url=url,
                             action='findvideos', language=lang, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    itemlist = itemlist[::-1]
    if item.contentSerieName != '' and config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                 url=item.url, action="add_serie_to_library", extra="episodios",
                 contentSerieName=item.contentSerieName, extra1='library'))
    return itemlist
def episodios(item):
    """List episodes of a series page; a link without "Episodio N" is
    treated as a standalone movie. Appends videolibrary / download-all
    entries when the platform supports them."""
    logger.info()
    itemlist = []
    html_serie = get_url_contents(item.url)
    info_serie = __extract_info_from_serie(html_serie)
    plot = info_serie[3] if info_serie else ''
    es_pelicula = False
    for url, title, date in re.findall(REGEX_EPISODE, html_serie, re.DOTALL):
        episode = scrapertools.find_single_match(title, r'Episodio (\d+)')
        if episode:
            # Episode link: renumber for trakt.
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, int(episode))
            title = "{0}x{1:02d} {2} ({3})".format(
                season, episode, "Episodio " + str(episode), date)
        else:
            # Movie link.
            title = "{0} ({1})".format(title, date)
            item.url = url
            es_pelicula = True
        logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(
            title, url, item.thumbnail))
        itemlist.append(Item(channel=item.channel, action="findvideos",
                             title=title, url=url, thumbnail=item.thumbnail,
                             plot=plot, show=item.show,
                             fulltitle="{0} {1}".format(item.show, title),
                             viewmode="movies_with_plot", folder=True))
    # Videolibrary support plus at least one episode or movie found.
    if config.get_videolibrary_support() and len(itemlist) > 0:
        if es_pelicula:
            entry_title = "Añadir película a la videoteca"
            entry_action = "add_pelicula_to_library"
            entry_extra = ""
        else:
            entry_title = "Añadir serie a la videoteca"
            entry_action = "add_serie_to_library"
            entry_extra = "episodios"
        itemlist.append(Item(channel=item.channel, title=entry_title, url=item.url,
                             action=entry_action, extra=entry_extra, show=item.show))
        if not es_pelicula:
            itemlist.append(Item(channel=item.channel,
                                 title="Descargar todos los episodios",
                                 url=item.url, action="download_all_episodes",
                                 extra="episodios", show=item.show))
    return itemlist
def novedades_episodios(item):
    """Latest released episodes from the "Últimos episodios" section."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(
        data, '<h2>Últimos episodios</h2>.+?<ul class="ListEpisodios[^>]+>(.*?)</ul>')
    pattern = ('<a href="([^"]+)"[^>]+>.+?<img src="([^"]+)".+?"Capi">(.*?)</span>'
               '<strong class="Title">(.*?)</strong>')
    itemlist = []
    for url, thumbnail, str_episode, show in re.findall(pattern, data, re.DOTALL):
        try:
            episode = int(str_episode.replace("Episodio ", ""))
        except ValueError:
            # Unparseable episode label: fall back to 1x01.
            season, episode = 1, 1
        else:
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, episode)
        title = "%s: %sx%s" % (show, season, str(episode).zfill(2))
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title,
                             url=urlparse.urljoin(HOST, url), show=show,
                             thumbnail=urlparse.urljoin(HOST, thumbnail),
                             fulltitle=title))
    return itemlist
def new_episodes(item):
    """Newly published episodes from the <section class="caps"> block."""
    logger.info()
    itemlist = []
    infoLabels = dict()
    full_data = get_source(item.url)
    data = scrapertools.find_single_match(full_data, '<section class="caps">.*?</section>')
    patron = ('<article.*?<a href="([^"]+)">.*?src="([^"]+)".*?'
              'class="vista2">([^<]+)</span>.*?'
              '<span class="episode">.*?</i>([^<]+)</span>.*?<h2 class="Title">([^<]+)</h2>')
    for scrapedurl, scrapedthumbnail, _type, epi, scrapedtitle in re.findall(patron, data, re.DOTALL):
        _type = _type.strip().lower()
        lang, title = clear_title(scrapedtitle)
        season, episode = renumbertools.numbered_for_tratk(item.channel, title, 1, int(epi))
        scrapedtitle += " - %sx%s" % (season, str(episode).zfill(2))
        # Tag non-VOSE languages unless the skin unifies titles.
        if lang != 'VOSE' and not config.get_setting('unify'):
            scrapedtitle += ' [COLOR gold][%s][/COLOR]' % lang
        itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, action='findvideos',
                             language=lang, plot=_type.capitalize(), type=_type,
                             contentSerieName=title, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def episodios(item):
    """Episode list for a series.

    Fixes: the series name is now ``re.escape``-d before being interpolated
    into the episode-number regex (names containing "(", ")" or "+"
    previously corrupted the pattern), and the bare ``except:`` around the
    optional tmdb lookup is narrowed to ``Exception``.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub('\n|\s{2,}', '', data)
    show = scrapertools.find_single_match(data, '<div class="x-title">(.*?)</div>')
    # Strip format/language suffixes so renumbertools sees the bare title.
    show = re.sub(
        r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino",
        "", show)
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(
            data, '<div class="x-sinopsis">\s*(.*?)</div>')
    bloque = scrapertools.find_single_match(data, '<ul class="list"(.*?)</ul>')
    matches = scrapertools.find_multiple_matches(
        bloque, '<li><a href="([^"]+)" title="([^"]+)"')
    for url, title in matches:
        url = host + url
        # re.escape: series names may contain regex metacharacters.
        epi = scrapertools.find_single_match(
            title, '(?i)%s.*? (\d+) (?:Sub|Audio|Español)' % re.escape(item.contentSerieName))
        new_item = item.clone(action="findvideos", url=url, title=title, extra="")
        if epi:
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, show, 1, int(epi))
            new_item.infoLabels["episode"] = episode
            new_item.infoLabels["season"] = season
            new_item.title = "%sx%s %s" % (season, episode, title)
        itemlist.append(new_item)
    if item.infoLabels.get(
            "tmdb_id") or item.extra == "recientes" or item.extra == "completo":
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except Exception:
            # Best-effort metadata lookup; ignore import/network failures.
            pass
    if config.get_videolibrary_support() and itemlist:
        itemlist.append(
            Item(channel=item.channel, title="Añadir serie a la videoteca",
                 url=item.url, action="add_serie_to_library", extra="episodios",
                 contentTitle=item.contentTitle,
                 contentSerieName=item.contentSerieName, text_color=color4,
                 fanart=item.fanart, thumbnail=item.thumbnail))
    return itemlist
def episodios(item):
    """Episodes scraped from the "Sect Episodes full" section of a series page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    if item.plot == "":
        item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')
    data = scrapertools.find_single_match(data, '<div class="Sect Episodes full">(.*?)</div>')
    for url, title in re.findall('<a href="([^"]+)"[^>]+>(.+?)</a', data, re.DOTALL):
        title = title.strip()
        url = urlparse.urljoin(item.url, url)
        thumbnail = item.thumbnail
        try:
            episode = int(scrapertools.find_single_match(title, "Episodio (\d+)"))
        except ValueError:
            # No parseable number: default to 1x01.
            season, episode = 1, 1
        else:
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, episode)
        title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))
        itemlist.append(item.clone(action="findvideos", title=title, url=url,
                                   thumbnail=thumbnail, fulltitle=title,
                                   fanart=thumbnail, contentType="episode"))
    return itemlist
def episodios(item):
    """Episode list from the "Sect Episodes full" block of a series page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    if item.plot == "":
        item.plot = scrapertools.find_single_match(
            data, 'Description[^>]+><p>(.*?)</p>')
    data = scrapertools.find_single_match(
        data, '<div class="Sect Episodes full">(.*?)</div>')
    episode_links = re.compile('<a href="([^"]+)"[^>]+>(.+?)</a', re.DOTALL).findall(data)
    for link, raw_title in episode_links:
        raw_title = raw_title.strip()
        link = urlparse.urljoin(item.url, link)
        try:
            number = int(scrapertools.find_single_match(raw_title, "Episodio (\d+)"))
        except ValueError:
            # Title carries no episode number: default to 1x01.
            season = 1
            number = 1
        else:
            season, number = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, number)
        label = "%s: %sx%s" % (item.title, season, str(number).zfill(2))
        itemlist.append(
            item.clone(action="findvideos", title=label, url=link,
                       thumbnail=item.thumbnail, fulltitle=label,
                       fanart=item.thumbnail, contentType="episode"))
    return itemlist
def episodios(item):
    """Episode list built from the page's JS variables (anime_info/episodes).

    The site URL uses the absolute episode number; only the displayed title
    uses the renumbered season/episode pair.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    info = eval(scrapertools.find_single_match(data, "anime_info = \[(.*?)\];"))
    episodes = eval(scrapertools.find_single_match(data, "var episodes = (.*?);"))
    for episode in episodes:
        url = '%s/ver/%s/%s-%s' % (HOST, episode[1], info[2], episode[0])
        season, episodeRenumber = renumbertools.numbered_for_tratk(
            item.channel, item.contentSerieName, 1, int(episode[0]))
        title = '%sx%s Episodio %s' % (season, str(episodeRenumber).zfill(2),
                                       episodeRenumber)
        itemlist.append(item.clone(title=title, url=url, action='findvideos',
                                   contentSerieName=item.contentSerieName))
    itemlist.reverse()
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title="Añadir esta serie a la videoteca",
                 url=item.url, action="add_serie_to_library", extra="episodios",
                 show=item.contentSerieName))
    return itemlist
def episodios(item):
    """List a series' episodes; a link without "Episodio N" is treated as a
    standalone movie. Uses the legacy library API of this channel."""
    logger.info()
    itemlist = []
    html_serie = get_url_contents(item.url)
    info_serie = __extract_info_from_serie(html_serie)
    plot = info_serie[3] if info_serie else ''
    es_pelicula = False
    for url, title, date in re.findall(REGEX_EPISODE, html_serie, re.DOTALL):
        episode = scrapertools.find_single_match(title, r'Episodio (\d+)')
        if episode:
            # Episode link: renumber for trakt.
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, int(episode))
            title = "{0}x{1:02d} {2} ({3})".format(
                season, episode, "Episodio " + str(episode), date)
        else:
            # Movie link.
            title = "{0} ({1})".format(title, date)
            item.url = url
            es_pelicula = True
        logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(
            title, url, item.thumbnail))
        itemlist.append(Item(channel=item.channel, action="findvideos",
                             title=title, url=url, thumbnail=item.thumbnail,
                             plot=plot, show=item.show,
                             fulltitle="{0} {1}".format(item.show, title),
                             viewmode="movies_with_plot", folder=True))
    # Library support plus at least one episode or movie found.
    if config.get_library_support() and len(itemlist) > 0:
        if es_pelicula:
            entry_title = "Añadir película a la biblioteca"
            entry_action = "add_pelicula_to_library"
            entry_extra = ""
        else:
            entry_title = "Añadir serie a la biblioteca"
            entry_action = "add_serie_to_library"
            entry_extra = "episodios"
        itemlist.append(Item(channel=item.channel, title=entry_title, url=item.url,
                             action=entry_action, extra=entry_extra, show=item.show))
        if not es_pelicula:
            itemlist.append(Item(channel=item.channel,
                                 title="Descargar todos los episodios",
                                 url=item.url, action="download_all_episodes",
                                 extra="episodios", show=item.show))
    return itemlist
def novedades_episodios(item):
    """Latest episodes ("Ep. N") from the ListEpisodios block."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(data, '<ul class="ListEpisodios[^>]+>(.*?)</ul>')
    pattern = ('href="([^"]+)"[^>]+>.+?<img src="([^"]+)".+?"Capi">(.*?)</span>'
               '<strong class="Title">(.*?)</strong>')
    itemlist = []
    for url, thumbnail, str_episode, show in re.findall(pattern, data, re.DOTALL):
        try:
            episode = int(str_episode.replace("Ep. ", ""))
        except ValueError:
            # Unparseable label: fall back to 1x01.
            season, episode = 1, 1
        else:
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, episode)
        title = "%s: %sx%s" % (show, season, str(episode).zfill(2))
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title,
                             url=urlparse.urljoin(HOST, url), show=show,
                             thumbnail=urlparse.urljoin(HOST, thumbnail),
                             fulltitle=title))
    return itemlist
def episodios(item):
    """Episode list built from the page's JS variables, with infoLabels and
    a videolibrary entry.

    NOTE(review): eval of page-provided JS literals — trusted-source
    assumption inherited from the original.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    info = eval(scrapertools.find_single_match(data, "anime_info = \[(.*?)\];"))
    episodes = eval(scrapertools.find_single_match(data, "var episodes = (.*?);"))
    infoLabels = item.infoLabels
    for episode in episodes:
        # The site URL uses the absolute number; the title the renumbered one.
        url = '%s/ver/%s/%s-%s' % (HOST, episode[1], info[2], episode[0])
        season, episodeRenumber = renumbertools.numbered_for_tratk(
            item.channel, item.contentSerieName, 1, int(episode[0]))
        infoLabels['season'] = season
        infoLabels['episode'] = episodeRenumber
        title = '%sx%s Episodio %s' % (season, str(episodeRenumber).zfill(2),
                                       episodeRenumber)
        itemlist.append(item.clone(title=title, url=url, action='findvideos',
                                   contentSerieName=item.contentSerieName,
                                   infoLabels=infoLabels))
    itemlist.reverse()
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title="Añadir esta serie a la videoteca",
                             url=item.url, action="add_serie_to_library",
                             extra="episodios",
                             contentSerieName=item.contentSerieName))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def episodios(item):
    """Episode list scraped from the "item" anchor links; the episode number
    is taken from the URL itself."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    infoLabels = item.infoLabels
    for scrapedurl in re.findall('<a class="item" href="([^"]+)">', data, re.DOTALL):
        epi_number = scrapertools.find_single_match(scrapedurl, '.*?episodio-(\d+)')
        season, episode = renumbertools.numbered_for_tratk(
            item.channel, item.contentSerieName, 1, int(epi_number))
        infoLabels['season'] = season
        infoLabels['episode'] = episode
        itemlist.append(
            Item(channel=item.channel,
                 title="%sx%s - %s" % (season, str(episode).zfill(2),
                                       item.contentSerieName),
                 contentSerieName=item.contentSerieName, url=scrapedurl,
                 action='findvideos', language=item.language,
                 infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    itemlist.reverse()
    if item.contentSerieName != '' and config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                 url=item.url, action="add_serie_to_library", extra="episodios",
                 contentSerieName=item.contentSerieName, extra1='library'))
    return itemlist
def episodios(item):
    """Episode list from the scrolling episode <ul>; a "latino" marker in the
    scraped title selects the language tag."""
    logger.info()
    itemlist = []
    full_data = get_source(item.url)
    data = scrapertools.find_single_match(
        full_data, '<ul class="list-episodies scrolling">(.*?)</ul>')
    infoLabels = item.infoLabels
    for scrapedurl, scrapedtitle, epi in re.findall(
            '<a href="([^"]+)".*?title="([^"]+)".*?Episodio (\d+)', data, re.DOTALL):
        lang = 'Latino' if 'latino' in scrapedtitle.lower() else 'VOSE'
        season, episode = renumbertools.numbered_for_tratk(
            item.channel, item.contentSerieName, 1, int(epi))
        infoLabels['season'] = season
        infoLabels['episode'] = episode
        itemlist.append(
            Item(channel=item.channel,
                 title="%sx%s - %s" % (season, str(episode).zfill(2),
                                       item.contentSerieName),
                 contentSerieName=item.contentSerieName, url=scrapedurl,
                 action='findvideos', language=lang, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    itemlist.reverse()
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                 url=item.url, action="add_serie_to_library", extra="episodios",
                 contentSerieName=item.contentSerieName, extra1='library'))
    return itemlist
def novedades_episodios(item):
    """Latest episodes from the "Últimos episodios" section, with tmdb data."""
    logger.info()
    itemlist = []
    patr = '<h2>Últimos episodios</h2>.+?<ul class="ListEpisodios[^>]+>(.*?)</ul>'
    data = get_source(item.url, patron=patr)
    pattern = ('<a href="([^"]+)"[^>]+>.+?<img src="([^"]+)".+?"Capi">(.*?)</span>'
               '<strong class="Title">(.*?)</strong>')
    for url, thumbnail, str_episode, show in re.findall(pattern, data, re.DOTALL):
        try:
            episode = int(str_episode.replace("Episodio ", ""))
        except ValueError:
            # Unparseable label: fall back to 1x01.
            season, episode = 1, 1
        else:
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.contentSerieName, 1, episode)
        title = "%s: %sx%s" % (show, season, str(episode).zfill(2))
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title,
                             url=urlparse.urljoin(HOST, url),
                             contentSerieName=show,
                             thumbnail=urlparse.urljoin(HOST, thumbnail)))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def episodios(item):
    """Episode list for a series page.

    Fix: the original tested ``config.get_videolibrary_support`` without
    calling it, so the always-truthy function object unconditionally added
    the videolibrary entry; the setting is now actually queried.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = '<span class="icon-triangulo-derecha"></span>.*?<a href="([^"]+)">([^"]+) (\d+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, scrapedtitle, episode in matches:
        season, episode = renumbertools.numbered_for_tratk(
            item.channel, scrapedtitle, 1, int(episode))
        title = "%sx%s %s" % (season, str(episode).zfill(2), scrapedtitle)
        itemlist.append(item.clone(title=title, url=url, action='findvideos'))
    if config.get_videolibrary_support():  # bug fix: was missing the call ()
        itemlist.append(
            Item(channel=item.channel, title="Añadir serie a la biblioteca",
                 url=item.url, action="add_serie_to_library", extra="episodios",
                 show=item.show))
    return itemlist
def episodios(item):
    """Episode list from the page's "var episodios" JS array.

    NOTE(review): the URL and thumbnail are built from the *renumbered*
    episode value, exactly as the original did — confirm the site expects
    the renumbered number here.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    list_episodes = eval(scrapertools.find_single_match(data, 'var episodios = (.*?);'))
    infoLabels = item.infoLabels
    for entry in list_episodes:
        number = int(entry[0])
        lang = 'VOSE'
        season, episode = renumbertools.numbered_for_tratk(
            item.channel, item.contentSerieName, 1, number)
        title = "%sx%s - %s" % (season, str(episode).zfill(2), item.contentSerieName)
        url = item.url.replace(host, '%s%s/' % (host, episode))
        thumbnail = '%sSubidas/anime/miniaturas/t_%s_%s' % (host, episode, item.thumb)
        infoLabels['season'] = season
        infoLabels['episode'] = episode
        itemlist.append(Item(channel=item.channel, title=title,
                             contentSerieName=item.contentSerieName, url=url,
                             action='findvideos', language=lang,
                             thumbnail=thumbnail, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    itemlist.reverse()
    if item.contentSerieName != '' and config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                 url=item.url, action="add_serie_to_library", extra="episodios",
                 contentSerieName=item.contentSerieName, extra1='library'))
    return itemlist
def episodios(item):
    """Episode list; entries already labelled "SxE" keep their site label,
    plain chapter numbers are renumbered for trakt."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron_caps = '<li><strong><a href="([^"]+)">(.+?)–(.+?)<\/a>'
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    show = scrapertools.find_single_match(data, '<h3><strong>.+?de (.+?)<\/strong>')
    scrapedplot = scrapertools.find_single_match(
        data, '<strong>Sinopsis<\/strong><strong>([^"]+)<\/strong><\/pre>')
    for link, cap, name in matches:
        if 'x' in cap:
            # The site already provides a "SxE" label.
            title = cap + " - " + name
        else:
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, int(cap))
            title = "{0}x{1:02d} {2} ({3})".format(
                season, episode, "Episodio " + str(episode), name)
        itemlist.append(Item(channel=item.channel, action="findvideos",
                             title=title, url=link, thumbnail=item.thumbnail,
                             plot=scrapedplot, show=show))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title="Añadir esta serie a la videoteca",
                             url=item.url, action="add_serie_to_library",
                             extra="episodios", show=show))
    return itemlist
def episodios(item):
    """Episode list with implicit seasons: each time the chapter counter
    restarts at 1 a new season begins. Entries marked "NO DISPONIBLE" are
    skipped.

    Removed: an unused loop counter ``i`` and a commented-out scrape line.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    data = scrapertools.find_single_match(data, '<div class="pagina">(.*?)cajaSocial')
    patron_caps = "<li><a href='(.+?)'>Cap(?:i|í)tulo: (.+?) - (.+?)<\/a>"
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    scrapedthumbnail = scrapertools.find_single_match(data, "<img src='(.+?)'.+?>")
    scrapedplot = scrapertools.find_single_match(
        data, '<span>Descripcion.+?<\/span>(.+?)<br>')
    temp = 0  # current season; bumped whenever chapter numbering restarts at 1
    infoLabels = item.infoLabels
    for link, cap, name in matches:
        if int(cap) == 1:
            temp = temp + 1
        if int(cap) < 10:
            cap = "0" + cap
        season, episode = renumbertools.numbered_for_tratk(
            item.channel, item.show, temp, int(cap))
        infoLabels['season'] = season
        infoLabels['episode'] = episode
        title = "%sx%s %s (%s)" % (season, str(episode).zfill(2),
                                   "Episodio %s" % episode, name)
        url = host + "/" + link
        if "NO DISPONIBLE" not in name:
            itemlist.append(
                Item(channel=item.channel, action="findvideos", title=title,
                     thumbnail=scrapedthumbnail, plot=scrapedplot, url=url,
                     contentSeasonNumber=season, contentEpisodeNumber=episode,
                     contentSerieName=item.contentSerieName,
                     infoLabels=infoLabels))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
                 url=item.url, action="add_serie_to_library", extra="episodios",
                 contentSerieName=item.contentSerieName))
    return itemlist
def episodios(item):
    """Chapter list labelled "SEASONxEP"; "Ranma" pages use a flat chapter
    numbering that is renumbered for trakt.

    NOTE(review): the nesting of the season-reset logic was reconstructed
    from whitespace-mangled source — verify against the channel history if
    episode labels look wrong.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    data_lista = scrapertools.find_single_match(
        data, '<div class="su-list su-list-style-"><ulclass="lista-capitulos">.+?<\/div><\/p>')
    if '×' in data_lista:
        data_lista = data_lista.replace('×', 'x')
    show = item.title
    if "[Latino]" in show:
        show = show.replace("[Latino]", "")
    if "Ranma" in show:
        patron_caps = '<\/i> <strong>.+?Capitulo ([^"]+)\: <a .+? href="([^"]+)">([^"]+)<\/a>'
    else:
        patron_caps = '<\/i> <strong>Capitulo ([^"]+)x.+?\: <a .+? href="([^"]+)">([^"]+)<\/a>'
    matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
    scrapedplot = scrapertools.find_single_match(
        data, '<strong>Sinopsis<\/strong><strong>([^"]+)<\/strong><\/pre>')
    number = 0
    prev_season = 1
    for temp, link, name in matches:
        if prev_season != temp:
            number = 0  # season changed: restart the per-season counter
        if "Ranma" in show:
            number = int(temp)
            temp = str(1)
        else:
            number = number + 1
        capi = "0" + str(number) if number < 10 else str(number)
        if "Ranma" in show:
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, number)
            capi = "0" + str(episode) if episode < 10 else episode
            title = str(season) + "x" + str(capi) + " - " + name
        else:
            title = str(temp) + "x" + capi + " - " + name
        prev_season = temp
        itemlist.append(Item(channel=item.channel, action="findvideos",
                             title=title, url=link, show=show))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title="Añadir " + show + " a la videoteca",
                             url=item.url, action="add_serie_to_library",
                             extra="episodios", show=show))
    return itemlist
def episodios(item):
    """Episode list with implicit seasons (chapter 1 starts a new season),
    skipping chapters marked "NO DISPONIBLE".

    Removed: an unused loop counter ``i`` and the no-op
    ``if "NO DISPONIBLE" in name: name = name`` branch (condition inverted
    so the append is the only branch).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = '<div class="pagina">(.+?)<\/div><div id="fade".+?>'
    data = scrapertools.find_single_match(data, patron)
    patron_caps = "<a href='(.+?)'>Capitulo: (.+?) - (.+?)<\/a>"
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    show = scrapertools.find_single_match(
        data, '<span>Titulo.+?<\/span>(.+?)<br><span>')
    scrapedthumbnail = scrapertools.find_single_match(data, "<img src='(.+?)'.+?>")
    scrapedplot = scrapertools.find_single_match(
        data, '<span>Descripcion.+?<\/span>(.+?)<br>')
    temp = 0  # current season; bumped whenever chapter numbering restarts at 1
    for link, cap, name in matches:
        if int(cap) == 1:
            temp = temp + 1
        if int(cap) < 10:
            cap = "0" + cap
        season = temp
        episode = int(cap)
        if config.is_xbmc():
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, season, episode)
        title = "{0}x{1:02d} {2} ({3})".format(
            season, episode, "Episodio " + str(episode), name)
        url = host + "/" + link
        if "NO DISPONIBLE" not in name:
            itemlist.append(
                Item(channel=item.channel, action="findvideos", title=title,
                     thumbnail=scrapedthumbnail, plot=scrapedplot, url=url,
                     show=show))
    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la biblioteca de Kodi",
                 url=item.url, action="add_serie_to_library", extra="episodios",
                 show=show))
    return itemlist
def episodios(item):
    """List a series' episodes (or detect a standalone movie).

    Fix: the original did ``new_item = itemlist.append(Item(...))`` —
    ``list.append`` returns None, so every subsequent
    ``new_item.infoLabels[...]`` / ``new_item.title`` access raised
    AttributeError (and the half-built item had already been appended).
    The Item is now built first and appended exactly once.
    """
    logger.info()
    itemlist = []
    html_serie = get_url_contents(item.url)
    info_serie = __extract_info_from_serie(html_serie)
    if info_serie[3]:
        plot = info_serie[3]
    else:
        plot = ''
    episodes = re.findall(REGEX_EPISODE, html_serie, re.DOTALL)
    es_pelicula = False
    for url, title, date in episodes:
        episode = scrapertools.find_single_match(title, r'Episodio (\d+)')
        new_item = Item(channel=item.channel, action="findvideos", url=url,
                        thumbnail=item.thumbnail, plot=plot, show=item.show)
        # The link belongs to an episode
        if episode:
            season = 1
            episode = int(episode)
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.contentSerieName, season, episode)
            new_item.infoLabels["episode"] = episode
            new_item.infoLabels["season"] = season
            new_item.contentSerieName = item.contentSerieName
            title = "%sx%s %s (%s)" % (season, str(episode).zfill(2),
                                       "Episodio %s" % episode, date)
        # The link belongs to a movie
        else:
            title = "%s (%s)" % (title, date)
            item.url = url
            es_pelicula = True
        new_item.title = title
        new_item.fulltitle = "%s %s" % (item.show, title)
        itemlist.append(new_item)
    # Videolibrary support plus at least one episode or movie found.
    if config.get_videolibrary_support() and len(itemlist) > 0:
        if es_pelicula:
            item_title = "Añadir película a la videoteca"
            item_action = "add_pelicula_to_library"
            item_extra = ""
        else:
            item_title = "Añadir serie a la videoteca"
            item_action = "add_serie_to_library"
            item_extra = "episodios"
        itemlist.append(Item(channel=item.channel, title=item_title,
                             url=item.url, action=item_action,
                             extra=item_extra, show=item.show))
        if not es_pelicula:
            itemlist.append(Item(channel=item.channel,
                                 title="Descargar todos los episodios",
                                 url=item.url, action="download_all_episodes",
                                 extra="episodios", show=item.show))
    return itemlist
def episodios(item, final=True):
    """Fetch every episode of a series through the site's AJAX pager.

    Pages /ajax/caps until the service answers with an empty list,
    renumbers each episode for trakt, and appends videolibrary and
    download entries when supported.

    ``final`` is kept for caller compatibility; it is not used here.
    """
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    data = httptools.downloadpage(item.url).data
    data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)')
    CHANNEL_HEADERS = [["Host", "m.animeid.tv"],
                       ["X-Requested-With", "XMLHttpRequest"]]
    page = 0
    while True:
        page += 1
        u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" % (data_id, page)
        data = httptools.downloadpage(u, headers=CHANNEL_HEADERS).data
        # When there is no more data the service returns: "list":[]
        if '"list":[]' in data:
            break
        dict_data = jsontools.load(data)
        # Service serves DESC order; reverse for oldest-first.
        # (Renamed locals: the old code shadowed builtins `list`/`dict`.)
        caps = dict_data['list'][::-1]
        for cap in caps:
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, int(cap["numero"]))
            title = "%sx%s - %s" % (season, str(episode).zfill(2), cap["date"])
            infoLabels['season'] = season
            infoLabels['episode'] = episode
            itemlist.append(
                Item(action="findvideos", channel=item.channel, title=title,
                     url=CHANNEL_HOST + cap['href'], thumbnail=item.thumbnail,
                     show=item.show, infoLabels=infoLabels,
                     viewmode="movie_with_plot"))
    if config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
                 url=item.url, action="add_serie_to_library",
                 extra="episodios", show=item.show))
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR white]Descargar todos los episodios de la serie[/COLOR]",
                 url=item.url, action="download_all_episodes",
                 extra="episodios", show=item.show))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def episodesxfolder(item):
    """Build episode items for one slice ("folder") of a series listing.

    item.init / item.fin, when set, bound the slice of episode divs taken
    from the page so a single call renders one folder of episodes.
    """
    logger.info()
    itemlist = list()
    # Normalise falsy markers to None so the list slice below is open-ended.
    if not item.init:
        item.init = None
    if not item.fin:
        item.fin = None
    soup = create_soup(item.url)
    matches = soup.find_all("div", {"data-episode": True})
    # NOTE(review): infoLabels is a shared dict mutated every iteration and
    # passed to every Item — presumably Item copies it; confirm before refactoring.
    infoLabels = item.infoLabels
    for elem in matches[item.init:item.fin]:
        scrapedurl = elem.a["href"]
        # Episode number comes from the URL slug ("...episodio-NN").
        episode = scrapertools.find_single_match(scrapedurl, '.*?episodio-(\d+)')
        lang = item.language
        season, episode = renumbertools.numbered_for_tratk(
            item.channel, item.contentSerieName, 1, int(episode))
        title = "%sx%s - %s" % (season, str(episode).zfill(2),
                                item.contentSerieName)
        url = scrapedurl
        infoLabels['season'] = season
        infoLabels['episode'] = episode
        itemlist.append(
            Item(channel=item.channel, title=title,
                 contentSerieName=item.contentSerieName, url=url,
                 action='findvideos', language=lang, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if not item.extra:
        # Offer the videolibrary entry only on a direct (non-extra) listing.
        if item.contentSerieName != '' and config.get_videolibrary_support(
        ) and len(itemlist) > 0 and not item.foldereps:
            itemlist.append(
                Item(channel=item.channel,
                     title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                     url=item.url, action="add_serie_to_library",
                     extra="episodios",
                     contentSerieName=item.contentSerieName,
                     extra1='library'))
    return itemlist
def episodios(item):
    """Return one findvideos item per episode listed on the series page."""
    logger.info()
    itemlist = []

    data = get_source(item.url)
    patron = ('<li id="epi-.*? class="list-group-item.*?"><a href="([^"]+)".*?'
              'class="badge".*?width="25" title="([^"]+)">.*?<\/span>(.*?) (\d+)<\/li>')
    infoLabels = item.infoLabels

    for ep_url, ep_lang, ep_title, ep_number in re.findall(patron, data, re.DOTALL):
        season, episode = renumbertools.numbered_for_tratk(
            item.channel, item.contentSerieName, 1, int(ep_number))
        infoLabels['season'] = str(season)
        infoLabels['episode'] = str(episode)
        itemlist.append(
            Item(channel=item.channel,
                 title="%s %sx%s" % (ep_title, season, episode),
                 contentSerieName=item.contentSerieName,
                 url=ep_url,
                 action='findvideos',
                 language=IDIOMAS[ep_lang],
                 infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Offer to add the series to the videolibrary when anything was found.
    if itemlist and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                 url=item.url, action="add_serie_to_library",
                 extra="episodios",
                 contentSerieName=item.contentSerieName,
                 extra1='library'))
    return itemlist
def episodios(item):
    """List episodes from the page's "Sect Episodes full" section.

    Only runs when the Alfa assistant app is installed; otherwise an
    empty list is returned.
    """
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    if alfa_assistant.is_alfa_installed():
        data = httptools.downloadpage(item.url).data
        # Collapse whitespace so the section/link regexes can match.
        data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
        if item.plot == "":
            item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')
        # Narrow to the episodes section before extracting links.
        data = scrapertools.find_single_match(data, '<div class="Sect Episodes full">(.*?)</div>')
        matches = re.compile('<a href="([^"]+)"[^>]+>(.+?)</a', re.DOTALL).findall(data)
        for url, title in matches:
            title = title.strip()
            url = urlparse.urljoin(item.url, url)
            thumbnail = item.thumbnail
            try:
                episode = int(scrapertools.find_single_match(title, "Episodio (\d+)"))
            except ValueError:
                # No parsable episode number: fall back to 1x01.
                season = 1
                episode = 1
            else:
                season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1,
                                                                   episode)
            infoLabels['season'] = season
            infoLabels['episode'] = episode
            title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))
            itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumbnail,
                                       contentTitle=title, fanart=thumbnail, contentType="episode",
                                       infoLabels=infoLabels, contentSerieName=item.contentSerieName,))
        # Present in reverse of page order.
        itemlist.reverse()
        tmdb.set_infoLabels(itemlist, seekTmdb=True)
    else:
        return []
    return itemlist
def episodios(item):
    """List the episodes of a series, or the single movie entry.

    An entry whose title lacks "Episodio N" flips the `pelicula` flag and
    is treated as a movie from that point on (the flag persists across
    loop iterations).
    """
    logger.info()
    itemlist = []
    data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS, host=CHANNEL_HOST)
    # Strip whitespace, &nbsp; and <br> variants so the regexes match.
    data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    patron = "<p><span>(.*?)</span>"
    aux_plot = scrapertools.find_single_match(data, patron)
    patron = '<td><ahref="([^"]+)">(.*?)</a></td><td>(.*?)</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    pelicula = False
    for scrapedurl, scrapedtitle, scrapeddate in matches:
        title = scrapedtitle.strip()  # scrapertools.unescape(scrapedtitle)
        url = scrapedurl
        thumbnail = item.thumbnail
        plot = aux_plot  # item.plot
        date = scrapeddate.strip()
        # TODO: create a function that, given the title and a series lookup
        # table, sets the show name and season/episode so trak.tv works
        season = 1
        episode = 1
        patron = "Episodio\s+(\d+)"
        try:
            episode = scrapertools.get_match(title, patron)
            episode = int(episode)
        except IndexError:
            # No "Episodio N" in the title: treat the entry as a movie.
            pelicula = True
            pass
        except ValueError:
            pass
        if pelicula:
            title = "{0} ({1})".format(title, date)
            logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(
                title, url, thumbnail))
            item.url = url
            itemlist.append(
                Item(channel=item.channel, action="findvideos", title=title,
                     url=url, thumbnail=thumbnail, plot=plot,
                     fulltitle="{0} {1}".format(item.show, title),
                     fanart=thumbnail, viewmode="movies_with_plot",
                     folder=True))
        else:
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, season, episode)
            title = "{0}x{1:02d} {2} ({3})".format(season, episode,
                                                   "Episodio " + str(episode), date)
            logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(
                title, url, thumbnail))
            itemlist.append(
                Item(channel=item.channel, action="findvideos", title=title,
                     url=url, thumbnail=thumbnail, plot=plot, show=item.show,
                     fulltitle="{0} {1}".format(item.show, title),
                     fanart=thumbnail, viewmode="movies_with_plot",
                     folder=True))
    # Library helpers: series entries when episodes were found, movie entry
    # when exactly one movie was found.
    if config.get_library_support() and len(itemlist) > 0 and not pelicula:
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la biblioteca de XBMC",
                 url=item.url, action="add_serie_to_library",
                 extra="episodios", show=item.show))
        itemlist.append(
            Item(channel=item.channel,
                 title="Descargar todos los episodios de la serie",
                 url=item.url, action="download_all_episodes",
                 extra="episodios", show=item.show))
    elif config.get_library_support() and len(itemlist) == 1 and pelicula:
        itemlist.append(
            Item(channel=item.channel, action="add_pelicula_to_library",
                 url=item.url, title="Añadir película a la biblioteca",
                 thumbnail=item.thumbnail, fulltitle=item.fulltitle))
    return itemlist
def episodios(item):
    """List a donghua's episodes; probe for one not-yet-listed episode.

    When the series is flagged as airing (badge bg-success), the function
    also requests the URL of the episode after the newest one and appends
    it if the page already contains playable (eval-packed) data.
    """
    logger.info()
    itemlist = list()
    first_url = ""
    infoLabels = item.infoLabels
    # Context menu entry to configure intro-skipping for this series.
    item.contex = [{
        "title": "Config Saltar Intro en Serie",
        "action": "set_skip_time",
        "channel": item.channel
    }]
    soup = create_soup(item.url)
    ep_list = soup.find("ul", class_="donghua-list")
    try:
        plot = soup.find("p", class_="text-justify fc-dark").text
    except:
        plot = ""
    try:
        # "badge bg-success" marks a currently-airing series.
        state = soup.find("span", class_="badge bg-success")
    except:
        state = ''
    for elem in ep_list.find_all("a"):
        url = host + elem["href"]
        # Remember the first (newest) episode URL for the probe below.
        if not first_url and state:
            first_url = url
        epi_num = scrapertools.find_single_match(elem.text.strip(), "(\d+)$")
        season, episode = renumbertools.numbered_for_tratk(
            item.channel, item.contentSerieName, 1, int(epi_num))
        infoLabels['season'] = season
        infoLabels['episode'] = episode
        title = '%sx%s - Episodio %s' % (season, episode, episode)
        itemlist.append(
            Item(channel=item.channel, title=title, url=url, plot=plot,
                 thumbnail=item.thumbnail, action='findvideos',
                 infoLabels=infoLabels, context=item.contex))
    # Page order is reversed for display.
    itemlist = itemlist[::-1]
    if first_url and state:
        # Build the URL of the next (unlisted) episode by bumping the
        # trailing number of the newest episode's URL.
        epi_num = int(scrapertools.find_single_match(first_url, "(\d+)$")) + 1
        url = re.sub("(\d+)$", str(epi_num), first_url)
        season, episode = renumbertools.numbered_for_tratk(
            item.channel, item.contentSerieName, 1, int(epi_num))
        infoLabels['season'] = season
        infoLabels['episode'] = episode
        title = '%sx%s - Episodio %s' % (season, episode, episode)
        try:
            data = httptools.downloadpage(url).data
            # Only append if the page already carries packed player data.
            matches = scrapertools.find_multiple_matches(data, "(eval.*?)\n")
            if matches:
                itemlist.append(
                    Item(channel=item.channel, title=title, url=url,
                         plot=plot, thumbnail=item.thumbnail,
                         action='findvideos', infoLabels=infoLabels,
                         context=item.contex))
        except:
            # Best-effort probe: any failure just means no extra episode.
            pass
    tmdb.set_infoLabels_itemlist(itemlist, True)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(
                channel=item.channel,
                title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                url=item.url,
                action="add_serie_to_library",
                extra="episodios",
                contentSerieName=item.contentSerieName,
                extra1='library'))
    return itemlist
def episodios(item):
    """List episodes, supporting two site layouts.

    With `clone` set, episodes are scraped from the <ul class="ListCaps">
    HTML; otherwise they are read from the page's embedded JavaScript
    `anime_info` / `episodes` arrays.
    """
    logger.info()
    itemlist = []
    if clone and OGHOST in item.url:
        item.url = item.url.replace(OGHOST, HOST)
    data = get_source(item.url)
    if clone:
        data = scrapertools.find_single_match(
            data, '<ul class="ListCaps".+?>(.+?)</ul')
        patron = '(?is)href="(.+?)".+?\ssrc="(.+?)".+?class="title.+?p>.+?(\d+).*?</.+?'
        episodes = scrapertools.find_multiple_matches(data, patron)
        for url, thumb, epnum in episodes:
            season = 1
            season, episodeRenumber = renumbertools.numbered_for_tratk(
                item.channel, item.contentSerieName, season, int(epnum))
            title = '{}x{} Episodio {}'.format(season,
                                               str(episodeRenumber).zfill(2),
                                               episodeRenumber)
            url = urlparse.urljoin(HOST, url)
            infoLabels = item.infoLabels
            infoLabels['season'] = season
            infoLabels['episode'] = episodeRenumber
            itemlist.append(
                item.clone(action="findvideos", channel=item.channel,
                           infoLabels=infoLabels, thumbnail=thumb,
                           title=title, url=url))
    else:
        info = scrapertools.find_single_match(data, "anime_info = \[(.*?)\];")
        # HACK(review): eval() on page-embedded data is unsafe for untrusted
        # input — consider ast.literal_eval or a JSON parse; confirm format.
        info = eval(info)
        episodes = eval(
            scrapertools.find_single_match(data, "var episodes = (.*?);"))
        infoLabels = item.infoLabels
        for episode in episodes:
            url = '{}/ver/{}/{}-{}'.format(HOST, episode[1], info[2], episode[0])
            season = 1
            season, episodeRenumber = renumbertools.numbered_for_tratk(
                item.channel, item.contentSerieName, season, int(episode[0]))
            infoLabels['season'] = season
            infoLabels['episode'] = episodeRenumber
            title = '{}x{} Episodio {}'.format(season,
                                               str(episodeRenumber).zfill(2),
                                               episodeRenumber)
            itemlist.append(
                item.clone(title=title, url=url, action='findvideos',
                           contentSerieName=item.contentSerieName,
                           infoLabels=infoLabels))
    if not item.extra:
        # Reverse page order for direct listings only.
        itemlist = itemlist[::-1]
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    itemlist = sorted(itemlist, key=lambda it: it.title)
    if config.get_videolibrary_support() and len(
            itemlist
    ) > 0 and not item.action == 'add_serie_to_library' and not item.extra:
        itemlist.append(
            Item(channel=item.channel,
                 title=config.get_localized_string(60352),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 contentSerieName=item.contentSerieName))
    return itemlist
def episodios(item):
    """Build the episode list, handling multi-episode links.

    A single link may cover several episodes (parts separated by the
    "as/sd" token in the name); each part gets its own SxxEyy prefix
    in the title.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # running count used to derive the absolute episode number
    total_episode = 0
    patron_caps = '<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>'
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    patron_info = '<img src="([^"]+)">.+?<\/span>([^"]+)<\/p><p><span>I.+?Reseña: <\/span>(.+?)<\/p><\/div>'
    scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(
        data, patron_info)
    scrapedthumbnail = host + scrapedthumbnail
    for cap, link, name in matches:
        title = ""
        pat = "as/sd"
        # several episodes share one link
        if len(name.split(pat)) > 1:
            i = 0
            for pos in name.split(pat):
                i = i + 1
                total_episode += 1
                if config.is_xbmc():
                    season, episode = renumbertools.numbered_for_tratk(
                        item.channel, item.show, 1, total_episode)
                else:
                    # FIX: season/episode were left undefined on non-Kodi
                    # platforms, crashing the title format below.
                    season = 1
                    episode = total_episode
                # trailing space on the last part, "_" between parts
                if len(name.split(pat)) == i:
                    title += "{0}x{1:02d} ".format(season, episode)
                else:
                    title += "{0}x{1:02d}_".format(season, episode)
        else:
            total_episode += 1
            if config.is_xbmc():
                season, episode = renumbertools.numbered_for_tratk(
                    item.channel, item.show, 1, total_episode)
            else:
                season = 1
                episode = total_episode
            title += "{0}x{1:02d} ".format(season, episode)
        url = host + "/" + link
        if "disponible" in link:
            title += "No Disponible aún"
        else:
            title += name
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title,
                 url=url, show=show, plot=scrapedplot,
                 thumbnail=scrapedthumbnail))
    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la biblioteca de Kodi",
                 url=item.url, action="add_serie_to_library",
                 extra="episodios", show=show))
    return itemlist