def list_all(item):
    """Build the listing of series (or episodes) scraped from item.url.

    Scrapes thumbnails, URLs and titles from the page at ``item.url``.
    When the URL contains ``'episode'`` each title is parsed into
    season/episode numbers and the resulting items point to ``findvideos``;
    otherwise they point to ``seasons``. Appends a "Siguiente >>>"
    pagination item when a rel=next link is found.

    :param item: channel Item whose ``url`` is the page to scrape.
    :return: list of Item objects (with TMDB infoLabels filled in).
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)

    # Capture groups: (thumbnail URL, item URL, title).
    patron = '39;src=.*?(http.*?)style=display:.*?one-line href=(.*?) title=.*?>(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        url = scrapedurl
        # Normalize the multiplication sign so "3×05" parses as "3x05".
        scrapedtitle = scrapedtitle.replace('×', 'x')
        contentSerieName = scrapedtitle
        action = 'seasons'
        if 'episode' in item.url:
            # Episode listing: split "Title 3x05"-style titles into parts.
            scrapedtitle, season, episode = scrapertools.find_single_match(
                scrapedtitle, r'(.*?) (\d+).*?(?:x|X).*?(\d+)')
            contentSerieName = scrapedtitle
            scrapedtitle = '%sx%s - %s' % (season, episode, scrapedtitle)
            action = 'findvideos'
        thumbnail = scrapedthumbnail

        new_item = Item(channel=item.channel,
                        title=scrapedtitle,
                        url=url,
                        thumbnail=thumbnail,
                        contentSerieName=contentSerieName,
                        action=action,
                        context=filtertools.context(item, list_language,
                                                   list_quality))
        if 'episode' in item.url:
            # FIX: was ``contentepisodeNumber`` (lowercase "e"), which the
            # infoLabels machinery does not recognize — episode numbers were
            # silently dropped. Matches ``contentSeasonNumber`` casing.
            new_item.contentSeasonNumber = season
            new_item.contentEpisodeNumber = episode
            new_item.context = []
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Paginacion
    next_page = scrapertools.find_single_match(data, 'rel=next href=(.*?)>ยป</a>')
    if next_page:
        itemlist.append(
            Item(channel=item.channel,
                 action="list_all",
                 title='Siguiente >>>',
                 url=next_page,
                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                 type=item.type))
    return itemlist