def list_news(item):
    """Build one page of the "news" listing for movies or TV episodes.

    Scrapes ``item.url`` for (url, thumbnail, title) triples, converts a
    window of them (starting at ``item.first``) into playable Items, fills
    metadata via TMDB, and appends a "Siguiente >>" pagination item while
    more matches remain.

    Args:
        item: channel Item; reads ``url`` (listing page), ``first`` (offset
            of this page into the scraped matches), ``news_type``
            ('movies' or series) and ``channel``.

    Returns:
        list of Item objects for the channel listing.
    """
    logger.info()
    itemlist = []
    seen_urls = []          # dedup: one Item per distinct API url
    no_more_pages = False   # renamed from `next`, which shadowed the builtin

    data = get_source(item.url)
    patron = ('<td><a href=([^ ]+) target="_parent">'
              '<img src=([^ ]+) class="s8" alt="([^"]+)"')
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Page window over the scraped matches.
    # NOTE(review): window is 19 entries, not 20 — confirm intended page size.
    first = item.first
    last = first + 19
    if last > len(matches):
        last = len(matches)
        no_more_pages = True

    for url, thumb, title in matches[first:last]:
        infoLabels = dict()
        # `title_id` renamed from `id`, which shadowed the builtin.
        title_id = scrapertools.find_single_match(url, 'titles/([^/]+)')

        if item.news_type == 'movies':
            # Strip the TMDB base URL so only the poster path remains; it is
            # handed to tmdb.set_infoLabels as a search filter.
            filter_thumb = thumb.replace(
                "https://image.tmdb.org/t/p/w185_and_h278_bestv2", "")
            infoLabels['filtro'] = {"poster_path": filter_thumb.strip()}.items()
            url = '%ssecure/titles/%s?titleId=%s' % (host, title_id, title_id)
        else:
            se_ep = scrapertools.get_season_and_episode(title)
            # Drop the "SxE" marker (normalising 'x0' -> 'x') to isolate the
            # bare series name.
            contentSerieName = title.replace(
                se_ep.replace('x0', 'x'), '').strip()
            if not config.get_setting('unify'):
                title = '%s - %s' % (se_ep, contentSerieName)
            se_ep = se_ep.split('x')
            url = '%ssecure/titles/%s?titleId=%s&seasonNumber=%s' % (
                host, title_id, title_id, se_ep[0])

        if url not in seen_urls:
            new_item = Item(channel=item.channel, action="findvideos",
                            title=title, url=url, thumbnail=thumb,
                            infoLabels=infoLabels)
            if item.news_type == 'movies':
                new_item.contentTitle = title
                new_item.action = 'findvideos'
            else:
                ep = int(se_ep[1])
                new_item.contentSerieName = contentSerieName
                new_item.url += '&episodeNumber=%s' % ep
                # presumably a zero-based index into the API episode list —
                # TODO confirm against findvideos
                new_item.ep_info = ep - 1
                new_item.infoLabels['episode'] = ep
            # Hoisted out of both branches (was duplicated in each).
            seen_urls.append(url)
            itemlist.append(new_item)

    tmdb.set_infoLabels(itemlist, True)

    # Pagination: re-enter this action on the same URL with the next offset.
    if not no_more_pages and item.url:
        itemlist.append(
            item.clone(title="Siguiente >>", url=item.url,
                       action='list_news', first=last))
    return itemlist