def episodios(item):
    """Build the episode list for a TV show item.

    Downloads the show page, extracts the player iframe URL, then scrapes
    the season list and, for each season, the episode list, producing one
    'findvideos' Item per episode titled "SxEE - show".

    NOTE(review): reconstructed from whitespace-mangled source; statement
    order preserved from the original flattened line.
    """
    log()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # The embedded player iframe carries the base URL for season/episode pages.
    patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'
    url = scrapertoolsV2.find_single_match(data, patron).replace("?seriehd", "")

    # Season links live inside the <h3>STAGIONE</h3> list.
    seasons = support.match(item, r'<li[^>]+><a href="([^"]+)">(\d+)<',
                            r'<h3>STAGIONE</h3><ul>(.*?)</ul>', headers, url)[0]
    for season_url, season in seasons:
        season_url = urlparse.urljoin(url, season_url)
        # Episode links live inside the <h3>EPISODIO</h3> list of each season page.
        episodes = support.match(item, r'<li[^>]+><a href="([^"]+)">(\d+)<',
                                 '<h3>EPISODIO</h3><ul>(.*?)</ul>', headers, season_url)[0]
        for episode_url, episode in episodes:
            episode_url = urlparse.urljoin(url, episode_url)
            title = season + "x" + episode.zfill(2)  # e.g. "2x05"
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType="episode",
                     title=support.typo(title + ' - ' + item.show, 'bold'),
                     url=episode_url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail))

    support.videolibrary(itemlist, item, 'color kod bold')
    return itemlist
def findvideos(item):
    """Resolve playable server links for *item*.

    Scrapes obfuscated `str="..."` payloads from the page, de-obfuscates
    them (the page encodes '%' escapes as '@'/'g'), and feeds the decoded
    data to support.server(); then applies link checking, language
    filtering and autoplay.

    NOTE(review): reconstructed from whitespace-mangled source. The
    original nesting of the 'animepertutti' check was ambiguous; the
    reading below (discard data unless it mentions animepertutti) is the
    most plausible — confirm against upstream history.
    """
    log()
    data = ''
    matches = support.match(item, 'str="([^"]+)"')[0]
    if matches:
        for match in matches:
            # Append both the de-obfuscated form ('@'/'g' were substituted
            # for '%') and the raw form, so the server detector can match either.
            data += str(jsfunctions.unescape(re.sub('@|g', '%', match)))
            data += str(match)
    log('DATA', data)
    if 'animepertutti' in data:
        log('ANIMEPERTUTTI!')
    else:
        # Not a recognized payload: pass nothing to the server resolver.
        data = ''

    itemlist = support.server(item, data)
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    itemlist = filtertools.get_links(itemlist, item, list_language)
    autoplay.start(itemlist, item)
    return itemlist
def peliculas(item):
    """List shows/movies from an index page.

    Each anchor yields (url, title, thumb). Titles containing MOVIE/OVA
    are treated as movies (action 'findvideos'); everything else is a
    tvshow (action 'episodios'). Known junk entries are blacklisted.

    NOTE(review): reconstructed from whitespace-mangled source; statement
    order preserved from the original flattened line.
    """
    log()
    itemlist = []
    blacklist = ['top 10 anime da vedere']

    matches, data = support.match(
        item,
        r'<a class="[^"]+" href="([^"]+)" title="([^"]+)"><img[^s]+src="([^"]+)"[^>]+')
    for url, title, thumb in matches:
        title = scrapertoolsV2.decodeHtmlentities(title.strip()).replace(
            "streaming", "")
        # Language tag, if present ("ITA" or "SUB ITA").
        lang = scrapertoolsV2.find_single_match(title, r"((?:SUB ITA|ITA))")

        videoType = ''
        if 'movie' in title.lower():
            videoType = ' - (MOVIE)'
        if 'ova' in title.lower():
            videoType = ' - (OAV)'

        # Strip language/marketing noise to get the bare title.
        cleantitle = title.replace(lang, "").replace(
            '(Streaming & Download)', '').replace(
            '( Streaming & Download )', '').replace(
            'OAV', '').replace('OVA', '').replace('MOVIE', '').strip()

        if not videoType:
            contentType = "tvshow"
            action = "episodios"
        else:
            contentType = "movie"
            action = "findvideos"

        if not title.lower() in blacklist:
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     contentType=contentType,
                     title=support.typo(cleantitle + videoType, 'bold')
                           + support.typo(lang, '_ [] color kod'),
                     fulltitle=cleantitle,
                     show=cleantitle,
                     url=url,
                     thumbnail=thumb))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)
    support.nextPage(itemlist, item, data,
                     r'<a class="next page-numbers" href="([^"]+)">')
    return itemlist
def last(item):
    """List the 'last updated' movies section.

    Scrapes (url, title, info) triples; *info* carries the year in
    parentheses and an optional uppercase quality tag in brackets.

    Bug fix vs. original: a single infoLabels dict was created once and
    mutated every iteration while being passed to every Item, so all
    items shared the LAST row's year. A fresh dict is now built per row.

    NOTE(review): reconstructed from whitespace-mangled source.
    """
    support.log()
    itemlist = []

    matches = support.match(
        item,
        r'<ahref=([^>]+)>([^(:(|[)]+)([^<]+)<\/a>',
        r'<strong>Ultimi 100 film Aggiornati:<\/a><\/strong>(.*?)<td>',
        headers)[0]
    for url, title, info in matches:
        title = title.rstrip()
        # Per-item dict: do NOT share one mutable dict across Items.
        infoLabels = {
            'year': scrapertoolsV2.find_single_match(info, r'\(([0-9]+)\)'),
        }
        quality = scrapertoolsV2.find_single_match(info, r'\[([A-Z]+)\]')
        if quality:
            longtitle = title + support.typo(quality, '_ [] color kod')
        else:
            longtitle = title
        itemlist.append(
            Item(channel=item.channel,
                 action='findvideos',
                 contentType=item.contentType,
                 title=longtitle,
                 fulltitle=title,
                 show=title,
                 quality=quality,
                 url=url,
                 infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist