def episodios(item):
    """Build the season/episode tree of a show that is served behind an iframe."""
    log()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    iframe_patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'
    base_url = scrapertoolsV2.find_single_match(data, iframe_patron).replace("?seriehd", "")
    # Season and episode lists share the same <li><a> markup
    link_patron = r'<li[^>]+><a href="([^"]+)">(\d+)<'
    seasons = support.match(item, link_patron, r'<h3>STAGIONE</h3><ul>(.*?)</ul>', headers, base_url)[0]
    for season_href, season_number in seasons:
        season_href = urlparse.urljoin(base_url, season_href)
        episodes = support.match(item, link_patron, '<h3>EPISODIO</h3><ul>(.*?)</ul>', headers, season_href)[0]
        for episode_href, episode_number in episodes:
            episode_href = urlparse.urljoin(base_url, episode_href)
            # e.g. "2x05"
            title = season_number + "x" + episode_number.zfill(2)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType="episode",
                     title=support.typo(title + ' - ' + item.show, 'bold'),
                     url=episode_href,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail))
    support.videolibrary(itemlist, item, 'color kod bold')
    return itemlist
def mainlist(item):
    """Root menu: film and series sections plus search entries."""
    support.log()
    itemlist = []
    # (positional args, keyword args) for each menu entry, in display order
    entries = [
        (('Film', 'peliculas', host + "/film/"), {}),
        (('Film Anime', 'peliculas', host + "/genere/anime/"), {}),
        (('Film per genere', 'generos', host), {}),
        (('Serie TV', 'peliculas', host + "/serietv/"), {'contentType': 'tvshow'}),
        (('Anime', 'peliculas', host + "/genere/anime/"), {'contentType': 'tvshow'}),
        (('Cerca film', 'search', host), {}),
        (('Cerca serie tv', 'search', host), {'contentType': 'tvshow'}),
    ]
    for args, kwargs in entries:
        support.menu(itemlist, *args, **kwargs)
    # Register autoplay preferences for this channel
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def peliculas_list(item):
    """Tabular catalogue listing: one row per title with year and quality columns."""
    support.log()
    # One table row: thumb cell, title anchor, year cell, quality cell
    row_patron = (r'<td class="mlnh-thumb"><a href="([^"]+)" title="([^"]+)".*?> '
                  r'<img.*?src="([^"]+)".*?<td class="mlnh-3">([0-9]+)<\/td>'
                  r'<td class="mlnh-4">(.*?)<\/td>')
    return support.scrape(item, row_patron, ['url', 'title', 'year', 'quality'],
                          patron_block=r'<tbody>(.*)<\/tbody>')
def episodios(item):
    """List the episodes of a show whose pages are numbered 'pagelink' anchors.

    The landing page itself is episode 1; further episodes are the numbered
    pagination links.

    Fixes: the original ended with a bare ``support.videolibrary`` attribute
    reference (never called), so the "add to videolibrary" entry was never
    appended; it is now actually invoked.  The first typo() call is also made
    consistent with the one used in the loop below.
    """
    log()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    block = scrapertoolsV2.find_single_match(
        data,
        r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(.*?)</span></a></div>')
    # Episode 1 is always the page we were given
    itemlist.append(
        Item(channel=item.channel,
             action='findvideos',
             contentType=item.contentType,
             title=support.typo('Episodio 1', 'bold'),
             fulltitle=item.title,
             url=item.url,
             thumbnail=item.thumbnail))
    if block:
        # Remaining episodes: one numbered pagination anchor each
        matches = re.compile(r'<a href="([^"]+)".*?><span class="pagelink">(\d+)</span></a>',
                             re.DOTALL).findall(data)
        for url, number in matches:
            itemlist.append(
                Item(channel=item.channel,
                     action='findvideos',
                     contentType=item.contentType,
                     title=support.typo('Episodio ' + number, 'bold'),
                     fulltitle=item.title,
                     url=url,
                     thumbnail=item.thumbnail))
    autorenumber.renumber(itemlist, item)
    # Was: `support.videolibrary` (a no-op expression) — call it.
    support.videolibrary(itemlist, item)
    return itemlist
def write_data(channel, show, data):
    """Persist (or remove) the season/episode renumbering data for a show.

    channel -- channel id that owns the renumbering json file
    show    -- show title; stripped and used as the dict key
    data    -- season/episode list to store; falsy data deletes the entry

    Shows the user a localized success/failure notification.

    Cleanup: the original computed an unused ``list_season_episode`` lookup
    and a duplicate ``heading = show.strip()``; both removed.
    """
    log()
    dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE)
    tvshow = show.strip()
    if data:
        dict_series[tvshow] = {TAG_SEASON_EPISODE: data}
    else:
        # No data supplied: drop the show from the renumbering file
        dict_series.pop(tvshow, None)
    result, json_data = jsontools.update_node(dict_series, channel, TAG_TVSHOW_RENUMERATE)
    if result:
        # 60446 = saved, 60444 = removed
        message = config.get_localized_string(60446) if data else config.get_localized_string(60444)
    else:
        message = config.get_localized_string(70593)  # write error
    platformtools.dialog_notification(tvshow, message)
def last_ep(item):
    """Scrape the 'latest episodes' tab; every entry goes straight to findvideos."""
    log('ANIME PER TUTTI')
    anchor_patron = '<a href="([^"]+)">([^<]+)<'
    tab_block = '<ul class="mh-tab-content-posts">(.*?)<\/ul>'
    return support.scrape(item, anchor_patron, ['url', 'title'],
                          patron_block=tab_block, action='findvideos')
def peliculas(item):
    # List entries from a card grid; the markup differs between the movie
    # section and the /serietv/ section, so two patterns are used.
    support.log()
    if item.contentType == 'movie' or '/serietv/' not in item.url:
        # Movie card: thumb, url, title, optional [quality], optional (year),
        # genre, duration and plot all inside one card-image block.
        patron = r'<div class="?card-image"?>.*?<img src="?([^" ]+)"? alt.*?<a href="?([^" >]+)(?:\/|")>([^<[(]+)(?:\[([A-Za-z0-9/-]+)])? (?:\(([0-9]{4})\))?.*?<strong>([^<>&]+).*?DURATA ([0-9]+).*?<br(?: /)?>([^<>]+)'
        listGroups = [
            'thumb', 'url', 'title', 'quality', 'year', 'genre', 'duration', 'plot'
        ]
        action = 'findvideos'
    else:
        # TV-show card: no quality/duration; entries open the episode list.
        patron = r'div class="card-image">.*?<img src="([^ ]+)" alt.*?<a href="([^ >]+)">([^<[(]+)<\/a>.*?<strong><span style="[^"]+">([^<>0-9(]+)\(([0-9]{4}).*?<\/(p|div)>([^<>]+)'
        listGroups = ['thumb', 'url', 'title', 'genre', 'year', 'plot']
        action = 'episodios'
    # patron_block: first narrow to the page's left column, then split it
    # into per-card chunks via the lookahead.
    return support.scrape(
        item,
        patron_block=[
            r'<div class="?sequex-page-left"?>(.*?)<aside class="?sequex-page-right"?>',
            '<div class="?card-image"?>.*?(?=<div class="?card-image"?>|<div class="?rating"?>)'
        ],
        patron=patron,
        listGroups=listGroups,
        patronNext=
        '<a class="?page-link"? href="?([^>]+)"?><i class="fa fa-angle-right">',
        blacklist=blacklist,
        action=action)
def nation(item):
    """Menu of TV series grouped by nationality."""
    log()
    itemlist = []
    for label, path in (('Serie TV Americane', '/serie-tv-streaming/serie-tv-americane/'),
                        ('Serie TV Italiane', '/serie-tv-streaming/serie-tv-italiane/')):
        menu(itemlist, label, 'peliculas', host + path)
    return itemlist
def peliculas(item):
    """Scrape a listing page; search results use different markup than browsing."""
    support.log()
    next_patron = '<a class="next page-numbers" href="([^"]+)">'
    if item.extra == 'search':
        # Search result card: anchor wraps the thumb and a titleFilm heading
        itemlist = support.scrape(
            item,
            r'<a href="([^"]+)">\s*<div[^=]+=[^=]+=[^=]+=[^=]+=[^=]+="(.*?)"[^>]+>[^<]+<[^>]+>\s*<h[^=]+="titleFilm">(.*?)<',
            ['url', 'thumb', 'title'], headers,
            patronNext=next_patron)
    else:
        # Browse card: thumb first, then the title anchor and an IMDB rating
        itemlist = support.scrape(
            item,
            r'<img width[^s]+src="([^"]+)[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)">([^<]+)<\/a>[^>]+>[^>]+>[^>]+>(?:[^>]+>|)[^I]+IMDB\:\s*([^<]+)<',
            ['thumb', 'url', 'title', 'rating'], headers,
            patronNext=next_patron)
    # Drop the trailing "(...)" annotation from every title
    for entry in itemlist:
        entry.title = re.sub(r'.\(.*?\)', '', entry.title)
    return itemlist
def findvideos(item):
    """Resolve the servers for the selected entry and hand off to autoplay."""
    support.log(item.channel + " findvideos")
    itemlist = support.server(item, data=item.url)
    autoplay.start(itemlist, item)
    return itemlist
def AZlist(item):
    """Alphabetical catalogue: one entry per starting letter."""
    support.log()
    letter_patron = r'<a title="([^"]+)" href="([^"]+)"'
    letters_block = r'<div class="movies-letter">(.*?)<\/div>'
    return support.scrape(item, letter_patron, ['title', 'url'], headers,
                          patron_block=letters_block,
                          action='peliculas_list', url_host=host)
def renumber(itemlist, item='', typography=''):
    # Renumber episode titles using the per-show mapping saved by config_item.
    # With an item: prefix every entry of itemlist with "SxE - " computed from
    # TVDB's aired order.  Without an item: only attach the renumbering
    # context menu to non-movie entries.
    log()
    if item:
        try:
            dict_series = jsontools.get_node_from_file(item.channel, TAG_TVSHOW_RENUMERATE)
            SERIES = dict_series[item.show.rstrip()]['season_episode']
            S = SERIES[0]   # first aired season covered by the listing
            E = SERIES[1]   # first episode of that season (0 = leading special)
            SP = SERIES[2]  # special's episode number, used only when E == 0
            ID = SERIES[3]  # TVDB series id
            page = 1
            epList = []
            exist = True
            item.infoLabels['tvdb_id'] = ID
            tvdb.set_infoLabels_item(item)
            # Walk TVDB's paged episode list, collecting every (season,
            # episode) pair from the configured starting point onwards.
            while exist:
                data = tvdb.otvdb_global.get_list_episodes(ID, page)
                if data:
                    for episodes in data['data']:
                        if episodes['airedSeason'] >= S:
                            if E == 0:
                                # Leading special: emit it once as 0xSP,
                                # then resume normal numbering from 1.
                                epList.append([0, SP])
                                E = 1
                            if episodes['airedEpisodeNumber'] >= E:
                                epList.append([
                                    episodes['airedSeason'],
                                    episodes['airedEpisodeNumber']
                                ])
                    page = page + 1
                else:
                    # No more pages
                    exist = False
            epList.sort()
            ep = 0
            # Pair listing entries with the collected numbering, in order.
            # NOTE: rebinds the `item` parameter; intentional in the original.
            for item in itemlist:
                s = str(epList[ep][0])
                e = str(epList[ep][1])
                item.title = typo(s + 'x' + e + ' - ', typography) + item.title
                ep = ep + 1
        except:
            # Any failure (no mapping for the show, TVDB error, fewer
            # epList entries than itemlist): return the list untouched.
            return itemlist
    else:
        for item in itemlist:
            if item.contentType != 'movie':
                # Prepend the renumbering context-menu entries
                if item.context:
                    context2 = item.context
                    item.context = context() + context2
                else:
                    item.context = context()
    return itemlist
def config_item(item):
    """Interactively configure renumbering for a show and persist it.

    Asks for the starting season/episode, then for the TVDB id if it is not
    already known, and writes everything via write_data().

    Fix: add_season() returns None when the user cancels one of the numeric
    dialogs; the original then crashed calling .append() on None.  We now
    abort quietly in that case.
    """
    log(item)
    tvdb.find_and_set_infoLabels(item)
    data = add_season('')
    if data is None:
        # User backed out of the season/episode dialogs: nothing to save.
        return
    if not item.infoLabels['tvdb_id']:
        # TVDB lookup failed: ask the user for the id directly
        heading = 'TVDB ID'
        item.infoLabels['tvdb_id'] = platformtools.dialog_numeric(0, heading)
    data.append(item.infoLabels['tvdb_id'])
    write_data(item.from_channel, item.show, data)
def add_season(data=None):
    """Prompt the user for season and episode numbers.

    Returns [season, episode] as ints, or None when either dialog is
    cancelled (empty input).  `data` is only logged, never read.
    """
    log("data= ", data)
    season = platformtools.dialog_numeric(0, config.get_localized_string(70686))
    if season == "":
        return  # cancelled
    episode = platformtools.dialog_numeric(0, config.get_localized_string(70687))
    if episode == "":
        return  # cancelled
    return [int(season), int(episode)]
def menu(item):
    """List the sub-categories of the tab whose id is carried in item.args."""
    support.log()
    tab_block = '<ul class="listSubCat" id="' + str(item.args) + '">(.*?)</ul>'
    itemlist = support.scrape(item, '<li><a href="(.*?)">(.*?)</a></li>',
                              ['url', 'title'], headers,
                              patron_block=tab_block, action='peliculas')
    return support.thumb(itemlist)
def categories(item):
    """List the film categories taken from the sidebar category list."""
    support.log(item)
    return support.thumb(
        support.scrape(item,
                       '<li><a href="([^"]+)">(.*?)</a></li>',
                       ['url', 'title'],
                       headers,
                       'Altadefinizione01',
                       patron_block='<ul class="kategori_list">(.*?)</ul>',
                       action='peliculas',
                       url_host=host))
def search(item, texto):
    """Site search entry point; returns [] on any scraping error.

    Fix: `except Exception, e` is Python-2-only syntax; replaced with
    `except Exception as e`, which is valid on Python 2.6+ and Python 3.
    """
    support.log("s=", texto)
    item.url = host + "/?s=" + texto
    try:
        return peliculas(item)
    # Keep the global search running even if this channel fails
    except Exception as e:
        import traceback
        traceback.print_stack()
        support.log(str(e))
        return []
def peliculas(item):
    # Parse the anime listing: one anchor per show carrying url/title/thumb.
    log()
    itemlist = []
    # Titles to skip entirely (editorial posts, not shows)
    blacklist = ['top 10 anime da vedere']
    matches, data = support.match(
        item,
        r'<a class="[^"]+" href="([^"]+)" title="([^"]+)"><img[^s]+src="([^"]+)"[^>]+'
    )
    for url, title, thumb in matches:
        title = scrapertoolsV2.decodeHtmlentities(title.strip()).replace(
            "streaming", "")
        # Language tag embedded in the title (ITA / SUB ITA)
        lang = scrapertoolsV2.find_single_match(title, r"((?:SUB ITA|ITA))")
        # Movies and OAVs get a suffix and go straight to findvideos
        videoType = ''
        if 'movie' in title.lower():
            videoType = ' - (MOVIE)'
        if 'ova' in title.lower():
            videoType = ' - (OAV)'
        # Strip the language/format boilerplate to obtain the bare title
        cleantitle = title.replace(lang, "").replace(
            '(Streaming & Download)', '').replace('( Streaming & Download )',
            '').replace('OAV', '').replace('OVA', '').replace('MOVIE', '').strip()
        if not videoType:
            contentType = "tvshow"
            action = "episodios"
        else:
            contentType = "movie"
            action = "findvideos"
        if not title.lower() in blacklist:
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     contentType=contentType,
                     title=support.typo(cleantitle + videoType, 'bold') +
                     support.typo(lang, '_ [] color kod'),
                     fulltitle=cleantitle,
                     show=cleantitle,
                     url=url,
                     thumbnail=thumb))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)
    support.nextPage(itemlist, item, data,
                     r'<a class="next page-numbers" href="([^"]+)">')
    return itemlist
def lista_serie(item):
    # Paged list of series.  The page number travels inside item.url after a
    # '{}' separator so the same action serves every page.
    support.log(item.channel + " lista_serie")
    itemlist = []
    PERPAGE = 15  # entries shown per page
    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)
    # Download the page
    data = httptools.downloadpage(item.url).data
    # Extract the entries
    patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        scrapedplot = ""
        scrapedthumbnail = ""
        # Keep only the window [ (p-1)*PERPAGE, p*PERPAGE )
        if (p - 1) * PERPAGE > i:
            continue
        if i >= p * PERPAGE:
            break
        title = cleantitle(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodes",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination: add a 'next page' entry while full pages remain
    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 action='lista_serie',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
                 url=scrapedurl,
                 args=item.args,
                 thumbnail=support.thumb()))
    return itemlist
def search(item, texto):
    """Run a site search; any scraping error is logged and yields []."""
    log(texto)
    item.url = host + "/?s=" + texto
    try:
        return peliculas(item)
    except:
        # Log the exception triple but keep the global search going
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def search(item, text):
    """Append the query string to the base url and delegate to peliculas()."""
    support.log(item.url, "search", text)
    try:
        item.url = item.url + "/?s=" + text
        return peliculas(item)
    except:
        # Log the exception triple but keep the global search going
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def mainlist(item):
    """Root menu of the movie channel.

    Fix: the '/sub-ita/' entry was labelled 'Sub-IIA' — a typo for 'Sub-ITA'
    (the convention used by the sibling channels in this file).
    """
    support.log()
    itemlist = []
    support.menu(itemlist, 'Film', 'peliculas', host + "/nuove-uscite/")
    support.menu(itemlist, 'Per Genere submenu', 'menu', host, args='Film')
    support.menu(itemlist, 'Per Anno submenu', 'menu', host, args='Anno')
    support.menu(itemlist, 'Sub-ITA', 'peliculas', host + "/sub-ita/")
    support.menu(itemlist, 'Cerca...', 'search', host, 'movie')
    # Register autoplay preferences for this channel
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def findvideos(item):
    """Collect the HDPass mirrors, optionally verify them, and start autoplay."""
    support.log()
    itemlist = support.hdpass_get_servers(item)
    # Optionally check that the links are alive (user setting)
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    itemlist = filtertools.get_links(itemlist, item, list_language)
    autoplay.start(itemlist, item)
    support.videolibrary(itemlist, item, 'color blue bold')
    return itemlist
def episodes(item):
    # List the episode posts of a show; the series name is derived from the
    # post title by cutting at the season marker (" S0" / " S1" / " S2").
    support.log(item.channel + " episodes")
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
    patron += '<p><a href="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedtitle = cleantitle(scrapedtitle)
        # Strip everything from the season tag onwards to get the show name
        title = scrapedtitle.split(" S0")[0].strip()
        title = title.split(" S1")[0].strip()
        title = title.split(" S2")[0].strip()
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 contentSerieName=title,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination: link following the current (highlighted) page number
    patron = '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action='episodes',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
                 url=next_page,
                 args=item.args,
                 thumbnail=support.thumb()))
    # support.videolibrary(itemlist,item,'bold color kod')
    return itemlist
def mainlist(item):
    """Root menu of the TV-series channel."""
    log()
    itemlist = []
    # (positional args, keyword args) for each menu entry, in display order
    entries = [
        (('Serie TV', 'peliculas', host + '/serie-tv-streaming/', 'tvshow'), {}),
        (('Per Genere submenu', 'genre', host, 'tvshow', 'TV'), {}),
        (('Per Nazione submenu', 'nation', host + '/serie-tv-streaming/', 'tvshow', 'TV'), {}),
        (('Cerca...', 'search'), {'contentType': 'episode', 'args': 'TV'}),
    ]
    for args, kwargs in entries:
        menu(itemlist, *args, **kwargs)
    # Register autoplay preferences for this channel
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def search(item, texto):
    """Dispatch a search to the movie or TV listing, depending on item.extra."""
    support.log(texto)
    item.url = "%s/index.php?do=search&story=%s&subaction=search" % (host, texto)
    try:
        if item.extra == "movie":
            return subIta(item)
        if item.extra == "tvshow":
            return peliculas_tv(item)
    except:
        # Log the exception triple but keep the global search going
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def mainlist(item):
    """Root menu of the movie channel: sections, alphabetical index, search."""
    support.log()
    itemlist = []
    for entry in (('Al Cinema', 'peliculas', host + '/cinema/'),
                  ('Ultimi Film Inseriti', 'peliculas', host),
                  ('Film Sub-ITA', 'peliculas', host + '/sub-ita/'),
                  ('Film Ordine Alfabetico ', 'AZlist', host + '/catalog/'),
                  ('Categorie Film', 'categories', host),
                  ('Cerca...', 'search')):
        support.menu(itemlist, *entry)
    # Register autoplay preferences for this channel
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def mainlist(item):
    """Root menu of the anime channel."""
    log()
    itemlist = []
    for entry in (('Anime Leggendari', 'peliculas', host + '/category/anime-leggendari/'),
                  ('Anime ITA', 'peliculas', host + '/category/anime-ita/'),
                  ('Anime SUB-ITA', 'peliculas', host + '/category/anime-sub-ita/'),
                  ('Anime Conclusi', 'peliculas', host + '/category/serie-anime-concluse/'),
                  ('Anime in Corso', 'peliculas', host + '/category/anime-in-corso/'),
                  ('Genere', 'genres', host),
                  ('Cerca...', 'search'),
                  ('novita', 'newest')):
        menu(itemlist, *entry)
    # Register autoplay preferences for this channel
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def peliculas(item):
    """Parse the movie grid with per-entry year/duration/plot metadata.

    Fixes: the Item was built with ``contentType=item.contenType`` — a typo
    for ``contentType`` — so the content type was never propagated.  The
    loop-invariant scan for the info blocks is also hoisted out of the
    per-entry loop (it only depends on ``data``).
    """
    support.log()
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = r'<div class="cover_kapsul ml-mask".*?<a href="(.*?)">(.*?)<\/a>.*?<img .*?src="(.*?)".*?<div class="trdublaj">(.*?)<\/div>.(<div class="sub_ita">(.*?)<\/div>|())'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)
    # Info strips (year, duration, plot, watch-url) live in a separate block;
    # scan them once and match them back to each entry through the url.
    info = scrapertoolsV2.find_multiple_matches(
        data,
        r'<span class="ml-label">([0-9]+)+<\/span>.*?<span class="ml-label">(.*?)<\/span>.*?<p class="ml-cat".*?<p>(.*?)<\/p>.*?<a href="(.*?)" class="ml-watch">'
    )
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText, empty in matches:
        infoLabels = {}
        for infoLabels['year'], duration, scrapedplot, checkUrl in info:
            if checkUrl == scrapedurl:
                break
        # Duration is given as "NN min": convert to seconds
        infoLabels['duration'] = int(duration.replace(' min', '')) * 60
        scrapedthumbnail = host + scrapedthumbnail
        scrapedtitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
        fulltitle = scrapedtitle
        if subDiv:
            # Sub-ITA badge
            fulltitle += support.typo(subText + ' _ () color limegreen')
        fulltitle += support.typo(scrapedquality.strip() + ' _ [] color kod')
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType=item.contentType,  # was: item.contenType (typo)
                 contentTitle=scrapedtitle,
                 contentQuality=scrapedquality.strip(),
                 plot=scrapedplot,
                 title=fulltitle,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 url=scrapedurl,
                 infoLabels=infoLabels,
                 thumbnail=scrapedthumbnail))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    support.nextPage(itemlist, item, data, '<span>[^<]+</span>[^<]+<a href="(.*?)">')
    return itemlist
def episodios(item):
    # Episode list built from the expandable "sp-head" season blocks of the
    # show page; each <p> line inside a block is one "SxE" episode whose raw
    # HTML (with the links) is carried forward as the item url.
    item.contentType = 'episode'
    itemlist = []
    data = httptools.downloadpage(item.url).data
    matches = scrapertoolsV2.find_multiple_matches(
        data,
        r'(<div class="sp-head[a-z ]*?" title="Espandi">[^<>]*?</div>.*?)<div class="spdiv">\[riduci\]</div>'
    )
    for match in matches:
        support.log(match)
        blocks = scrapertoolsV2.find_multiple_matches(
            match, '(?:<p>)(.*?)(?:</p>|<br)')
        # Season label from the expandable header ("STAGIONE N ...")
        season = scrapertoolsV2.find_single_match(
            match, r'title="Espandi">.*?STAGIONE\s+\d+([^<>]+)').strip()
        for block in blocks:
            # "SxE" marker, matching both the raw and HTML-entity '×'
            episode = scrapertoolsV2.find_single_match(
                block, r'([0-9]+(?:×|&#215;)[0-9]+)').strip()
            seasons_n = scrapertoolsV2.find_single_match(
                block, r'<strong>STAGIONE\s+\d+([^<>]+)').strip()
            if seasons_n:
                # An inline season header overrides the block label
                season = seasons_n
            if not episode:
                continue  # line without an SxE marker: not an episode
            # Normalize en-dashes (raw or entity) in the season label
            season = re.sub(r'–|&#8211;', "-", season)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title="[B]" + episode + "[/B] " + season,
                     fulltitle=episode + " " + season,
                     show=episode + " " + season,
                     url=block,
                     extra=item.extra,
                     thumbnail=item.thumbnail,
                     infoLabels=item.infoLabels))
    support.videolibrary(itemlist, item)
    return itemlist