def episodios(item):
    """Build the episode list for an anime title.

    Episode 1 is served by the item's own page; further episodes come from
    the pagination block, one page link per episode.

    Args:
        item: channel Item pointing at the title's page.
    Returns:
        list of Item objects, one per episode (plus videolibrary entry).
    """
    log()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Pagination block holding the per-episode page links, if any.
    block = scrapertoolsV2.find_single_match(
        data,
        r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(.*?)</span></a></div>')

    # The main page itself is episode 1.
    itemlist.append(
        Item(channel=item.channel,
             action='findvideos',
             contentType=item.contentType,
             title=support.typo('Episodio 1 bold'),
             fulltitle=item.title,
             url=item.url,
             thumbnail=item.thumbnail))

    if block:
        matches = re.compile(
            r'<a href="([^"]+)".*?><span class="pagelink">(\d+)</span></a>',
            re.DOTALL).findall(data)
        for url, number in matches:
            itemlist.append(
                Item(channel=item.channel,
                     action='findvideos',
                     contentType=item.contentType,
                     title=support.typo('Episodio ' + number, 'bold'),
                     fulltitle=item.title,
                     url=url,
                     thumbnail=item.thumbnail))

    autorenumber.renumber(itemlist, item)
    # BUG FIX: the original evaluated `support.videolibrary` without calling
    # it (a no-op attribute access), so the "add to videolibrary" entry was
    # never appended.
    support.videolibrary(itemlist, item)
    return itemlist
def peliculas(item):
    """List the anime titles found on the current page.

    Each entry is tagged with its language (ITA / SUB ITA) and, when the
    title marks it, as MOVIE or OAV; blacklisted promo posts are skipped.
    """
    log()
    itemlist = []
    blacklist = ['top 10 anime da vedere']
    matches, data = support.match(
        item,
        r'<a class="[^"]+" href="([^"]+)" title="([^"]+)"><img[^s]+src="([^"]+)"[^>]+')

    for url, raw_title, thumb in matches:
        raw_title = scrapertoolsV2.decodeHtmlentities(
            raw_title.strip()).replace("streaming", "")
        lang = scrapertoolsV2.find_single_match(raw_title, r"((?:SUB ITA|ITA))")
        lowered = raw_title.lower()

        # Tag movies / OAVs; the OAV tag wins when both markers appear.
        videoType = ''
        if 'movie' in lowered:
            videoType = ' - (MOVIE)'
        if 'ova' in lowered:
            videoType = ' - (OAV)'

        # Strip the language tag and promotional/format noise.
        cleantitle = raw_title.replace(lang, "")
        for junk in ('(Streaming & Download)', '( Streaming & Download )',
                     'OAV', 'OVA', 'MOVIE'):
            cleantitle = cleantitle.replace(junk, '')
        cleantitle = cleantitle.strip()

        if videoType:
            contentType, action = "movie", "findvideos"
        else:
            contentType, action = "tvshow", "episodios"

        if lowered not in blacklist:
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     contentType=contentType,
                     title=support.typo(cleantitle + videoType, 'bold')
                           + support.typo(lang, '_ [] color kod'),
                     fulltitle=cleantitle,
                     show=cleantitle,
                     url=url,
                     thumbnail=thumb))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)
    support.nextPage(itemlist, item, data,
                     r'<a class="next page-numbers" href="([^"]+)">')
    return itemlist
def mainlist(item):
    """Top-level menu: TV schedule time slots (now / morning / afternoon /
    evening / night), all handled by the `tvoggi` action."""
    logger.info(" mainlist")
    slots = [
        (support.typo("IN ONDA ADESSO bold color kod"), "%s/filmtv/" % host),
        ("Mattina", "%s/filmtv/oggi/mattina/" % host),
        ("Pomeriggio", "%s/filmtv/oggi/pomeriggio/" % host),
        ("Sera", "%s/filmtv/oggi/sera/" % host),
        ("Notte", "%s/filmtv/oggi/notte/" % host),
    ]
    return [Item(channel=item.channel,
                 title=label,
                 action="tvoggi",
                 url=url,
                 thumbnail="") for label, url in slots]
def episodios(item):
    """Scrape every season of a show, then every episode of each season.

    The embedded player iframe gives the base URL; season and episode links
    share the same <li><a> markup, scoped by their respective <h3> blocks.
    """
    log()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'
    base_url = scrapertoolsV2.find_single_match(data, patron).replace("?seriehd", "")

    # Same link pattern serves both the season and the episode lists.
    link_patron = r'<li[^>]+><a href="([^"]+)">(\d+)<'
    seasons = support.match(item, link_patron,
                            r'<h3>STAGIONE</h3><ul>(.*?)</ul>',
                            headers, base_url)[0]
    for season_href, season_num in seasons:
        season_url = urlparse.urljoin(base_url, season_href)
        episodes = support.match(item, link_patron,
                                 '<h3>EPISODIO</h3><ul>(.*?)</ul>',
                                 headers, season_url)[0]
        for episode_href, episode_num in episodes:
            episode_url = urlparse.urljoin(base_url, episode_href)
            label = season_num + "x" + episode_num.zfill(2)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType="episode",
                     title=support.typo(label + ' - ' + item.show, 'bold'),
                     url=episode_url,
                     fulltitle=label + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail))

    support.videolibrary(itemlist, item, 'color kod bold')
    return itemlist
def peliculas(item):
    """List movies on the page, enriching each with year/duration/plot
    cross-referenced from the page's info table by URL.

    Fixes vs. previous revision:
      * `item.contenType` typo -> `item.contentType` (the old attribute
        never existed, so contentType was silently lost)
      * NameError when the info pattern matched nothing (`duration` was
        read after the loop without ever being bound)
      * when no table row matched a movie, the LAST row's data was silently
        used; now unmatched movies simply get no year/duration/plot
      * the info table is scraped once instead of once per movie
    """
    support.log()
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = r'<div class="cover_kapsul ml-mask".*?<a href="(.*?)">(.*?)<\/a>.*?<img .*?src="(.*?)".*?<div class="trdublaj">(.*?)<\/div>.(<div class="sub_ita">(.*?)<\/div>|())'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)

    # (year, duration, plot, url) rows — scraped once for the whole page.
    info = scrapertoolsV2.find_multiple_matches(
        data,
        r'<span class="ml-label">([0-9]+)+<\/span>.*?<span class="ml-label">(.*?)<\/span>.*?<p class="ml-cat".*?<p>(.*?)<\/p>.*?<a href="(.*?)" class="ml-watch">')

    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText, empty in matches:
        infoLabels = {}
        scrapedplot = ''
        for year, duration, plot, checkUrl in info:
            if checkUrl == scrapedurl:
                infoLabels['year'] = year
                # duration arrives as "NN min"; store seconds.
                infoLabels['duration'] = int(duration.replace(' min', '')) * 60
                scrapedplot = plot
                break

        scrapedthumbnail = host + scrapedthumbnail
        scrapedtitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
        fulltitle = scrapedtitle
        if subDiv:
            fulltitle += support.typo(subText + ' _ () color limegreen')
        fulltitle += support.typo(scrapedquality.strip() + ' _ [] color kod')
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType=item.contentType,  # was item.contenType (typo)
                 contentTitle=scrapedtitle,
                 contentQuality=scrapedquality.strip(),
                 plot=scrapedplot,
                 title=fulltitle,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 url=scrapedurl,
                 infoLabels=infoLabels,
                 thumbnail=scrapedthumbnail))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    support.nextPage(itemlist, item, data,
                     '<span>[^<]+</span>[^<]+<a href="(.*?)">')
    return itemlist
def renumber(itemlist, item='', typography=''):
    """Renumber episode items using TVDB season/episode data, or attach a
    renumbering context menu.

    Two modes:
      * with `item`: looks up the show's saved renumbering config
        (start season S, start episode E, special SP, tvdb ID), pulls the
        episode list from TVDB page by page, and prefixes each entry of
        `itemlist` with "SxE - ".
      * without `item`: adds the renumbering context menu to every
        non-movie entry of `itemlist`.

    Returns `itemlist` (modified in place) in both modes.
    """
    log()
    if item:
        try:
            # Saved per-show renumbering config: [S, E, SP, tvdb_id].
            dict_series = jsontools.get_node_from_file(item.channel, TAG_TVSHOW_RENUMERATE)
            SERIES = dict_series[item.show.rstrip()]['season_episode']
            S = SERIES[0]   # first aired season to include
            E = SERIES[1]   # first aired episode to include (0 = special first)
            SP = SERIES[2]  # special episode number, emitted as season 0
            ID = SERIES[3]  # TVDB series id
            page = 1
            epList = []
            exist = True
            item.infoLabels['tvdb_id'] = ID
            tvdb.set_infoLabels_item(item)
            # Walk the paged TVDB episode list until an empty page is returned.
            while exist:
                data = tvdb.otvdb_global.get_list_episodes(ID, page)
                if data:
                    for episodes in data['data']:
                        if episodes['airedSeason'] >= S:
                            # E == 0 means the listing starts with a special:
                            # emit it as [0, SP] once, then continue from ep 1.
                            if E == 0:
                                epList.append([0, SP])
                                E = 1
                            if episodes['airedEpisodeNumber'] >= E:
                                epList.append([
                                    episodes['airedSeason'],
                                    episodes['airedEpisodeNumber']
                                ])
                    page = page + 1
                else:
                    exist = False
            epList.sort()
            ep = 0
            # NOTE: `item` is deliberately shadowed by the loop variable here;
            # the original argument is no longer needed at this point.
            for item in itemlist:
                s = str(epList[ep][0])
                e = str(epList[ep][1])
                item.title = typo(s + 'x' + e + ' - ', typography) + item.title
                ep = ep + 1
        # Broad on purpose: any failure (missing config, TVDB error, fewer
        # TVDB episodes than items) leaves the titles untouched.
        except:
            return itemlist
    else:
        # No item given: just attach the renumber context menu entries.
        for item in itemlist:
            if item.contentType != 'movie':
                if item.context:
                    context2 = item.context
                    item.context = context() + context2
                else:
                    item.context = context()
    return itemlist
def peliculas_tv(item):
    """List TV-series posts, skipping promo/self-links, with pagination.

    Args:
        item: channel Item whose url is the listing (or search-result) page.
    Returns:
        list of Item objects plus an optional next-page entry.
    """
    logger.info("icarus serietvsubita peliculas_tv")
    itemlist = []
    data = httptools.downloadpage(item.url).data
    logger.debug(data)
    patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        # Skip non-content promo posts.
        if "FACEBOOK" in scrapedtitle or "RAPIDGATOR" in scrapedtitle:
            continue
        if scrapedtitle == "WELCOME!":
            continue
        scrapedthumbnail = ""
        scrapedplot = ""
        scrapedtitle = cleantitle(scrapedtitle)
        # Series name = everything before the season tag (S0x/S1x/S2x).
        title = scrapedtitle.split(" S0")[0].strip()
        title = title.split(" S1")[0].strip()
        title = title.split(" S2")[0].strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 contentSerieName=title,
                 plot=scrapedplot,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Paginazione
    patron = '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        if item.extra == "search_tv":
            # BUG FIX: was replace('&', '&'), a no-op. Search pagination
            # links come back HTML-escaped; un-escape the ampersands.
            next_page = next_page.replace('&amp;', '&')
        itemlist.append(
            Item(channel=item.channel,
                 action='peliculas_tv',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
                 url=next_page,
                 args=item.args,
                 extra=item.extra,
                 thumbnail=support.thumb()))
    return itemlist
def lista_serie(item):
    """Paginated list of series categories, PERPAGE entries per page.

    The current page number travels appended to the url after a '{}'
    marker; the full category list is re-scraped and sliced each call.
    """
    support.log(item.channel + " lista_serie")
    itemlist = []
    PERPAGE = 15

    page = 1
    if '{}' in item.url:
        item.url, page = item.url.split('{}')
        page = int(page)

    # Download the page and extract every category entry.
    data = httptools.downloadpage(item.url).data
    patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    start = (page - 1) * PERPAGE
    stop = page * PERPAGE
    for show_url, raw_title in matches[start:stop]:
        title = cleantitle(raw_title)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodes",
                 title=title,
                 url=show_url,
                 thumbnail="",
                 fulltitle=title,
                 show=title,
                 plot="",
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Next-page entry while entries remain beyond the current window.
    if len(matches) >= stop:
        itemlist.append(
            Item(channel=item.channel,
                 action='lista_serie',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
                 url=item.url + '{}' + str(page + 1),
                 args=item.args,
                 thumbnail=support.thumb()))
    return itemlist
def episodes(item):
    """List every episode post of a series page, with pagination."""
    support.log(item.channel + " episodes")
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = ('<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"'
              '\s*class=".*?"></a>.*?' + '<p><a href="([^"]+)">')
    for ep_url, ep_title, ep_thumb in re.compile(patron, re.DOTALL).findall(data):
        ep_title = cleantitle(ep_title)
        # Series name = everything before the season tag (S0x/S1x/S2x).
        serie = ep_title.split(" S0")[0].strip()
        serie = serie.split(" S1")[0].strip()
        serie = serie.split(" S2")[0].strip()
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="findvideos",
                 fulltitle=ep_title,
                 show=ep_title,
                 title=ep_title,
                 url=ep_url,
                 thumbnail=ep_thumb,
                 plot="",
                 contentSerieName=serie,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Paginazionazione
    next_page = scrapertools.find_single_match(
        data, '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action='episodes',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
                 url=next_page,
                 args=item.args,
                 thumbnail=support.thumb()))
    # support.videolibrary(itemlist,item,'bold color kod')
    return itemlist
def episodios(item):
    """Build "season x episode" entries from the per-season list blocks,
    then append the add-to-videolibrary entry when supported."""
    support.log(item.channel + " episodios")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data

    # Season numbers offered by the page's <option> dropdown.
    seasons = re.compile(r'<option value="(\d+)"[\sselected]*>.*?</option>',
                         re.DOTALL).findall(data)
    ep_patron = r'(<a data-id="\d+[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">)[^>]+>[^>]+>([^<]+)<'
    for season in seasons:
        # The <div class="list"> block holding this season's episode links.
        blocco = scrapertools.find_single_match(
            data,
            r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % season)
        for extra, ep_url, ep_img, ep_title in re.compile(ep_patron, re.DOTALL).findall(blocco):
            number = scrapertools.decodeHtmlentities(
                ep_title.replace("Episodio", "")).strip()
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=season + "x" + number.zfill(2),
                     fulltitle=ep_title,
                     contentType="episode",
                     url=ep_url,
                     thumbnail=ep_img,
                     extra=extra,
                     folder=True))

    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title=support.typo(
                     config.get_localized_string(30161) + ' bold color kod'),
                 thumbnail=support.thumb(),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 contentSerieName=item.fulltitle,
                 show=item.show))
    return itemlist
def mainlist(item):
    """Channel root menu plus autoplay and channel-configuration entries."""
    support.log(item.channel + 'mainlist')
    itemlist = []
    for label, action in (('Serie TV bold', 'lista_serie'),
                          ('Novità submenu', 'peliculas_tv'),
                          ('Archivio A-Z submenu', 'list_az'),
                          ('Cerca', 'search')):
        support.menu(itemlist, label, action, host, 'tvshow')

    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)

    itemlist.append(
        Item(channel='setting',
             action="channel_config",
             title=support.typo("Configurazione Canale color lime"),
             config=item.channel,
             folder=False,
             thumbnail=channelselector.get_thumb('setting_0.png')))
    return itemlist
def mainlist(item):
    """Channel root menu; autoplay entries are currently disabled."""
    support.log(item.channel + 'mainlist')
    itemlist = []
    for label, action, url in (
            ('Serie TV bold', 'lista_serie', "%s/category/serie-tv" % host),
            ('Novità submenu', 'latestep', "%s/ultimi-episodi" % host),
            ('Categorie', 'categorie', host),
            ('Cerca', 'search', host)):
        support.menu(itemlist, label, action, url, 'tvshow')

    # autoplay.init(item.channel, list_servers, list_quality)
    # autoplay.show_option(item.channel, itemlist)

    itemlist.append(
        Item(channel='setting',
             action="channel_config",
             title=support.typo("Configurazione Canale color lime"),
             config=item.channel,
             folder=False,
             thumbnail=channelselector.get_thumb('setting_0.png')))
    return itemlist
def last(item):
    """List the "last 100 updated movies" block.

    BUG FIX: `infoLabels` was a single dict created before the loop and
    passed to every Item, so all entries shared one mutated dict and ended
    up with the LAST movie's year. A fresh dict is now built per entry.
    """
    support.log()
    itemlist = []
    # NOTE(review): the pattern matches '<ahref=' with no space — presumably
    # whitespace is stripped from the page upstream; confirm before changing.
    matches = support.match(
        item,
        r'<ahref=([^>]+)>([^(:(|[)]+)([^<]+)<\/a>',
        r'<strong>Ultimi 100 film Aggiornati:<\/a><\/strong>(.*?)<td>',
        headers)[0]

    for url, title, info in matches:
        title = title.rstrip()
        # Per-entry metadata — must NOT be shared across iterations.
        infoLabels = {
            'year': scrapertoolsV2.find_single_match(info, r'\(([0-9]+)\)')
        }
        quality = scrapertoolsV2.find_single_match(info, r'\[([A-Z]+)\]')
        if quality:
            longtitle = title + support.typo(quality, '_ [] color kod')
        else:
            longtitle = title
        itemlist.append(
            Item(channel=item.channel,
                 action='findvideos',
                 contentType=item.contentType,
                 title=longtitle,
                 fulltitle=title,
                 show=title,
                 quality=quality,
                 url=url,
                 infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist