def mainlist(item):
    """Build the root menu of the GuardaSerieClick channel."""
    logger.info("[GuardaSerieClick.py]==> mainlist")

    # Shared artwork for the folder entries; the search entry has its own icon.
    folder_thumb = ("http://orig03.deviantart.net/6889/f/2014/079/7/b/"
                    "movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png")
    search_thumb = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
    serie_url = "%s/lista-serie-tv" % host

    menu = []
    menu.append(Item(channel=item.channel,
                     action="nuoveserie",
                     title=support.color("Nuove serie TV", "orange"),
                     url=serie_url,
                     thumbnail=folder_thumb))
    menu.append(Item(channel=item.channel,
                     action="serietvaggiornate",
                     title=support.color("Serie TV Aggiornate", "azure"),
                     url=serie_url,
                     thumbnail=folder_thumb))
    menu.append(Item(channel=item.channel,
                     action="lista_serie",
                     title=support.color("Anime", "azure"),
                     url="%s/category/animazione/" % host,
                     thumbnail=folder_thumb))
    menu.append(Item(channel=item.channel,
                     action="categorie",
                     title=support.color("Categorie", "azure"),
                     url=host,
                     thumbnail=folder_thumb))
    menu.append(Item(channel=item.channel,
                     action="search",
                     title=support.color("Cerca ...", "yellow"),
                     extra="serie",
                     thumbnail=search_thumb))
    return menu
def findvideos(item):
    """Resolve the playable server links for an animeleggendari entry."""
    logger.info('[animeleggendari.py] findvideos')

    page = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=page)

    for video in itemlist:
        # Derive a clean server name from the raw title (strip brackets,
        # dashes and whitespace) and prepend it, colored, to the item title.
        server_name = re.sub(r'[-\[\]\s]+', '', video.title)
        video.title = "[%s] %s" % (support.color(server_name.capitalize(), 'orange'),
                                   item.title)
        video.fulltitle = item.fulltitle
        video.show = item.show
        video.thumbnail = item.thumbnail
        video.channel = item.channel

    # Optionally verify that the scraped links are alive.
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Language filtering (FilterTools).
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # AutoPlay integration.
    autoplay.start(itemlist, item)

    return itemlist
def mainlist(item):
    """Build the root menu for this anime channel."""
    logger.info()

    # Shared artwork for the folder entries; the search entry has its own icon.
    folder_thumb = ("http://orig03.deviantart.net/6889/f/2014/079/7/b/"
                    "movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png")
    search_thumb = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"

    menu = []
    menu.append(Item(channel=item.channel,
                     action="lista_anime_completa",
                     title=support.color("Lista Anime", "azure"),
                     url="%s/lista-anime/" % host,
                     thumbnail=folder_thumb))
    menu.append(Item(channel=item.channel,
                     action="ultimiep",
                     title=support.color("Ultimi Episodi", "azure"),
                     url="%s/category/ultimi-episodi/" % host,
                     thumbnail=folder_thumb))
    menu.append(Item(channel=item.channel,
                     action="lista_anime",
                     title=support.color("Anime in corso", "azure"),
                     url="%s/category/anime-in-corso/" % host,
                     thumbnail=folder_thumb))
    menu.append(Item(channel=item.channel,
                     action="categorie",
                     title=support.color("Categorie", "azure"),
                     url="%s/generi/" % host,
                     thumbnail=folder_thumb))
    menu.append(Item(channel=item.channel,
                     action="search",
                     title=support.color("Cerca anime ...", "yellow"),
                     extra="anime",
                     thumbnail=search_thumb))
    return menu
def findvideos(item):
    """List the playable video links found on a casacinema page.

    For tv-show episodes ``item.url`` already carries the scraped data
    inline (set by the episode lister); for movies the page is downloaded
    first.  Links are tagged with server name and library metadata, then
    run through link checking, FilterTools and AutoPlay.
    """
    logger.info("kod.casacinema findvideos")

    # tvshow items carry the data inline; everything else needs a download.
    data = item.url if item.extra == "tvshow" else httptools.downloadpage(
        item.url, headers=headers).data

    # NOTE(review): the original code downloaded ``data`` a second time and
    # scraped shrink-service.it links out of it, but the matches were then
    # discarded by a no-op loop (``data = data`` / ``continue``).  That dead
    # code — including the now-pointless extra page download — was removed;
    # behavior of the returned itemlist is unchanged.

    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        # Server name derived from the raw title minus brackets/dashes/spaces.
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()
        videoitem.title = "".join(
            ["[%s] " % support.color(server, 'orange'), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
        videoitem.language = IDIOMAS['Italiano']

    # Optionally verify that the scraped links are alive.
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Language filtering (FilterTools).
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # AutoPlay integration.
    autoplay.start(itemlist, item)

    # Offer "add to video library" for movies only (not single episodes),
    # and only when we are not already re-entering through findvideos.
    if item.contentType != 'episode':
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel,
                     title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]',
                     url=item.url,
                     action="add_pelicula_to_library",
                     extra="findvideos",
                     contentTitle=item.contentTitle))
    return itemlist
def lista_anime_completa(item):
    """Paginated full anime list, PERPAGE titles per Kodi page.

    The requested page number is smuggled inside ``item.url`` after a
    ``{}`` separator; page 1 is assumed when no separator is present.
    """
    logger.info()
    itemlist = []

    page = 1
    if '{}' in item.url:
        item.url, page = item.url.split('{}')
        page = int(page)

    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(
        data, r'<ul class="lcp_catlist"[^>]+>(.*?)</ul>')
    link_re = re.compile(r'<a href="([^"]+)"[^>]+>([^<]+)</a>', re.DOTALL)
    matches = link_re.findall(blocco)

    for idx, (scrapedurl, scrapedtitle) in enumerate(matches):
        # Keep only the slice that belongs to the requested page.
        if idx < (page - 1) * PERPAGE:
            continue
        if idx >= page * PERPAGE:
            break
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
        cleantitle = scrapedtitle.replace("Sub Ita Streaming",
                                          "").replace("Ita Streaming", "")
        itemlist.append(
            Item(channel=item.channel,
                 action="episodi",
                 contentType="movie" if 'oav' in scrapedtitle.lower() else "tvshow",
                 title=support.color(scrapedtitle, 'azure'),
                 fulltitle=cleantitle,
                 show=cleantitle,
                 url=scrapedurl,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Append a "next page" entry while there are titles left to show.
    if len(matches) >= page * PERPAGE:
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="lista_anime_completa",
                 title="[COLOR lightgreen]" +
                       config.get_localized_string(30992) + "[/COLOR]",
                 url=item.url + '{}' + str(page + 1),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/"
                           "AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    return itemlist
def findvideos(item):
    """Build direct-play items for an animesubita episode.

    Resolves the intermediate "episodioNN.php?..." player pages referenced
    by ``item.url`` to the hosted media file, attaching the Referer and
    Cookie headers the player needs (Kodi "url|urlencoded-headers" form).
    """
    logger.info()
    itemlist = []
    headers = {
        'Upgrade-Insecure-Requests': '1',
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'
    }
    # When invoked for a specific episode row, first pull the real link out
    # of the table block located by the regex fragment stored in item.extra.
    if item.extra:
        data = httptools.downloadpage(item.url, headers=headers).data
        blocco = scrapertools.find_single_match(data,
                                                r'%s(.*?)</tr>' % item.extra)
        item.url = scrapertools.find_single_match(blocco,
                                                  r'<a href="([^"]+)"[^>]+>')
    # Each match is (php page name, query string) of an intermediate player page.
    patron = r'http:\/\/link[^a]+animesubita[^o]+org\/[^\/]+\/.*?(episodio\d*)[^p]+php(\?.*)'
    for phpfile, scrapedurl in re.findall(patron, item.url, re.DOTALL):
        url = "%s/%s.php%s" % (host, phpfile, scrapedurl)
        headers['Referer'] = url
        data = httptools.downloadpage(url, headers=headers).data
        # ------------------------------------------------
        # Rebuild the Cookie header from the addon's Netscape-format cookie
        # jar: keep only lines for this host, then join "name=value" pairs.
        # Fields 5 and 6 of a tab-separated cookie line are name and value.
        cookies = ""
        matches = re.compile(
            '(.%s.*?)\n' % host.replace("http://", "").replace("www.", ""),
            re.DOTALL).findall(config.get_cookie_data())
        for cookie in matches:
            name = cookie.split('\t')[5]
            value = cookie.split('\t')[6]
            cookies += name + "=" + value + ";"
        # [:-1] drops the trailing ";" left by the loop above.
        headers['Cookie'] = cookies[:-1]
        # ------------------------------------------------
        # The direct media URL sits in a <source src="..."> tag.
        scrapedurl = scrapertools.find_single_match(
            data, r'<source src="([^"]+)"[^>]+>')
        # Kodi convention: append the required request headers after "|".
        # NOTE: urllib.urlencode is Python 2 only.
        url = scrapedurl + '|' + urllib.urlencode(headers)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 text_color="azure",
                 title="[%s] %s" %
                 (support.color("Diretto", "orange"), item.title),
                 fulltitle=item.fulltitle,
                 url=url,
                 thumbnail=item.thumbnail,
                 fanart=item.thumbnail,
                 plot=item.plot))
    return itemlist
def findvideos(item):
    """Tag the server links already contained in ``item.url``.

    The episode lister stores the raw link data in ``item.url``; this just
    turns it into playable items carrying the parent item's metadata.
    """
    logger.info("[GuardaSerieClick.py]==> findvideos")
    itemlist = servertools.find_video_items(data=item.url)
    for videoitem in itemlist:
        # Server name derived from the raw title minus brackets/dashes/spaces.
        # FIX: dropped a redundant second .capitalize() on an already
        # capitalized string (str.capitalize is idempotent, so no behavior
        # change).
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()
        videoitem.title = "".join(
            ["[%s] " % support.color(server, 'orange'), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
    return itemlist
def findepvideos(item):
    """Extract the server links for one episode of a GuardaSerieClick show.

    Downloads the show page and narrows it to the episode block matched by
    the regex stored in ``item.extra`` before scanning for video links.
    """
    logger.info("[GuardaSerieClick.py]==> findepvideos")
    data = httptools.downloadpage(item.url, headers=headers).data
    data = scrapertools.find_single_match(data, item.extra)
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        # Server name derived from the raw title minus brackets/dashes/spaces.
        # FIX: dropped a redundant second .capitalize() on an already
        # capitalized string (str.capitalize is idempotent, so no behavior
        # change).
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()
        videoitem.title = "".join(
            ["[%s] " % support.color(server, 'orange'), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
    return itemlist
def episodios(item):
    """List the episodes of an animeleggendari title.

    The first episode is the title's own page; further episodes come from
    the "pagelink" pagination anchors.  A video-library entry is appended
    when library support is enabled.
    """
    logger.info('[animeleggendari.py] episodios')
    itemlist = []
    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(
        data,
        r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(.*?)</span></a></div>'
    )
    # The first episode is the page itself.
    itemlist.append(
        Item(channel=item.channel,
             action="findvideos",
             contentType=item.contentType,
             title="Episodio: 1",
             text_color="azure",
             fulltitle="%s %s %s " %
             (support.color(item.title, "deepskyblue"),
              support.color("|", "azure"), support.color("1", "orange")),
             url=item.url,
             thumbnail=item.thumbnail,
             folder=True))
    if blocco != "":
        patron = r'<a href="([^"]+)".*?><span class="pagelink">(\d+)</span></a>'
        # NOTE(review): findall deliberately(?) scans the full page rather
        # than ``blocco`` — the guard regex above stops its capture before
        # the final "</span></a>", so searching ``blocco`` would lose the
        # last pagination link.  ``blocco`` only acts as a has-pagination
        # guard here; confirm against the live site before "fixing" this.
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, scrapednumber in matches:
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title="Episodio: %s" % scrapednumber,
                     text_color="azure",
                     fulltitle="%s %s %s " %
                     (support.color(item.title, "deepskyblue"),
                      support.color("|", "azure"),
                      support.color(scrapednumber, "orange")),
                     url=scrapedurl,
                     thumbnail=item.thumbnail,
                     folder=True))
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" %
                 config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodi",
                 show=item.show))
    return itemlist
def ultimiep(item):
    """Transform the "latest episodes" list into directly playable items.

    Reuses lista_anime (no next-page entry, no language tag), then rewrites
    each item: cleans the title, converts the per-episode URL back to the
    show's streaming URL, and stores in ``extra`` the regex fragment that
    findvideos later uses to locate this episode's row in the show page.
    """
    logger.info("ultimiep")
    itemlist = lista_anime(item, False, False)
    for itm in itemlist:
        title = scrapertools.decodeHtmlentities(itm.title)
        # Title cleanup.
        title = title.replace("Streaming", "").replace("&", "")
        title = title.replace("Download", "")
        title = title.replace("Sub Ita", "").strip()
        # eptype is "Episodio"/"Episodi"/"OAV" (or "" when none matched).
        eptype = scrapertools.find_single_match(title, "((?:Episodio?|OAV))")
        cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '',
                            title).strip()
        # URL construction: strip the "episodio-NN-" fragment so the URL
        # points at the show page, and force the "-streaming" variant.
        url = re.sub(r'%s-?\d*-' % eptype.lower(), '', itm.url)
        if "-streaming" not in url:
            url = url.replace("sub-ita", "sub-ita-streaming")
        epnumber = ""
        if 'episodio' in eptype.lower():
            epnumber = scrapertools.find_single_match(title.lower(),
                                                      r'episodio?\s*(\d+)')
            # Becomes part of the regex below, e.g. "Episodio:? 5".
            eptype += ":? " + epnumber
        # Regex later used by findvideos (via item.extra) to find this
        # episode's <tr> block in the show page.
        extra = "<tr>\s*<td[^>]+><strong>(?:[^>]+>|)%s(?:[^>]+>[^>]+>|[^<]*|[^>]+>)</strong>" % eptype
        itm.title = support.color(title, 'azure').strip()
        itm.action = "findvideos"
        itm.url = url
        itm.fulltitle = cleantitle
        itm.extra = extra
        itm.show = re.sub(r'Episodio\s*', '', title)
        itm.thumbnail = item.thumbnail
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def lista_anime(item, nextpage=True, show_lang=True):
    """Scrape an anime index page into a list of show items.

    Args:
        item: navigation item whose ``url`` points to the list page.
        nextpage: when True, append a "next page" entry if the site
            exposes a ``rel="next"`` link.
        show_lang: when True, keep the "(Sub Ita)" language tag, colored
            red, inside the displayed title.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(
        data, r'<div class="post-list group">(.*?)</nav><!--/.pagination-->')
    # patron = r'<a href="([^"]+)" title="([^"]+)">\s*<img[^s]+src="([^"]+)"[^>]+>'
    # Thumbnail-capturing pattern intentionally unused: Kodi cannot fetch
    # the images from this site, so only url/title are scraped.
    patron = r'<a href="([^"]+)" title="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(blocco)
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = re.sub(r'\s+', ' ', scrapedtitle)
        # Title cleanup.
        scrapedtitle = scrapedtitle.replace("Streaming", "").replace("&", "")
        scrapedtitle = scrapedtitle.replace("Download", "")
        lang = scrapertools.find_single_match(
            scrapedtitle, r"([Ss][Uu][Bb]\s*[Ii][Tt][Aa])")
        scrapedtitle = scrapedtitle.replace("Sub Ita", "").strip()
        eptype = scrapertools.find_single_match(scrapedtitle,
                                                "((?:Episodio?|OAV))")
        cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '',
                            scrapedtitle)
        cleantitle = cleantitle.replace(lang, "").strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="episodi",
                 contentType="tvshow"
                 if 'oav' not in scrapedtitle.lower() else "movie",
                 # FIX: was a bare ``color(...)`` call — every other title in
                 # this file (including the nested call right below) uses
                 # support.color, and a bare ``color`` name would raise
                 # NameError at runtime.
                 title=support.color(
                     scrapedtitle.replace(
                         lang, "(%s)" % support.color(lang, "red")
                         if show_lang else "").strip(), 'azure'),
                 fulltitle=cleantitle,
                 url=scrapedurl,
                 show=cleantitle,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if nextpage:
        # "Next page" entry driven by the <link rel="next"> element.
        patronvideos = r'<link rel="next" href="([^"]+)"\s*/>'
        matches = re.compile(patronvideos, re.DOTALL).findall(data)
        if len(matches) > 0:
            scrapedurl = matches[0]
            itemlist.append(
                Item(channel=item.channel,
                     action="lista_anime",
                     title="[COLOR lightgreen]" +
                     config.get_localized_string(30992) + "[/COLOR]",
                     url=scrapedurl,
                     thumbnail=
                     "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                     folder=True))
    return itemlist