def menu(itemlist, title='', action='', url='', contentType='movie', args=None):
    """Append a menu entry to itemlist, inferring the channel name from the caller.

    The channel name is derived from the file name of the calling module.
    Returns itemlist with the new Item appended and auto-thumbnails applied.

    Fix: `args` previously defaulted to a shared mutable list (`args=[]`);
    it now defaults to None and a fresh list is created per call.
    """
    if args is None:
        args = []
    # Derive the channel name from the calling module's file name.
    frame = inspect.stack()[1]
    filename = frame[0].f_code.co_filename
    filename = os.path.basename(filename).replace('.py', '')
    # Apply typography to the title
    title = typo(title)
    extra = 'movie' if contentType == 'movie' else 'tvshow'
    itemlist.append(
        Item(channel=filename, title=title, action=action, url=url,
             extra=extra, args=args, contentType=contentType))
    # Apply auto thumbnails to the menus
    from channelselector import thumb
    thumb(itemlist)
    return itemlist
def menuItem(itemlist, filename, title='', action='', url='', contentType='movie', args=None):
    """Append a menu entry for an explicitly named channel.

    Same as menu(), but the channel name (`filename`) is passed in instead of
    being inferred from the caller's stack frame.

    Fix: `args` previously defaulted to a shared mutable list (`args=[]`);
    it now defaults to None and a fresh list is created per call.
    """
    if args is None:
        args = []
    # Apply typography to the title
    title = typo(title)
    extra = 'movie' if contentType == 'movie' else 'tvshow'
    itemlist.append(
        Item(channel=filename, title=title, action=action, url=url,
             extra=extra, args=args, contentType=contentType))
    # Apply auto thumbnails to the menus
    from channelselector import thumb
    thumb(itemlist)
    return itemlist
def download(itemlist, item, typography='', function_level=1, function=''):
    """Append 'download' entries to itemlist (single item and, for series, whole season).

    Parameters `function_level` and `function` are kept for backward
    compatibility with existing callers; the value they produced was never
    used, so the expensive inspect.stack() walk has been removed.
    """
    if not typography:
        typography = 'color kod bold'
    # Pick the target action and localized title by content type.
    if item.contentType == 'movie':
        from_action = 'findvideos'
        title = typo(config.get_localized_string(60354), typography)
    elif item.contentType == 'episode':
        from_action = 'findvideos'
        title = typo(config.get_localized_string(60356), typography) + ' - ' + item.title
    else:
        from_action = 'episodios'
        title = typo(config.get_localized_string(60355), typography)
    # NOTE(review): the original computed
    #   function = function if function else inspect.stack()[function_level][3]
    # and never used the result; dropped as dead code.
    contentSerieName = item.contentSerieName if item.contentSerieName else ''
    contentTitle = item.contentTitle if item.contentTitle else ''
    # Do not add download entries inside the videolibrary channel itself.
    if itemlist and item.contentChannel != 'videolibrary':
        itemlist.append(
            Item(channel='downloads', from_channel=item.channel, title=title,
                 fulltitle=item.fulltitle, show=item.fulltitle,
                 contentType=item.contentType, contentSerieName=contentSerieName,
                 url=item.url, action='save_download', from_action=from_action,
                 contentTitle=contentTitle, path=item.path,
                 thumbnail=thumb(thumb='downloads.png')))
        # For whole series also offer a "download season" entry.
        if from_action == 'episodios':
            itemlist.append(
                Item(channel='downloads', from_channel=item.channel,
                     title=typo(config.get_localized_string(60357), typography),
                     fulltitle=item.fulltitle, show=item.fulltitle,
                     contentType=item.contentType, contentSerieName=contentSerieName,
                     url=item.url, action='save_download', from_action=from_action,
                     contentTitle=contentTitle, download='season',
                     thumbnail=thumb(thumb='downloads.png')))
    return itemlist
def search_page(item):
    """Parse a search-results page into an itemlist, adding a next-page entry."""
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    result_patron = r'<img src="([^"]+)".*?.*?<a href="([^"]+)">(.*?)<\/a>'
    for result_thumb, result_url, result_title in re.compile(result_patron, re.DOTALL).findall(data):
        result_title = scrapertools.decodeHtmlentities(result_title)
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=result_title,
                 fulltitle=result_title, url=result_url, thumbnail=result_thumb))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Next-page link, if present.
    next_page = scrapertools.find_single_match(
        data, '<a class=' + "'arrow_pag'" + ' href="([^"]+)"')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel, action="search_page",
                 title="[COLOR blue]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page, thumbnail=thumb()))
    return itemlist
def nextPage(itemlist, item, data='', patron='', function_or_level=1, next_page='', resub=None):
    """Append a 'next page' Item to itemlist.

    function_or_level: either the stack level (int) from which the next
    action name is taken — useful when this is called through another
    function — or the action name itself (str). If the call is direct,
    leave it at the default.
    resub: optional (pattern, replacement) pair applied to the next-page url.

    Fixes: the original `re.sub('&', '&', next_page)` was a no-op; it now
    unescapes HTML-encoded ampersands (`&amp;` -> `&`). Also `resub` no
    longer defaults to a shared mutable list.
    """
    if resub is None:
        resub = []
    action = inspect.stack()[function_or_level][3] if type(
        function_or_level) == int else function_or_level
    if next_page == '':
        next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        if resub:
            next_page = re.sub(resub[0], resub[1], next_page)
        # Make the url absolute when the scraped match is relative.
        if 'http' not in next_page:
            next_page = scrapertools.find_single_match(
                item.url, 'https?://[a-z0-9.-]+') + next_page
        # Unescape HTML-encoded ampersands (original sub was a no-op).
        next_page = re.sub('&amp;', '&', next_page)
        log('NEXT= ', next_page)
        itemlist.append(
            Item(channel=item.channel, action=action, contentType=item.contentType,
                 title=typo(config.get_localized_string(30992), 'color kod bold'),
                 url=next_page, args=item.args, thumbnail=thumb()))
    return itemlist
def mainlist(item):
    """Build the root menu for the altadefinizionehd channel."""
    logger.info("[altadefinizionehd.py] mainlist")
    autoplay.init(item.channel, list_servers, list_quality)
    channel = item.channel
    itemlist = []
    itemlist.append(Item(channel=channel, action="video", title="[B]Film[/B]",
                         url=host + '/movies/', thumbnail=NovitaThumbnail, fanart=FilmFanart))
    itemlist.append(Item(channel=channel, action="menu", title="[B] > Film per Genere[/B]",
                         url=host, extra='GENERE', thumbnail=NovitaThumbnail, fanart=FilmFanart))
    itemlist.append(Item(channel=channel, action="menu", title="[B] > Film per Anno[/B]",
                         url=host, extra='ANNO', thumbnail=NovitaThumbnail, fanart=FilmFanart))
    itemlist.append(Item(channel=channel, action="video", title="Film Sub-Ita",
                         url=host + "/genre/sub-ita/", thumbnail=NovitaThumbnail, fanart=FilmFanart))
    itemlist.append(Item(channel=channel, action="video", title="Film Rip",
                         url=host + "/genre/dvdrip-bdrip-brrip/", thumbnail=NovitaThumbnail, fanart=FilmFanart))
    itemlist.append(Item(channel=channel, action="video", title="Film al Cinema",
                         url=host + "/genre/cinema/", thumbnail=NovitaThumbnail, fanart=FilmFanart))
    itemlist.append(Item(channel=channel, action="search", extra="movie",
                         title="[COLOR blue]Cerca Film...[/COLOR]",
                         thumbnail=CercaThumbnail, fanart=FilmFanart))
    autoplay.show_option(channel, itemlist)
    itemlist = thumb(itemlist)
    return itemlist
def genre(item):
    """List genre links, skipping TV-series categories, and apply thumbnails."""
    excluded = [
        'Serie TV', 'Serie TV Americane', 'Serie TV Italiane', 'altadefinizione'
    ]
    itemlist = support.scrape(item, '<a href="([^"]+)">([^<]+)</a>',
                              ['url', 'title'], headers, excluded,
                              action='peliculas')
    return thumb(itemlist)
def mainlist(item):
    """Build the root menu for the casacinema channel."""
    logger.info("kod.casacinema mainlist")
    autoplay.init(item.channel, list_servers, list_quality)
    # (title, action, extra, url) — url None means the entry carries no url.
    entries = [
        ("[B]Film[/B]", "peliculas", "movie", "%s/category/film" % host),
        ("[B]Film - HD[/B]", "peliculas", "movie", "%s/?s=[HD]" % host),
        ("[B] > Categorie[/B]", "categorias", "movie", "%s/category/film" % host),
        ("[B]Film Sub - Ita[/B]", "peliculas", "movie", "%s/category/sub-ita" % host),
        ("[COLOR blue]Cerca Film...[/COLOR]", "search", "movie", None),
        ("[B]Serie TV[/B]", "peliculas_tv", "tvshow", "%s/category/serie-tv" % host),
        ("[B]Aggiornamenti Serie TV[/B]", "update_tv", "tvshow", "%s/aggiornamenti-serie-tv" % host),
        ("[COLOR blue]Cerca Serie TV...[/COLOR]", "search", "tvshow", None),
    ]
    itemlist = []
    for title, action, extra, url in entries:
        if url is None:
            itemlist.append(Item(channel=item.channel, title=title,
                                 action=action, extra=extra))
        else:
            itemlist.append(Item(channel=item.channel, title=title,
                                 action=action, extra=extra, url=url))
    autoplay.show_option(item.channel, itemlist)
    # auto thumb
    itemlist = thumb(itemlist)
    return itemlist
def serietv(item):
    """List TV-series entries from eurostreaming, with pagination.

    Fix: the entry regex was a plain string containing `\\s` escapes, which
    raises invalid-escape warnings on modern Python; it is now a raw string
    (identical pattern). Dead commented-out code removed.
    """
    logger.info("kod.eurostreaming peliculas")
    itemlist = []
    # Load the page
    data = httptools.downloadpage(item.url).data
    # Extract the entries
    patron = r'<div class="post-thumb">\s*<a href="([^"]+)" title="([^"]+)">\s*<img src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(
            scrapedtitle.replace("Streaming", ""))
        # Strip the leading "Link to " prefix some titles carry.
        if scrapedtitle.startswith("Link to "):
            scrapedtitle = scrapedtitle[8:]
        itemlist.append(
            Item(channel=item.channel, action="episodios", contentType="tvshow",
                 title=scrapedtitle, fulltitle=scrapedtitle, text_color="azure",
                 url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
                 show=scrapedtitle, extra=item.extra, folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    patronvideos = '<a class="next page-numbers" href="?([^>"]+)">Avanti »</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel, action="serietv",
                 title="[COLOR blue]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl, thumbnail=thumb(), extra=item.extra, folder=True))
    return itemlist
def top_imdb(item, contentType='movie', regex=r'<h1.*?TOP IMDb.*?<h3>(.*?)<h3>'):
    """Paginated TOP IMDb listing; the page index rides in item.url after '{}'."""
    logger.info("[mondolunatico2.py] top_imdb")
    itemlist = []
    perpage = 20
    page = 1
    if '{}' in item.url:
        item.url, page = item.url.split('{}')
        page = int(page)
    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertools.find_single_match(data, regex)
    entry_patron = r"<div class='image'><div class='[^']+'><a href='([^']+)'[^']+'([^']+)'[^']+'([^']+)"
    matches = re.compile(entry_patron, re.DOTALL).findall(block)
    start = (page - 1) * perpage
    stop = page * perpage
    # Window the full match list down to the requested page.
    for idx, (entry_url, entry_thumb, entry_title) in enumerate(matches):
        if idx < start:
            continue
        if idx >= stop:
            break
        entry_title = scrapertools.decodeHtmlentities(entry_title)
        entry_title = re.sub(r'[0-9]{4}', "", entry_title)
        entry_thumb = entry_thumb.replace("-90x135", "").replace(
            "/w92/", "/w600_and_h900_bestv2/")
        itemlist.append(
            Item(channel=channel,
                 action="findvideos" if "movie" in contentType else "episodios",
                 contentType=item.contentType, contentTitle=entry_title,
                 title=entry_title, fulltitle=entry_title, url=entry_url,
                 show=entry_title, thumbnail=entry_thumb, args=item.args))
    # More results remain: emit a "next" entry carrying page+1 in the url.
    if len(matches) >= stop:
        itemlist.append(
            Item(channel=channel, contentType=item.contentType, action="top_imdb",
                 title="[COLOR blue][B]Successivo >[/B][/COLOR]",
                 thumbnail=thumb(itemlist=[]),
                 url=item.url + '{}' + str(page + 1)))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def pagination(itemlist, item, page, perpage, function_level=1):
    """Append a 'next page' Item when more results remain beyond this page.

    function_level selects which caller's name becomes the next action;
    raise it when this is called through an intermediate function.
    """
    if len(itemlist) >= page * perpage:
        caller = inspect.stack()[function_level][3]
        itemlist.append(
            Item(channel=item.channel, action=caller,
                 contentType=item.contentType,
                 title=typo(config.get_localized_string(30992), 'color kod bold'),
                 url=item.url, args=item.args, page=page + 1,
                 thumbnail=thumb()))
    return itemlist
def video(item):
    """Scrape the altadefinizionehd listing page into movie items, with pagination."""
    logger.info("[altadefinizionehd.py] video")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    logger.info("[altadefinizionehd.py] Data" + data)
    # The listing container differs between archive pages and the home page.
    if 'archive-content' in data:
        block_regex = r'<div id="archive-content".*?>(.*?)<div class="pagination'
    else:
        block_regex = r'<div class="items".*?>(.*?)<div class="pagination'
    block = scrapertools.find_single_match(data, block_regex)
    logger.info("[altadefinizionehd.py] Block" + block)
    entry_patron = r'<article .*?class="item movies">.*?<img src="([^"]+)".*?<span class="quality">(.*?)<\/span>.*?<a href="([^"]+)">.*?<h4>([^<]+)<\/h4>(.*?)<\/article>'
    info_patron = r'IMDb: (.*?)<\/span> <span>(.*?)<\/span>.*?"texto">(.*?)<\/div>'
    for entry_thumb, entry_quality, entry_url, entry_title, entry_info in re.compile(entry_patron, re.DOTALL).findall(block):
        display_title = entry_title + " [" + entry_quality + "]"
        info_matches = re.compile(info_patron, re.DOTALL).findall(entry_info)
        logger.info("[altadefinizionehd.py] MATCHES" + str(info_matches))
        for rating, year, plot in info_matches:
            infoLabels = {'Year': year, 'Rating': rating, 'Plot': plot}
            itemlist.append(
                Item(channel=item.channel, action="findvideos",
                     contentType="movie", title=display_title,
                     fulltitle=entry_title, infoLabels=infoLabels,
                     url=entry_url, thumbnail=entry_thumb))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    next_page = scrapertools.find_single_match(
        data, '<a class=' + "'arrow_pag'" + ' href="([^"]+)"')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel, action="video",
                 title="[COLOR blue]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page, thumbnail=thumb()))
    return itemlist
def genres_menu(item):
    """Build an alphabetically sorted genre menu from TMDb genres."""
    mode = item.mode.replace('show', '')
    genre_map = tmdb.get_genres(mode)
    entries = []
    for genre_id, genre_name in list(genre_map[mode].items()):
        discovery = {
            'url': 'discover/%s' % mode,
            'with_genres': genre_id,
            'language': def_lang,
            'page': '1'
        }
        entries.append(
            Item(channel=item.channel, title=typo(genre_name, 'bold'), page=1,
                 action='discover_list', discovery=discovery, mode=item.mode))
    channelselector.thumb(entries)
    return sorted(entries, key=lambda it: it.title)
def peliculas(item):
    """Scrape the posts list into items (movies or episodes), with pagination.

    Fix: the next-page Item used the keyword `thumbnails=` (typo), so the
    pagination entry silently received no thumbnail; it is now `thumbnail=`.
    """
    logger.info(item.channel + 'peliculas')
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertoolsV2.find_single_match(data,
                                             r'<ul class="posts">(.*)<\/ul>')
    patron = r'<li><a href="([^"]+)" data-thumbnail="([^"]+)">.*?<div class="title">([^<]+)<\/div>'
    matches = scrapertoolsV2.find_multiple_matches(block, patron)
    for scrapedurl, scrapedthumb, scrapedtitle in matches:
        # Strip "(...)"/"[...]" decorations; the bracket text is the quality tag.
        title = re.sub(r'.\(.*?\)|.\[.*?\]', '', scrapedtitle)
        quality = scrapertoolsV2.find_single_match(scrapedtitle, r'\[(.*?)\]')
        if not quality:
            quality = 'SD'
        longtitle = title + ' [COLOR blue][' + quality + '][/COLOR]'
        action = 'episodios' if item.contentType == 'episode' else 'findvideos'
        itemlist.append(
            Item(channel=item.channel, action=action,
                 contentType=item.contentType, title=longtitle,
                 fulltitle=title, show=title, quality=quality,
                 url=scrapedurl, thumbnail=scrapedthumb))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    next_page = scrapertoolsV2.find_single_match(data,
                                                 '<a href="([^"]+)">Pagina')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel, action="peliculas",
                 contentType=item.contentType,
                 title="[COLOR blue]" + config.get_localized_string(30992) + " >[/COLOR]",
                 url=next_page,
                 thumbnail=thumb()))  # was `thumbnails=` (typo)
    return itemlist
def genre(item):
    """List genre entries parsed from the table-list block."""
    logger.info(item.channel + 'genre')
    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertoolsV2.find_single_match(
        data, r'<ul class="table-list">(.*?)<\/ul>')
    links = scrapertoolsV2.find_multiple_matches(
        block, r'<a href="([^"]+)">.*?<\/span>(.*?)<\/a>')
    itemlist = [
        Item(channel=item.channel, action='peliculas', title=title, url=host + url)
        for url, title in links
    ]
    return thumb(itemlist)
def az(item):
    """List A-Z index entries parsed from the cats select box."""
    logger.info(item.channel + 'genre')
    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertoolsV2.find_single_match(
        data, r'<select class="cats">(.*?)<\/select>')
    options = scrapertoolsV2.find_multiple_matches(
        block, r'<option data-src="([^"]+)">(.*?)<\/option>')
    itemlist = [
        Item(channel=item.channel, action='peliculas', title=title, url=url)
        for url, title in options
    ]
    return thumb(itemlist)
def peliculas(item):
    """Paginated listing; the page index rides in item.url after '{}'."""
    logger.info(item.channel + 'peliculas')
    itemlist = []
    action = 'findvideos' if item.contentType == 'movie' else 'episodios'
    page = 1
    if '{}' in item.url:
        item.url, page = item.url.split('{}')
        page = int(page)
    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertoolsV2.find_single_match(
        data, r'id="lcp_instance_0">(.*?)<\/ul>')
    matches = re.compile(r'<a\s*href="([^"]+)" title="([^<]+)">[^<]+</a>',
                         re.DOTALL).findall(block)
    lower = (page - 1) * PERPAGE
    upper = page * PERPAGE
    # Window the match list down to the requested page.
    for idx, (url, title) in enumerate(matches):
        if idx < lower:
            continue
        if idx >= upper:
            break
        title = scrapertoolsV2.decodeHtmlentities(title)
        itemlist.append(
            Item(channel=item.channel, action=action, title=title,
                 contentTitle=title, fulltitle=title, url=url,
                 contentType=item.contentType, show=title))
    if len(matches) >= upper:
        itemlist.append(
            Item(channel=item.channel, action="peliculas",
                 title="[COLOR blue]" + config.get_localized_string(30992) + " >[/COLOR]",
                 url=item.url + '{}' + str(page + 1), thumbnail=thumb(),
                 contentType=item.contentType))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def build_sub_menu(item):
    """Build sub-menu entries from the filter inputs embedded in item.html."""
    input_patron = r'<input.*?name="(.*?)" value="(.*?)".*?><label.*?>(.*?)<\/label>'
    itemlist = []
    for name, value, label in re.compile(input_patron, re.DOTALL).findall(item.html):
        # Each filter becomes a query-string parameter appended to the base url.
        itemlist.append(
            Item(channel=item.channel, action='video', contentType="tvshow",
                 title='[B]' + label + ' >[/B]', fulltitle=label, show=label,
                 url=item.url + '&' + name + '=' + value, plot=""))
    return thumb(itemlist)
def nextPage(itemlist, item, data, patron, function_level=1):
    """Append a 'next page' Item when `patron` matches in `data`.

    function_level selects which caller's name becomes the next action —
    useful when this is called through another function; leave at the
    default when the call is direct.
    """
    next_page = scrapertoolsV2.find_single_match(data, patron)
    log('NEXT= ', next_page)
    if next_page == "":
        return itemlist
    itemlist.append(
        Item(channel=item.channel,
             action=inspect.stack()[function_level][3],
             contentType=item.contentType,
             title=typo(config.get_localized_string(30992), 'color kod bold'),
             url=next_page, args=item.args, thumbnail=thumb()))
    return itemlist
def build_menu(item):
    """Build the filter menu: a 'Tutti' entry plus one sub-menu per dropdown filter."""
    itemlist = [
        Item(channel=item.channel, action="video", title="[B]Tutti[/B]", url=item.url)
    ]
    data = httptools.downloadpage(item.url).data
    # Normalize whitespace so the patterns match across formatting.
    data = re.sub(r'\n|\t', '', data)
    data = re.sub(r'>\s*<', '><', data)
    block = scrapertoolsV2.find_single_match(
        data, r'<form class="filters.*?>(.*?)<\/form>')
    dropdown_patron = r'<button class="btn btn-sm btn-default dropdown-toggle" data-toggle="dropdown"> (.*?) <span.*?>(.*?)<\/ul>'
    for label, html in re.compile(dropdown_patron, re.DOTALL).findall(block):
        itemlist.append(
            Item(channel=item.channel, action='build_sub_menu',
                 contentType="tvshow", title='[B] > ' + label + '[/B]',
                 fulltitle=label, show=label, url=item.url, html=html,
                 thumbnail=item.thumbnail, fanart=item.fanart))
    # Remove the two language-filter entries from the menu (positions 6 and 7).
    itemlist.pop(6)
    itemlist.pop(6)
    return thumb(itemlist)
def mainlist(item):
    """Build the root menu for the eurostreaming channel."""
    logger.info("kod.eurostreaming mainlist")
    autoplay.init(item.channel, list_servers, list_quality)
    series_thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    anime_thumb = "http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"
    search_thumb = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
    itemlist = [
        Item(channel=item.channel, title="[B]Serie TV[/B]", action="serietv",
             extra="tvshow", url="%s/category/serie-tv-archive/" % host,
             thumbnail=series_thumb),
        Item(channel=item.channel, title="[B]Anime / Cartoni[/B]", action="serietv",
             extra="tvshow", url="%s/category/anime-cartoni-animati/" % host,
             thumbnail=anime_thumb),
        Item(channel=item.channel, title="[COLOR blue]Cerca...[/COLOR]",
             action="search", extra="tvshow", thumbnail=search_thumb),
    ]
    autoplay.show_option(item.channel, itemlist)
    return thumb(itemlist)
def video(item):
    """Scrape the animeworld listing into items, distinguishing series from movies.

    Title decorations (year, language, episode count, OVA/ONA/movie/special
    markers) are extracted from the scraped HTML and folded into the display
    title; the 'movie' marker decides between episode lists and findvideos.
    """
    logger.info("[animeworld.py] video")
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Normalize whitespace so the pattern matches across formatting.
    data = re.sub(r'\n|\t', '', data)
    data = re.sub(r'>\s*<', '><', data)
    patron = r'<a href="([^"]+)" class="poster.*?><img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumb, scrapedinfo, scrapedoriginal, scrapedtitle in matches:
        # Look for info such as year or language in the title.
        year = ''
        lang = ''
        if '(' in scrapedtitle:
            year = scrapertoolsV2.find_single_match(scrapedtitle,
                                                    r'( \([0-9]+\))')
            lang = scrapertoolsV2.find_single_match(scrapedtitle,
                                                    r'( \([a-zA-Z]+\))')
        # Remove year and language from the title.
        title = scrapedtitle.replace(year, '').replace(lang, '')
        original = scrapedoriginal.replace(year, '').replace(lang, '')
        # Compare against the original (Japanese) title; show it only when it differs.
        if original == title:
            original = ''
        else:
            original = ' - [ ' + scrapedoriginal + ' ]'
        # Look for supplementary info markers in the scraped block.
        ep = ''
        ep = scrapertoolsV2.find_single_match(scrapedinfo,
                                              '<div class="ep">(.*?)<')
        if ep != '':
            ep = ' - ' + ep
        ova = ''
        ova = scrapertoolsV2.find_single_match(scrapedinfo,
                                               '<div class="ova">(.*?)<')
        if ova != '':
            ova = ' - (' + ova + ')'
        ona = ''
        ona = scrapertoolsV2.find_single_match(scrapedinfo,
                                               '<div class="ona">(.*?)<')
        if ona != '':
            ona = ' - (' + ona + ')'
        movie = ''
        movie = scrapertoolsV2.find_single_match(scrapedinfo,
                                                 '<div class="movie">(.*?)<')
        if movie != '':
            movie = ' - (' + movie + ')'
        special = ''
        special = scrapertoolsV2.find_single_match(
            scrapedinfo, '<div class="special">(.*?)<')
        if special != '':
            special = ' - (' + special + ')'
        # Concatenate the collected info fragments.
        info = ep + lang + year + ova + ona + movie + special
        # Build the display title.
        long_title = '[B]' + title + '[/B]' + info + original
        # Decide whether this entry is a series (episodes) or a movie.
        if movie == '':
            contentType = 'tvshow'
            action = 'episodios'
        else:
            contentType = 'movie'
            action = 'findvideos'
        itemlist.append(
            Item(channel=item.channel,
                 contentType=contentType,
                 action=action,
                 title=long_title,
                 url=scrapedurl,
                 fulltitle=title,
                 show=title,
                 thumbnail=scrapedthumb,
                 context=autoplay.context))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)
    # Next page: keep the base url and swap in the next page number.
    next_page = scrapertoolsV2.find_single_match(
        data, '<a class="page-link" href=".*?page=([^"]+)" rel="next"')
    if next_page != '':
        itemlist.append(
            Item(channel=item.channel,
                 action='video',
                 title='[B]' + config.get_localized_string(30992) + ' >[/B]',
                 url=re.sub('&page=([^"]+)', '', item.url) + '&page=' + next_page,
                 contentType=item.contentType,
                 thumbnail=thumb()))
    return itemlist
def lista_anime(item):
    """Scrape the anime list page into tvshow items, with next-page support."""
    logger.info("[animeworld.py] lista_anime")
    itemlist = []
    # Fetch and normalize the page.
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'\n|\t', '', data)
    data = re.sub(r'>\s*<', '><', data)
    # Extract the entries.
    entry_patron = r'<div class="item"><a href="([^"]+)".*?src="([^"]+)".*?data-jtitle="([^"]+)".*?>([^<]+)<\/a><p>(.*?)<\/p>'
    for entry_url, entry_thumb, original_title, raw_title, plot in re.compile(entry_patron, re.DOTALL).findall(data):
        # Show the original (Japanese) title only when it differs.
        if original_title == raw_title:
            original_title = ''
        else:
            original_title = ' - [ ' + original_title + ' ]'
        year = ''
        lang = ''
        if '(' in raw_title:
            year = scrapertoolsV2.find_single_match(raw_title, r'(\([0-9]+\))')
            lang = scrapertoolsV2.find_single_match(raw_title, r'(\([a-zA-Z]+\))')
        clean_title = raw_title.replace(year, '').replace(lang, '')
        original = original_title.replace(year, '').replace(lang, '')
        display_title = '[B]' + clean_title + '[/B]' + year + lang + original
        itemlist.append(
            Item(channel=item.channel, extra=item.extra, contentType="tvshow",
                 action="episodios", text_color="azure", title=display_title,
                 url=entry_url, thumbnail=entry_thumb, fulltitle=display_title,
                 show=display_title, plot=plot, folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)
    # Next page
    next_page = scrapertoolsV2.find_single_match(
        data, '<a class="page-link" href="([^"]+)" rel="next"')
    if next_page != '':
        itemlist.append(
            Item(channel=item.channel, action='lista_anime',
                 title='[B]' + config.get_localized_string(30992) + ' >[/B]',
                 url=next_page, contentType=item.contentType,
                 thumbnail=thumb()))
    return itemlist
def wrapper(*args):
    """Core of the scrape decorator: the wrapped channel function returns a dict
    of scraping parameters; this wrapper downloads the page, applies the
    block/item patterns, handles pagination, TMDb info, renumbering,
    videolibrary and download entries, and returns the final itemlist.

    NOTE(review): scoping of some statements was reconstructed from a
    collapsed one-line source — confirm against upstream before relying on
    exact indentation-sensitive behavior.
    """
    itemlist = []
    # The decorated function returns its parameter dict.
    args = func(*args)
    # 'actLike' lets a channel function masquerade as another action name.
    function = func.__name__ if not 'actLike' in args else args['actLike']
    # log('STACK= ',inspect.stack()[1][3])
    item = args['item']
    action = args['action'] if 'action' in args else 'findvideos'
    anime = args['anime'] if 'anime' in args else ''
    addVideolibrary = args[
        'addVideolibrary'] if 'addVideolibrary' in args else True
    search = args['search'] if 'search' in args else ''
    blacklist = args['blacklist'] if 'blacklist' in args else []
    data = args['data'] if 'data' in args else ''
    # 'patronMenu' doubles as the item pattern when 'patron' is absent.
    patron = args['patron'] if 'patron' in args else args[
        'patronMenu'] if 'patronMenu' in args else ''
    # Headers: explicit argument, else the channel module's global, else none.
    if 'headers' in args:
        headers = args['headers']
    elif 'headers' in func.__globals__:
        headers = func.__globals__['headers']
    else:
        headers = ''
    patronNext = args['patronNext'] if 'patronNext' in args else ''
    patronBlock = args['patronBlock'] if 'patronBlock' in args else ''
    typeActionDict = args[
        'typeActionDict'] if 'typeActionDict' in args else {}
    typeContentDict = args[
        'typeContentDict'] if 'typeContentDict' in args else {}
    debug = args['debug'] if 'debug' in args else False
    debugBlock = args['debugBlock'] if 'debugBlock' in args else False
    # Pagination is disabled when called from videolibrary-maintenance paths.
    if 'pagination' in args and inspect.stack()[1][3] not in [
            'add_tvshow', 'get_episodes', 'update', 'find_episodes'
    ]:
        pagination = args['pagination'] if args['pagination'] else 20
    else:
        pagination = ''
    lang = args['deflang'] if 'deflang' in args else ''
    pag = item.page if item.page else 1  # pagination
    matches = []
    log('PATRON= ', patron)
    if not data:
        page = httptools.downloadpage(item.url,
                                      headers=headers,
                                      ignore_response_code=True,
                                      session=item.session)
        # if url may be changed and channel has findhost to update
        if (not page.data or scrapertools.get_domain_from_url(
                page.url) != scrapertools.get_domain_from_url(item.url)
            ) and 'findhost' in func.__globals__:
            # Re-resolve the channel host, persist it, and retry the download.
            host = func.__globals__['findhost']()
            parse = list(urlparse.urlparse(item.url))
            from core import jsontools
            jsontools.update_node(host,
                                  func.__module__.split('.')[-1], 'url')
            parse[1] = scrapertools.get_domain_from_url(host)
            item.url = urlparse.urlunparse(parse)
            page = httptools.downloadpage(item.url,
                                          headers=headers,
                                          ignore_response_code=True,
                                          session=item.session)
        data = page.data.replace("'", '"')
        data = re.sub('\n|\t', ' ', data)
        data = re.sub(r'>\s+<', '> <', data)
        # replace all ' with " and eliminate newline, so we don't need to worry about
    if patronBlock:
        if debugBlock:
            regexDbg(item, patronBlock, headers, data)
        blocks = scrapertools.find_multiple_matches_groups(
            data, patronBlock)
        block = ""
        for bl in blocks:
            # log(len(blocks),bl)
            # A block may carry season/language/quality context for its items.
            if 'season' in bl and bl['season']:
                item.season = bl['season']
            blockItemlist, blockMatches = scrapeBlock(
                item, args, bl['block'], patron, headers, action,
                pagination, debug, typeContentDict, typeActionDict,
                blacklist, search, pag, function, lang)
            for it in blockItemlist:
                if 'lang' in bl:
                    it.contentLanguage, it.title = scrapeLang(
                        bl, it.contentLanguage, it.title)
                if 'quality' in bl and bl['quality']:
                    it.quality = bl['quality'].strip()
                    it.title = it.title + typo(bl['quality'].strip(),
                                               '_ [] color kod')
            itemlist.extend(blockItemlist)
            matches.extend(blockMatches)
    elif patron:
        itemlist, matches = scrapeBlock(item, args, data, patron, headers,
                                        action, pagination, debug,
                                        typeContentDict, typeActionDict,
                                        blacklist, search, pag, function,
                                        lang)
    if 'itemlistHook' in args:
        itemlist = args['itemlistHook'](itemlist)
    # Site-provided "next" link: only when local pagination is exhausted or off.
    if (pagination and len(matches) <= pag * pagination
        ) or not pagination:  # next page with pagination
        if patronNext and inspect.stack()[1][3] != 'newest':
            nextPage(itemlist, item, data, patronNext, function)
    # next page for pagination
    if pagination and len(matches) > pag * pagination and not search:
        if inspect.stack()[1][3] != 'get_newest':
            itemlist.append(
                Item(channel=item.channel,
                     action=item.action,
                     contentType=item.contentType,
                     title=typo(config.get_localized_string(30992),
                                'color kod bold'),
                     fulltitle=item.fulltitle,
                     show=item.show,
                     url=item.url,
                     args=item.args,
                     page=pag + 1,
                     thumbnail=thumb()))
    if action != 'play' and function != 'episodios' and 'patronMenu' not in args:
        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    from specials import autorenumber
    if anime:
        if function == 'episodios' or item.action == 'episodios':
            autorenumber.renumber(itemlist, item, 'bold')
        else:
            autorenumber.renumber(itemlist)
    # Skip library/download entries for unnumbered anime episode lists.
    if anime and autorenumber.check(
            item) == False and not scrapertools.find_single_match(
                itemlist[0].title, r'(\d+.\d+)'):
        pass
    else:
        if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
            # item.fulltitle = item.infoLabels["title"]
            videolibrary(itemlist, item, function=function)
        if config.get_setting('downloadenabled') and (
                function == 'episodios' or function == 'findvideos'):
            download(itemlist, item, function=function)
    if 'patronMenu' in args and itemlist:
        itemlist = thumb(itemlist, genre=True)
    if 'fullItemlistHook' in args:
        itemlist = args['fullItemlistHook'](itemlist)
    # itemlist = filterLang(item, itemlist)  # causes problems for newest
    return itemlist
def wrapper(*args):
    """Older variant of the scrape decorator core: the wrapped channel function
    returns a dict of scraping parameters; this wrapper downloads the page,
    applies the block/item patterns, handles pagination, TMDb info,
    renumbering, videolibrary and download entries, and returns the itemlist.

    NOTE(review): scoping of the log() calls was reconstructed from a
    collapsed one-line source — confirm against upstream before relying on
    exact indentation-sensitive behavior.
    """
    function = func.__name__
    itemlist = []
    # The decorated function returns its parameter dict.
    args = func(*args)
    # log('STACK= ',inspect.stack()[1][3])
    item = args['item']
    action = args['action'] if 'action' in args else 'findvideos'
    anime = args['anime'] if 'anime' in args else ''
    addVideolibrary = args[
        'addVideolibrary'] if 'addVideolibrary' in args else True
    search = args['search'] if 'search' in args else ''
    blacklist = args['blacklist'] if 'blacklist' in args else []
    data = args['data'] if 'data' in args else ''
    # 'patronMenu' doubles as the item pattern when 'patron' is absent.
    patron = args['patron'] if 'patron' in args else args[
        'patronMenu'] if 'patronMenu' in args else ''
    # Headers: explicit argument, else the channel module's global, else none.
    if 'headers' in args:
        headers = args['headers']
    elif 'headers' in func.__globals__:
        headers = func.__globals__['headers']
    else:
        headers = ''
    patronNext = args['patronNext'] if 'patronNext' in args else ''
    patronBlock = args['patronBlock'] if 'patronBlock' in args else ''
    typeActionDict = args[
        'type_action_dict'] if 'type_action_dict' in args else {}
    typeContentDict = args[
        'type_content_dict'] if 'type_content_dict' in args else {}
    debug = args['debug'] if 'debug' in args else False
    log('STACK= ', inspect.stack()[1][3])
    # Pagination is disabled when called from videolibrary-maintenance paths.
    if 'pagination' in args and inspect.stack()[1][3] not in [
            'add_tvshow', 'get_episodes', 'update', 'find_episodes'
    ]:
        pagination = args['pagination'] if args['pagination'] else 20
    else:
        pagination = ''
    lang = args['deflang'] if 'deflang' in args else ''
    pag = item.page if item.page else 1  # pagination
    matches = []
    log('PATRON= ', patron)
    if not data:
        data = httptools.downloadpage(
            item.url, headers=headers,
            ignore_response_code=True).data.replace("'", '"')
        data = re.sub('\n|\t', ' ', data)
        data = re.sub(r'>\s+<', '> <', data)
        # replace all ' with " and eliminate newline, so we don't need to worry about
        log('DATA =', data)
    if patronBlock:
        blocks = scrapertoolsV2.find_multiple_matches_groups(
            data, patronBlock)
        block = ""
        for bl in blocks:
            blockItemlist, blockMatches = scrapeBlock(
                item, args, bl['block'], patron, headers, action,
                pagination, debug, typeContentDict, typeActionDict,
                blacklist, search, pag, function, lang)
            # Fold block-level language/quality context into each item.
            for it in blockItemlist:
                if 'lang' in bl:
                    it.contentLanguage, it.title = scrapeLang(
                        bl, it.contentLanguage, it.title)
                if 'quality' in bl and bl['quality']:
                    it.quality = bl['quality'].strip()
                    it.title = it.title + typo(bl['quality'].strip(),
                                               '_ [] color kod')
            log('BLOCK ', '=', block)
            itemlist.extend(blockItemlist)
            matches.extend(blockMatches)
    elif patron:
        itemlist, matches = scrapeBlock(item, args, data, patron, headers,
                                        action, pagination, debug,
                                        typeContentDict, typeActionDict,
                                        blacklist, search, pag, function,
                                        lang)
    checkHost(item, itemlist)
    if 'itemlistHook' in args:
        itemlist = args['itemlistHook'](itemlist)
    if patronNext:
        nextPage(itemlist, item, data, patronNext, 2)
    # next page for pagination
    if pagination and len(matches) >= pag * pagination:
        if inspect.stack()[1][3] != 'get_newest':
            itemlist.append(
                Item(channel=item.channel,
                     action=item.action,
                     contentType=item.contentType,
                     title=typo(config.get_localized_string(30992),
                                'color kod bold'),
                     fulltitle=item.fulltitle,
                     show=item.show,
                     url=item.url,
                     args=item.args,
                     page=pag + 1,
                     thumbnail=thumb()))
    if action != 'play' and function != 'episodios' and 'patronMenu' not in args:
        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    from specials import autorenumber
    if anime:
        if function == 'episodios' or item.action == 'episodios':
            autorenumber.renumber(itemlist, item, 'bold')
        else:
            autorenumber.renumber(itemlist)
    # Skip library/download entries for anime lists that failed the renumber check.
    if anime and autorenumber.check(item) == False:
        pass
    else:
        if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
            # item.fulltitle = item.infoLabels["title"]
            videolibrary(itemlist, item, function=function)
        if config.get_setting('downloadenabled') and (
                function == 'episodios' or function == 'findvideos'):
            download(itemlist, item, function=function)
    if 'patronMenu' in args:
        itemlist = thumb(itemlist, genre=True)
    if 'fullItemlistHook' in args:
        itemlist = args['fullItemlistHook'](itemlist)
    # itemlist = filterLang(item, itemlist)  # causes problems for newest
    return itemlist