def anime(item):
    log()
    itemlist = []

    seasons = support.match(item, r'<div class="sp-body[^"]+">(.*?)<\/div>')[0]
    for season in seasons:
        episodes = scrapertools.find_multiple_matches(
            season, r'<a.*?href="([^"]+)"[^>]+>([^<]+)<\/a>(.*?)<(br|\/p)')
        for url, title, urls, _ in episodes:
            urls = scrapertools.find_multiple_matches(urls, '<a.*?href="([^"]+)"[^>]+>')
            for url2 in urls:
                url += url2 + '\n'
            # log('EP URL', url)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title=support.typo(title + ' - ' + item.fulltitle, 'bold'),
                     url=url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail,
                     args=item.args))

    autorenumber.renumber(itemlist, item, 'bold')
    support.videolibrary(itemlist, item, 'color kod bold')
    return itemlist
def episodios(item):
    log()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    block = scrapertoolsV2.find_single_match(
        data,
        r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(.*?)</span></a></div>')

    itemlist.append(
        Item(channel=item.channel,
             action='findvideos',
             contentType='episode',
             title=support.typo('Episodio 1', 'bold'),
             fulltitle=item.title,
             url=item.url,
             thumbnail=item.thumbnail))

    if block:
        matches = re.compile(r'<a href="([^"]+)".*?><span class="pagelink">(\d+)</span></a>',
                             re.DOTALL).findall(data)
        for url, number in matches:
            itemlist.append(
                Item(channel=item.channel,
                     action='findvideos',
                     contentType='episode',
                     title=support.typo('Episodio ' + number, 'bold'),
                     fulltitle=item.title,
                     url=url,
                     thumbnail=item.thumbnail))

    autorenumber.renumber(itemlist, item)
    support.videolibrary(itemlist, item)
    return itemlist
def episodios(item):
    log()
    itemlist = []

    patron_block = r'<div class="widget servers".*?>(.*?)<div id="download"'
    patron = r'<li><a [^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+" href="([^"]+)"[^>]+>([^<]+)<'
    matches = support.match(item, patron, patron_block)[0]

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 title='[B] Episodio ' + scrapedtitle + '[/B]',
                 url=urlparse.urljoin(host, scrapedurl),
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 plot=item.plot,
                 fanart=item.thumbnail,
                 thumbnail=item.thumbnail))

    autorenumber.renumber(itemlist, item, 'bold')
    support.videolibrary(itemlist, item)
    return itemlist
def episodios(item):
    itemlist = scrape(
        item,
        r'<li><a href="([^"]+)"[^<]+<b>(.*?)<\/b>[^>]+>([^<]+)<\/i>',
        ['url', 'title', 'title2'],
        patron_block='<div class="seasonEp">(.*?)<div class="footer">')
    renumber(itemlist, item, 'bold')
    return itemlist
def peliculas(item):
    itemlist = scrape(
        item,
        r'Lingua[^<]+<br>\s*<a href="(?:Lista episodi )?([^"]+)" title="(?:Lista episodi )?(.*?)(?: \(([0-9]+)\))?(?: Streaming)?">',
        ['url', 'title', 'year'],
        action='episodios',
        patron_block='<input type="submit" value="Vai!" class="blueButton">(.*?)<div class="footer">',
        patronNext='<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">')
    renumber(itemlist)
    return itemlist
def peliculas(item):
    itemlist = []

    if not item.args:
        json_file = current_session.get(item.url + 'channels', headers=headers, params=payload).json()
        names = [i['filter'] for i in json_file['data'] if 'filter' in i][0]
        # download the filtered pages in parallel
        with futures.ThreadPoolExecutor() as executor:
            json_file = [executor.submit(dl_pages, name, item) for name in names]
            for res in futures.as_completed(json_file):
                if res.result():
                    itemlist += res.result()
        itemlist = sorted(itemlist, key=lambda it: it.fulltitle)

    elif ('=' not in item.args) and ('=' not in item.url):
        json_file = current_session.get(item.url + item.args, headers=headers, params=payload).json()
        make_itemlist(itemlist, item, json_file)

    elif '=' in item.args:
        json_file = current_session.get(item.url + 'channels', headers=headers, params=payload).json()
        Filter = support.match(item.args, patron=r'\?([^=]+)=').match
        keys = [i[Filter] for i in json_file['data'] if Filter in i][0]
        for key in keys:
            if key not in ['1', '2']:
                itemlist.append(
                    Item(channel=item.channel,
                         title=support.typo(key.upper() if Filter == 'filter' else key['name'], 'bold'),
                         url=item.url + item.args + (key if Filter == 'filter' else str(key['id'])),
                         action='peliculas',
                         args='filters',
                         contentType=item.contentType))

    else:
        json_file = current_session.get(item.url, headers=headers, params=payload).json()
        make_itemlist(itemlist, item, json_file)

    if item.contentType != 'movie':
        autorenumber.renumber(itemlist)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
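# NOTE: dl_pages() is referenced above but not defined in this section. The sketch below is an
# assumption, modelled on the serial variant of peliculas() that follows, which fetches
# item.url + 'channel/10003/last/?filter=' + str(name) and hands the JSON to make_itemlist().
# It is illustrative only, not the channel's actual helper.
def dl_pages(name, item):
    itemlist = []
    url = item.url + 'channel/10003/last/?filter=' + str(name)
    json_file = current_session.get(url, headers=headers, params=payload).json()
    if 'data' in json_file:
        # make_itemlist() appends Items built from the JSON payload onto itemlist
        make_itemlist(itemlist, item, json_file)
    return itemlist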
def peliculas(item):
    itemlist = []

    if not item.args:
        json_file = current_session.get(item.url + 'channels', headers=headers, params=payload).json()
        names = [i['filter'] for i in json_file['data'] if 'filter' in i][0]
        for name in names:
            url = item.url + 'channel/10003/last/?filter=' + str(name)
            json_file = current_session.get(url, headers=headers, params=payload).json()
            if 'data' in json_file:
                json_file = current_session.get(url, headers=headers, params=payload).json()
                make_itemlist(itemlist, item, json_file)

    elif ('=' not in item.args) and ('=' not in item.url):
        json_file = current_session.get(item.url + item.args, headers=headers, params=payload).json()
        make_itemlist(itemlist, item, json_file)

    elif '=' in item.args:
        json_file = current_session.get(item.url + 'channels', headers=headers, params=payload).json()
        Filter = support.match(item.args, r'\?([^=]+)=')[0][0]
        keys = [i[Filter] for i in json_file['data'] if Filter in i][0]
        for key in keys:
            if key not in ['1', '2']:
                itemlist.append(
                    Item(channel=item.channel,
                         title=support.typo(key.upper() if Filter == 'filter' else key['name'], 'bold'),
                         url=item.url + item.args + (key if Filter == 'filter' else str(key['id'])),
                         action='peliculas',
                         args='filters',
                         contentType=item.contentType))

    else:
        json_file = current_session.get(item.url, headers=headers, params=payload).json()
        make_itemlist(itemlist, item, json_file)

    if item.contentType != 'movie':
        autorenumber.renumber(itemlist)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def lista_anime(item):
    log()
    itemlist = []

    matches, data = support.match(
        item,
        r'<div class="item"><a href="([^"]+)".*?src="([^"]+)".*?data-jtitle="([^"]+)".*?>([^<]+)<\/a><p>(.*?)<\/p>')

    for scrapedurl, scrapedthumb, scrapedoriginal, scrapedtitle, scrapedplot in matches:
        if scrapedoriginal == scrapedtitle:
            scrapedoriginal = ''
        else:
            scrapedoriginal = support.typo(scrapedoriginal, ' -- []')

        year = ''
        lang = ''
        infoLabels = {}
        if '(' in scrapedtitle:
            year = scrapertoolsV2.find_single_match(scrapedtitle, r'(\([0-9]+\))')
            lang = scrapertoolsV2.find_single_match(scrapedtitle, r'(\([a-zA-Z]+\))')
            infoLabels['year'] = year

        title = scrapedtitle.replace(year, '').replace(lang, '').strip()
        original = scrapedoriginal.replace(year, '').replace(lang, '').strip()
        if lang:
            lang = support.typo(lang, '_ color kod')
        longtitle = '[B]' + title + '[/B]' + lang + original

        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 contentType="episode",
                 action="episodios",
                 title=longtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumb,
                 fulltitle=title,
                 show=title,
                 infoLabels=infoLabels,
                 plot=scrapedplot,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)

    # Next page
    support.nextPage(itemlist, item, data, r'<a class="page-link" href="([^"]+)" rel="next"')
    return itemlist
def episodios(item):
    log()
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
    anime_id = scrapertools.find_single_match(data, r'\?anime_id=(\d+)')
    # movie or series
    movie = scrapertools.find_single_match(data, r'Episodi:</b>\s(\d*)\sMovie')

    data = httptools.downloadpage(host + "/loading_anime?anime_id=" + anime_id,
                                  headers={'X-Requested-With': 'XMLHttpRequest'}).data

    patron = r'<td style="[^"]+"><b><strong" style="[^"]+">(.+?)</b></strong></td>\s*'
    patron += r'<td style="[^"]+"><a href="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedtitle, scrapedurl in matches:
        scrapedtitle = cleantitle(scrapedtitle)
        scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
        scrapedtitle = '[B]' + scrapedtitle + '[/B]'
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 title=scrapedtitle,
                 url=urlparse.urljoin(host, scrapedurl),
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 plot=item.plot,
                 fanart=item.thumbnail,
                 thumbnail=item.thumbnail))

    # a single entry flagged as Movie is sent straight to findvideos
    if ((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie) and item.contentType != 'movie':
        item.url = itemlist[0].url
        item.contentType = 'movie'
        return findvideos(item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist, item)
    support.videolibrary(itemlist, item, 'bold color kod')
    return itemlist
def episodios(item):
    itemlist = []

    json_file = current_session.get(item.url, headers=headers, params=payload).json()
    show_id = str(json_file['data'][0]['show_id'])
    season_id = str(json_file['data'][0]['season_id'])
    episodes = []
    support.log('SEASON ID= ', season_id)

    for episode in json_file['data']:
        episodes.append(episode['episodes'])

    for episode in episodes:
        for key in episode:
            if 'stagione' in encode(key['title']).lower():
                match = support.match(
                    encode(key['title']),
                    patron=r'[Ss]tagione\s*(\d+) - [Ee]pisodio\s*(\d+)').match
                title = match[0] + 'x' + match[1] + ' - ' + item.fulltitle
                make_item = True
            elif int(key['season_id']) == int(season_id):
                try:
                    title = 'Episodio ' + key['number'] + ' - ' + key['title'].encode('utf8')
                except:
                    title = 'Episodio ' + key['number'] + ' - ' + key['title']
                make_item = True
            else:
                make_item = False

            if make_item:
                if type(title) == tuple:
                    title = title[0]
                itemlist.append(
                    Item(channel=item.channel,
                         title=title,
                         fulltitle=item.fulltitle,
                         show=item.show,
                         url=host + show_id + '/season/' + str(key['season_id']) + '/',
                         action='findvideos',
                         video_id=key['video_id'],
                         thumbnail=item.thumbnail,
                         fanart=item.fanart,
                         plot=item.plot,
                         contentType=item.contentType))

    autorenumber.renumber(itemlist, item, 'bold')
    if autorenumber.check(item) == True \
            or support.match(itemlist[0].title, patron=r"(\d+x\d+)").match:
        support.videolibrary(itemlist, item)
    return itemlist
def peliculas(item):
    log()
    itemlist = []
    blacklist = ['top 10 anime da vedere']

    matches, data = support.match(
        item,
        r'<a class="[^"]+" href="([^"]+)" title="([^"]+)"><img[^s]+src="([^"]+)"[^>]+')

    for url, title, thumb in matches:
        title = scrapertoolsV2.decodeHtmlentities(title.strip()).replace("streaming", "")
        lang = scrapertoolsV2.find_single_match(title, r"((?:SUB ITA|ITA))")

        videoType = ''
        if 'movie' in title.lower():
            videoType = ' - (MOVIE)'
        if 'ova' in title.lower():
            videoType = ' - (OAV)'

        cleantitle = (title.replace(lang, "")
                           .replace('(Streaming & Download)', '')
                           .replace('( Streaming & Download )', '')
                           .replace('OAV', '')
                           .replace('OVA', '')
                           .replace('MOVIE', '')
                           .strip())

        if not videoType:
            contentType = "tvshow"
            action = "episodios"
        else:
            contentType = "movie"
            action = "findvideos"

        if title.lower() not in blacklist:
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     contentType=contentType,
                     title=support.typo(cleantitle + videoType, 'bold') + support.typo(lang, '_ [] color kod'),
                     fulltitle=cleantitle,
                     show=cleantitle,
                     url=url,
                     thumbnail=thumb))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)
    support.nextPage(itemlist, item, data, r'<a class="next page-numbers" href="([^"]+)">')
    return itemlist
def peliculas(item):
    log()
    action = 'findvideos' if item.extra == 'movie' else 'episodios'

    if item.args == 'movie' or item.extra == 'movie':
        patron = r'<div class="mediaWrap mediaWrapAlt">[^<]+<a href="(?P<url>[^"]+)" title="Permalink to\s(?P<title>[^"]+) \((?P<year>[^<]+)\).*?"[^>]+>[^<]+<img[^s]+src="(?P<thumb>[^"]+)"[^>]+>[^<]+<\/a>.*?<p>\s*(?P<quality>[a-zA-Z-0-9]+)\s*<\/p>'
        patronBlock = '<div id="main_col">(?P<block>.*?)main_col'
        # itemlist = support.scrape(item, patron, ['url', 'title', 'year', 'thumb', 'quality'], headers, action=action, patron_block='<div id="main_col">(.*?)main_col', patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')
        patronNext = '<a class="nextpostslink" rel="next" href="([^"]+)">'
    else:
        patron = r'<div class="media3">[^>]+><a href="(?P<url>[^"]+)"><img[^s]+src="(?P<thumb>[^"]+)"[^>]+><\/a><[^>]+><a[^<]+><p>(?P<title>[^<]+) \((?P<year>[^\)]+)[^<]+<\/p>.*?<p>\s*(?P<quality>[a-zA-Z-0-9]+)\s*<\/p>'
        patronNext = '<a class="nextpostslink" rel="next" href="([^"]+)">'
        # itemlist = support.scrape(item, patron, ['url', 'thumb', 'title', 'year', 'quality'], headers, action=action, patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')

    # the scrape wrapper renumbers anime entries when 'anime' is set in the returned locals()
    if item.args == 'anime':
        anime = True

    # return itemlist
    return locals()
def lista_anime(item):
    log()
    itemlist = []

    PERPAGE = 15
    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    if '||' in item.url:
        series = item.url.split('\n\n')
        matches = []
        for i, serie in enumerate(series):
            matches.append(serie.split('||'))
    else:
        # Extract the entries
        patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
        matches = support.match(item, patron, headers=headers)[0]

    scrapedplot = ""
    scrapedthumbnail = ""
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        if (p - 1) * PERPAGE > i:
            continue
        if i >= p * PERPAGE:
            break

        title = cleantitle(scrapedtitle).replace('(ita)', '(ITA)')
        movie = False
        showtitle = title
        if '(ITA)' in title:
            title = title.replace('(ITA)', '').strip()
            showtitle = title
        else:
            title += ' ' + support.typo('Sub-ITA', '_ [] color kod')

        infoLabels = {}
        if 'Akira' in title:
            movie = True
            infoLabels['year'] = 1988
        if 'Dragon Ball Super Movie' in title:
            movie = True
            infoLabels['year'] = 2019

        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios" if not movie else 'findvideos',
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=showtitle,
                 show=showtitle,
                 contentTitle=showtitle,
                 plot=scrapedplot,
                 contentType='episode' if not movie else 'movie',
                 originalUrl=scrapedurl,
                 infoLabels=infoLabels,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)

    # Pagination
    if len(matches) >= p * PERPAGE:
        support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1)))

    return itemlist
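# The '{}' marker above is this channel's in-URL pagination convention: the next-page Item
# reuses the same list URL with '{}<page>' appended, and lista_anime() splits it back out on
# entry. A worked example (the URL itself is illustrative only):
#   page 1: item.url = 'https://host/lista-anime'      -> p stays 1
#   page 2: item.url = 'https://host/lista-anime{}2'   -> item.url.split('{}') -> p = 2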
def wrapper(*args):
    itemlist = []
    args = func(*args)
    function = func.__name__ if 'actLike' not in args else args['actLike']
    # log('STACK= ', inspect.stack()[1][3])
    item = args['item']
    action = args['action'] if 'action' in args else 'findvideos'
    anime = args['anime'] if 'anime' in args else ''
    addVideolibrary = args['addVideolibrary'] if 'addVideolibrary' in args else True
    search = args['search'] if 'search' in args else ''
    blacklist = args['blacklist'] if 'blacklist' in args else []
    data = args['data'] if 'data' in args else ''
    patron = args['patron'] if 'patron' in args else args['patronMenu'] if 'patronMenu' in args else ''
    if 'headers' in args:
        headers = args['headers']
    elif 'headers' in func.__globals__:
        headers = func.__globals__['headers']
    else:
        headers = ''
    patronNext = args['patronNext'] if 'patronNext' in args else ''
    patronBlock = args['patronBlock'] if 'patronBlock' in args else ''
    typeActionDict = args['typeActionDict'] if 'typeActionDict' in args else {}
    typeContentDict = args['typeContentDict'] if 'typeContentDict' in args else {}
    debug = args['debug'] if 'debug' in args else False
    debugBlock = args['debugBlock'] if 'debugBlock' in args else False

    if 'pagination' in args and inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
        pagination = args['pagination'] if args['pagination'] else 20
    else:
        pagination = ''

    lang = args['deflang'] if 'deflang' in args else ''
    pag = item.page if item.page else 1  # pagination
    matches = []
    log('PATRON= ', patron)

    if not data:
        page = httptools.downloadpage(item.url,
                                      headers=headers,
                                      ignore_response_code=True,
                                      session=item.session)
        # if the url may have changed and the channel has findhost() to update it
        if (not page.data or scrapertools.get_domain_from_url(page.url) != scrapertools.get_domain_from_url(item.url)) \
                and 'findhost' in func.__globals__:
            host = func.__globals__['findhost']()
            parse = list(urlparse.urlparse(item.url))
            from core import jsontools
            jsontools.update_node(host, func.__module__.split('.')[-1], 'url')
            parse[1] = scrapertools.get_domain_from_url(host)
            item.url = urlparse.urlunparse(parse)
            page = httptools.downloadpage(item.url,
                                          headers=headers,
                                          ignore_response_code=True,
                                          session=item.session)

        data = page.data.replace("'", '"')
        data = re.sub('\n|\t', ' ', data)
        data = re.sub(r'>\s+<', '> <', data)
        # replace all ' with " and strip newlines/tabs, so the patterns don't need to worry about them

    if patronBlock:
        if debugBlock:
            regexDbg(item, patronBlock, headers, data)
        blocks = scrapertools.find_multiple_matches_groups(data, patronBlock)
        block = ""
        for bl in blocks:
            # log(len(blocks), bl)
            if 'season' in bl and bl['season']:
                item.season = bl['season']
            blockItemlist, blockMatches = scrapeBlock(
                item, args, bl['block'], patron, headers, action, pagination,
                debug, typeContentDict, typeActionDict, blacklist, search, pag,
                function, lang)
            for it in blockItemlist:
                if 'lang' in bl:
                    it.contentLanguage, it.title = scrapeLang(bl, it.contentLanguage, it.title)
                if 'quality' in bl and bl['quality']:
                    it.quality = bl['quality'].strip()
                    it.title = it.title + typo(bl['quality'].strip(), '_ [] color kod')
            itemlist.extend(blockItemlist)
            matches.extend(blockMatches)
    elif patron:
        itemlist, matches = scrapeBlock(item, args, data, patron, headers,
                                        action, pagination, debug,
                                        typeContentDict, typeActionDict,
                                        blacklist, search, pag, function, lang)

    if 'itemlistHook' in args:
        itemlist = args['itemlistHook'](itemlist)

    # next page with pagination
    if (pagination and len(matches) <= pag * pagination) or not pagination:
        if patronNext and inspect.stack()[1][3] != 'newest':
            nextPage(itemlist, item, data, patronNext, function)

    # next page for pagination
    if pagination and len(matches) > pag * pagination and not search:
        if inspect.stack()[1][3] != 'get_newest':
            itemlist.append(
                Item(channel=item.channel,
                     action=item.action,
                     contentType=item.contentType,
                     title=typo(config.get_localized_string(30992), 'color kod bold'),
                     fulltitle=item.fulltitle,
                     show=item.show,
                     url=item.url,
                     args=item.args,
                     page=pag + 1,
                     thumbnail=thumb()))

    if action != 'play' and function != 'episodios' and 'patronMenu' not in args:
        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    from specials import autorenumber
    if anime:
        if function == 'episodios' or item.action == 'episodios':
            autorenumber.renumber(itemlist, item, 'bold')
        else:
            autorenumber.renumber(itemlist)

    if anime and autorenumber.check(item) == False \
            and not scrapertools.find_single_match(itemlist[0].title, r'(\d+.\d+)'):
        pass
    else:
        if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
            # item.fulltitle = item.infoLabels["title"]
            videolibrary(itemlist, item, function=function)
        if config.get_setting('downloadenabled') and (function == 'episodios' or function == 'findvideos'):
            download(itemlist, item, function=function)

    if 'patronMenu' in args and itemlist:
        itemlist = thumb(itemlist, genre=True)

    if 'fullItemlistHook' in args:
        itemlist = args['fullItemlistHook'](itemlist)

    # itemlist = filterLang(item, itemlist)  # causes problems with newest
    return itemlist
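# The wrapper above is applied to channel functions that simply collect their scraping settings
# in local variables and return locals(), as the peliculas() with 'return locals()' earlier in
# this section does. The snippet below is a minimal, hypothetical channel function written only
# to illustrate that contract; the decorator name 'scrape', the patterns and the flags are
# placeholders, not a real channel definition.
@scrape
def example_list(item):
    action = 'episodios'          # action assigned to every scraped Item
    anime = True                  # ask the wrapper to run autorenumber on the results
    patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)"'                 # placeholder
    patronBlock = r'<div id="main_col">(?P<block>.*?)</div>'                      # placeholder
    patronNext = r'<a class="nextpostslink" rel="next" href="([^"]+)">'           # placeholder
    return locals()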
def video(item):
    log()
    itemlist = []

    matches, data = support.match(
        item,
        r'<a href="([^"]+)" class[^>]+><img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>',
        '<div class="widget-body">(.*?)<div id="sidebar"',
        headers=headers)

    for scrapedurl, scrapedthumb, scrapedinfo, scrapedoriginal, scrapedtitle in matches:
        # Look for info such as year or language in the title
        year = ''
        lang = ''
        if '(' in scrapedtitle:
            year = scrapertoolsV2.find_single_match(scrapedtitle, r'( \([0-9]+\))')
            lang = scrapertoolsV2.find_single_match(scrapedtitle, r'( \([a-zA-Z]+\))')

        # Remove year and language from the title
        title = scrapedtitle.replace(year, '').replace(lang, '').strip()
        original = scrapedoriginal.replace(year, '').replace(lang, '').strip()

        # Compare the title with the original one
        if original == title:
            original = ''
        else:
            original = support.typo(scrapedoriginal, '-- []')

        # Look for extra info
        ep = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ep">(.*?)<')
        if ep != '':
            ep = ' - ' + ep
        ova = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ova">(.*?)<')
        if ova != '':
            ova = ' - (' + ova + ')'
        ona = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ona">(.*?)<')
        if ona != '':
            ona = ' - (' + ona + ')'
        movie = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="movie">(.*?)<')
        if movie != '':
            movie = ' - (' + movie + ')'
        special = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="special">(.*?)<')
        if special != '':
            special = ' - (' + special + ')'

        # Assemble the info
        lang = support.typo('Sub-ITA', '_ [] color kod') if '(ita)' not in lang.lower() else ''
        info = ep + lang + year + ova + ona + movie + special

        # Build the title to display
        long_title = '[B]' + title + '[/B]' + info + original

        # Decide whether it is a series or a movie
        if movie == '':
            contentType = 'tvshow'
            action = 'episodios'
        else:
            contentType = 'movie'
            action = 'findvideos'

        itemlist.append(
            Item(channel=item.channel,
                 contentType=contentType,
                 action=action,
                 title=long_title,
                 url=scrapedurl,
                 fulltitle=title,
                 show=title,
                 thumbnail=scrapedthumb,
                 context=autoplay.context,
                 number='1'))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)

    # Next page
    support.nextPage(itemlist, item, data, r'href="([^"]+)" rel="next"', resub=['&amp;', '&'])
    return itemlist
def wrapper(*args):
    function = func.__name__
    itemlist = []
    args = func(*args)
    # log('STACK= ', inspect.stack()[1][3])
    item = args['item']
    action = args['action'] if 'action' in args else 'findvideos'
    anime = args['anime'] if 'anime' in args else ''
    addVideolibrary = args['addVideolibrary'] if 'addVideolibrary' in args else True
    search = args['search'] if 'search' in args else ''
    blacklist = args['blacklist'] if 'blacklist' in args else []
    data = args['data'] if 'data' in args else ''
    patron = args['patron'] if 'patron' in args else args['patronMenu'] if 'patronMenu' in args else ''
    if 'headers' in args:
        headers = args['headers']
    elif 'headers' in func.__globals__:
        headers = func.__globals__['headers']
    else:
        headers = ''
    patronNext = args['patronNext'] if 'patronNext' in args else ''
    patronBlock = args['patronBlock'] if 'patronBlock' in args else ''
    typeActionDict = args['type_action_dict'] if 'type_action_dict' in args else {}
    typeContentDict = args['type_content_dict'] if 'type_content_dict' in args else {}
    debug = args['debug'] if 'debug' in args else False

    log('STACK= ', inspect.stack()[1][3])
    if 'pagination' in args and inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
        pagination = args['pagination'] if args['pagination'] else 20
    else:
        pagination = ''

    lang = args['deflang'] if 'deflang' in args else ''
    pag = item.page if item.page else 1  # pagination
    matches = []
    log('PATRON= ', patron)

    if not data:
        data = httptools.downloadpage(item.url,
                                      headers=headers,
                                      ignore_response_code=True).data.replace("'", '"')
        data = re.sub('\n|\t', ' ', data)
        data = re.sub(r'>\s+<', '> <', data)
        # replace all ' with " and strip newlines/tabs, so the patterns don't need to worry about them

    log('DATA =', data)

    if patronBlock:
        blocks = scrapertoolsV2.find_multiple_matches_groups(data, patronBlock)
        block = ""
        for bl in blocks:
            blockItemlist, blockMatches = scrapeBlock(
                item, args, bl['block'], patron, headers, action, pagination,
                debug, typeContentDict, typeActionDict, blacklist, search, pag,
                function, lang)
            for it in blockItemlist:
                if 'lang' in bl:
                    it.contentLanguage, it.title = scrapeLang(bl, it.contentLanguage, it.title)
                if 'quality' in bl and bl['quality']:
                    it.quality = bl['quality'].strip()
                    it.title = it.title + typo(bl['quality'].strip(), '_ [] color kod')
            log('BLOCK ', '=', block)
            itemlist.extend(blockItemlist)
            matches.extend(blockMatches)
    elif patron:
        itemlist, matches = scrapeBlock(item, args, data, patron, headers,
                                        action, pagination, debug,
                                        typeContentDict, typeActionDict,
                                        blacklist, search, pag, function, lang)

    checkHost(item, itemlist)

    if 'itemlistHook' in args:
        itemlist = args['itemlistHook'](itemlist)

    if patronNext:
        nextPage(itemlist, item, data, patronNext, 2)

    # next page for pagination
    if pagination and len(matches) >= pag * pagination:
        if inspect.stack()[1][3] != 'get_newest':
            itemlist.append(
                Item(channel=item.channel,
                     action=item.action,
                     contentType=item.contentType,
                     title=typo(config.get_localized_string(30992), 'color kod bold'),
                     fulltitle=item.fulltitle,
                     show=item.show,
                     url=item.url,
                     args=item.args,
                     page=pag + 1,
                     thumbnail=thumb()))

    if action != 'play' and function != 'episodios' and 'patronMenu' not in args:
        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    from specials import autorenumber
    if anime:
        if function == 'episodios' or item.action == 'episodios':
            autorenumber.renumber(itemlist, item, 'bold')
        else:
            autorenumber.renumber(itemlist)

    if anime and autorenumber.check(item) == False:
        pass
    else:
        if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
            # item.fulltitle = item.infoLabels["title"]
            videolibrary(itemlist, item, function=function)
        if config.get_setting('downloadenabled') and (function == 'episodios' or function == 'findvideos'):
            download(itemlist, item, function=function)

    if 'patronMenu' in args:
        itemlist = thumb(itemlist, genre=True)

    if 'fullItemlistHook' in args:
        itemlist = args['fullItemlistHook'](itemlist)

    # itemlist = filterLang(item, itemlist)  # causes problems with newest
    return itemlist