def genres(item):
    # Build the scraping parameters for a sub-menu listing (genres, years,
    # quality levels or the daily random titles) and hand them to the
    # generic scraper by returning locals(); local names are part of the
    # contract, so they must not be renamed.
    support.log('genres',item)
    action = 'peliculas'
    ## item.contentType = 'movie'
    if item.args == 'genres':
        patronBlock = r'<ul class="listSubCat" id="Film">(?P<block>.*)</ul>'
    elif item.args == 'years':
        patronBlock = r'<ul class="listSubCat" id="Anno">(?P<block>.*)</ul>'
    elif item.args == 'quality':
        patronBlock = r'<ul class="listSubCat" id="Qualita">(?P<block>.*)</ul>'
    elif item.args == 'lucky':
        # these are the random titles on the page, they change once a day
        patronBlock = r'FILM RANDOM.*?class="listSubCat">(?P<block>.*)</ul>'
        # random titles link straight to the video sources
        action = 'findvideos'
        ## item.args = ''
    # each <li> entry: url + visible title
    patron = r'<li><a href="(?P<url>[^"]+)">(?P<title>[^<]+)<'
    return locals()
def newest(categoria):
    """Return the newest items for *categoria* (only 'anime' is handled).

    On any scraping error the exception info is logged and an empty list
    is returned so the global "newest" aggregation keeps going with the
    other channels.
    """
    support.log(categoria)
    itemlist = []
    item = support.Item()
    try:
        if categoria == "anime":
            item.contentType = 'tvshow'
            item.url = host
            item.args = 'newest'
            return peliculas(item)
    # keep the search going for the other channels on error;
    # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt pass through
    except Exception:
        import sys
        for line in sys.exc_info():
            support.logger.error("{0}".format(line))
        return []
    # unknown category: nothing to list
    return itemlist
def peliculas(item): support.log(item) #support.dbg() # decommentare per attivare web_pdb #findhost() blacklist = [''] if item.args != 'search': patron = r'<div class="col-mt-5 postsh">[^<>]+<div class="poster-media-card">[^<>]+<a href="(?P<url>[^"]+)" title="(?P<title>.+?)[ ]?(?:\[(?P<lang>Sub-ITA)\])?".*?<img(?:.+?)?src="(?P<thumb>[^"]+)"' patronBlock = r'<div class="showpost4 posthome">(?P<block>.*?)</section>' else: patron = r'<li class="col-md-12 itemlist">.*?<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?Film dell"anno: (?P<year>\d{4})(?:[\d\-]+)?</p> <p class="text-list">(?P<plot>[^<>]+)</p>' patronBlock = r'<ul class="search-results-content infinite">(?P<block>.*?)</section>' patronNext = '<a href="([^"]+)"\s+?><i class="glyphicon glyphicon-chevron-right"' #support.regexDbg(item, patronBlock, headers) # debug = True return locals()
def pagina(url):
    """Download *url*; when the body is only an interstitial
    ("clicca qui ..." link) follow it to the real target page.

    Returns the final page body with single quotes normalised to
    double quotes (done on every download, before any matching).
    """
    support.log(url)
    #findhost()

    def fetch(target):
        # every page gets the same quote normalisation
        return httptools.downloadpage(target, headers=headers).data.replace("'", '"')

    data = fetch(url)
    #support.log("DATA ----###----> ", data)
    if 'clicca qui per aprire' in data.lower():
        target = scrapertools.find_single_match(data, '"go_to":"([^"]+)"')
        # the json-embedded url comes escaped
        target = target.replace("\\", "")
        data = fetch(target)
    elif 'clicca qui</span>' in data.lower():
        target = scrapertools.find_single_match(
            data, '<h2 style="text-align: center;"><a href="([^"]+)">')
        data = fetch(target)
    return data
def peliculas(item):
    """Build the movie list from the site's hydra-style JSON API.

    Downloads item.url, converts every 'hydra:member' entry into kod
    items and, unless called by newest(), appends a next-page entry when
    the API exposes a 'hydra:view'/'hydra:next' link.
    """
    support.log()
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    json_object = jsontools.load(data)
    for movie in json_object['hydra:member']:
        itemlist.extend(get_itemlist_movie(movie, item))
    # explicit lookup instead of the former try/except-pass: the API
    # simply omits 'hydra:view'/'hydra:next' on the last page
    next_page = json_object.get('hydra:view', {}).get('hydra:next')
    if next_page and support.inspect.stack()[1][3] not in ['newest']:
        support.nextPage(itemlist, item, next_page=next_page)
    return itemlist
def findvideos(item):
    """Resolve the playable sources for *item* via support.server.

    TV shows carry the link list directly in item.url; movies scrape it
    from the page body.
    """
    support.log('findvideos ->', item)
    itemlist = []
    if item.contentType != 'movie':
        return support.server(item, item.url)
    block = r'<div class="col-md-10">(.+?)<div class="swappable" id="links">'
    links = str(support.match(item, r'SRC="([^"]+)"', patronBlock=block)[0])
    if not links:
        # nothing scraped: let support.server work from the item alone
        return support.server(item)
    # '#' is the page's placeholder for the speedvideo host
    return support.server(item, links.replace('#', 'speedvideo.net'))
def extract():
    """Unzip the downloaded Quasar addon into Kodi's addon folder,
    enable it via JSON-RPC and point its download path at kod's one."""
    import zipfile
    support.log('Estraggo Quasar in:', quasar_path)
    addons_dir = xbmc.translatePath("special://home/addons/")
    with zipfile.ZipFile(filename, 'r') as archive:
        archive.extractall(addons_dir)
    xbmc.executebuiltin('UpdateLocalAddons')
    if platformtools.dialog_ok('Quasar', config.get_localized_string(70783)):
        # the zip is no longer needed once extracted
        if filetools.exists(filename):
            filetools.remove(filename)
        enable_rpc = (
            '{"jsonrpc": "2.0", "id":1, "method": "Addons.SetAddonEnabled", '
            '"params": { "addonid": "plugin.video.quasar", "enabled": true }}')
        xbmc.executeJSONRPC(enable_rpc)
        updater.refreshLang()
        quasar = xbmcaddon.Addon(id="plugin.video.quasar")
        quasar.setSetting('download_path', config.get_setting('downloadpath'))
        xbmc.executebuiltin('UpdateLocalAddons')
        sleep(2)
def itemHook(item):
    """Post-process a scraped item: anime urls go to the episode list,
    single-episode entries (nep == '1') straight to findvideos, and
    everything else to findmovie.
    """
    support.log("ITEMHOOK -> ", item)
    item = language(item)
    if 'anime' in item.url:
        item.contentType = 'tvshow'
        item.action = 'episodios'
        #item.args = 'anime'
    elif item.nep == '1':
        # a single-episode entry behaves like a movie
        item.contentType = 'movie'
        item.action = 'findvideos'
    else:
        item.contentType = 'episode'
        item.args = ''
        # dropped the no-op `item.nep = item.nep` self-assignment
        item.action = 'findmovie'
    return item
def findvideos(item):
    """Dispatch playback: tv shows through the dooplay link list, movies
    straight to the video player, everything else to the half player."""
    support.log()
    if item.contentType == "tvshow":
        links = support.dooplay_get_links(item, host)
        # no direct links -> fall back to the episode listing
        if links == []:
            return episodios(item)
        item.url = links[0]["url"]
        return videoplayer(item)
    #if item.args == "movies" or "movie":
    if item.contentType == 'movie':
        return videoplayer(item)
    return halfplayer(item)
def remove_channel(item):
    """Delete the community channel *item.channel_id* from the on-disk
    json registry, persist the file and notify the user."""
    support.log()
    path = filetools.join(config.get_data_path(), 'community_channels.json')
    # read inside a context manager so the handle is always closed
    # (the original leaked the read handle)
    with open(path, "r") as f:
        community_json = jsontools.load(f.read())
    channel_id = item.channel_id
    to_delete = community_json['channels'][channel_id]['channel_name']
    del community_json['channels'][channel_id]
    with open(path, "w") as f:
        f.write(jsontools.dump(community_json))
    platformtools.dialog_notification(config.get_localized_string(20000),
                                      config.get_localized_string(70684) % to_delete)
    platformtools.itemlist_refresh()
    return
def findvideos(item):
    # Collect candidate video links from the entry-content block and
    # resolve is.gd short links by reading the redirect Location header
    # into `data`.
    # NOTE(review): the function visibly ends without a return statement —
    # this chunk may be truncated; confirm against the full file.
    log()
    itemlist = []
    # data = httptools.downloadpage(item.url, headers=headers).data
    patronBlock = '<div class="entry-content">(?P<block>.*)<footer class="entry-footer">'
    # bloque = scrapertools.find_single_match(data, patronBlock)
    patron = r'<a href="([^"]+)">'
    # matches = re.compile(patron, re.DOTALL).findall(bloque)
    matches, data = support.match(item, patron, patronBlock, headers)
    for scrapedurl in matches:
        if 'is.gd' in scrapedurl:
            # don't follow: the Location header holds the real target url
            resp = httptools.downloadpage(
                scrapedurl, follow_redirects=False)
            data += resp.headers.get("location", "") + '\n'
def load_json(item, no_order=False):
    """Load a json document.

    *item* may be an Item (its .url is used) or a plain url/path string.
    http(s) sources are downloaded, anything else is read from disk.
    Key order is preserved via OrderedDict unless *no_order* is set or
    the item carries a filterkey. Returns {} on any error.
    """
    support.log()
    url = item.url if isinstance(item, Item) else item
    try:
        if url.startswith('http'):
            json_file = httptools.downloadpage(url).data
        else:
            # context manager so the handle is always released
            # (the original leaked the read handle)
            with open(url, "r") as f:
                json_file = f.read()
        # getattr: a plain string has no .filterkey, which used to raise
        # and silently return {} for local/url strings
        if no_order or getattr(item, 'filterkey', None):
            json = jsontools.load(json_file)
        else:
            json = jsontools.load(json_file, object_pairs_hook=OrderedDict)
    except Exception:
        json = {}
    return json
def load_and_check(item):
    """Load community_channels.json, validate every not-yet-checked
    channel url (following redirects), persist the result and return
    the parsed registry."""
    support.log()
    path = filetools.join(config.get_data_path(), 'community_channels.json')
    # context manager: the original leaked the read handle
    with open(path, "r") as f:
        json = jsontools.load(f.read())
    for key, channel in json['channels'].items():
        if 'checked' not in channel:
            response = httptools.downloadpage(channel['path'],
                                              follow_redirects=True,
                                              timeout=5)
            if response.sucess:  # httptools really spells it 'sucess'
                channel['path'] = response.url
                channel['channel_name'] = re.sub(r'\[[^\]]+\]', '',
                                                 channel['channel_name'])
                # bugfix: was channel['check'], which never satisfied the
                # 'checked' guard above, so every load re-verified all urls
                channel['checked'] = True
    with open(path, "w") as f:
        f.write(jsontools.dump(json))
    return json
def serietv(item):
    """TV-series listing.

    With item.args set we scrape the "newest episodes" rows, whose
    episode titles are folded into the entry itself (and not visible in
    newest!!!); otherwise the regular paginated archive.
    """
    #import web_pdb; web_pdb.set_trace()
    support.log()
    if item.args:
        patron = r'<span class="serieTitle" style="font-size:20px">(.*?).[^–]<a href="([^"]+)"\s+target="_blank">(.*?)<\/a>'
        listGroups = ['title', 'url', 'title2']
        patronNext = ''
    else:
        patron = r'<div class="post-thumb">.*?\s<img src="([^"]+)".*?><a href="([^"]+)".*?>(.*?(?:\((\d{4})\)|(\d{4}))?)<\/a><\/h2>'
        listGroups = ['thumb', 'url', 'title', 'year', 'year']
        patronNext = 'a class="next page-numbers" href="?([^>"]+)">Avanti »</a>'
    return support.scrape(item, patron_block='', patron=patron,
                          listGroups=listGroups, patronNext=patronNext,
                          action='episodios')
def peliculas(item): support.log('peliculas', item) ## support.dbg() action = "findvideos" if item.args == "search": patronBlock = r'</script> <div class="boxgrid caption">(?P<block>.*)<div id="right_bar">' else: patronBlock = r'<div class="cover_kapsul ml-mask">(?P<block>.*)<div class="page_nav">' patron = r'<div class="cover boxcaption"> <h2>.<a href="(?P<url>[^"]+)">.*?<.*?src="(?P<thumb>[^"]+)"'\ '.+?[^>]+>[^>]+<div class="trdublaj"> (?P<quality>[A-Z/]+)<[^>]+>(?:.[^>]+>(?P<lang>.*?)<[^>]+>).*?'\ '<p class="h4">(?P<title>.*?)</p>[^>]+> [^>]+> [^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> [^>]+> '\ '[^>]+>[^>]+>(?P<year>\d{4})[^>]+>[^>]+> [^>]+>[^>]+>(?P<duration>\d+).+?>.*?<p>(?P<plot>[^<]+)<' patronNext = '<span>\d</span> <a href="([^"]+)">' ## support.regexDbg(item, patron, headers) return locals()
def select(item):
    """List the section blocks of a page; when exactly one block exists,
    jump straight into its episode listing."""
    support.log()
    blocks = current_session.get(item.url).json()['blocks']
    itemlist = [
        support.Item(channel=item.channel,
                     title=support.typo(block['name'], 'bold'),
                     fulltitle=item.fulltitle,
                     show=item.show,
                     thumbnail=item.thumbnail,
                     url=block['sets'],
                     action='episodios',
                     args=item.args)
        for block in blocks]
    if len(itemlist) == 1:
        return episodios(itemlist[0])
    return itemlist
def genres(item):
    """Build the genre menu from the "Generi" dropdown of the page."""
    support.log()
    itemlist = []
    entry_patron = r'<input.*?name="([^"]+)" value="([^"]+)"\s*>[^>]+>([^<]+)<\/label>'
    block_patron = r'<button class="btn btn-sm btn-default dropdown-toggle" data-toggle="dropdown"> Generi <span.[^>]+>(.*?)</ul>'
    entries = support.match(item, entry_patron, block_patron, headers=headers)[0]
    for field, val, label in entries:
        # filter url: field/value pair from the form plus the sort order
        filter_url = host + '/filter?' + field + '=' + val + '&sort=' + order()
        support.menuItem(itemlist, __channel__, support.typo(label, 'bold'),
                         'peliculas', filter_url, 'tvshow', args='sub')
    return itemlist
def replay_channels(item):
    """One entry per live channel, pointing at that channel's schedule
    (palinsesto) json for the selected date."""
    support.log()
    itemlist = []
    dirette = current_session.get(item.url).json()['dirette']
    for entry in dirette:
        name = entry['channel']
        schedule_url = '%s/palinsesto/app/old/%s/%s.json' % (
            host, name.lower().replace(' ', '-'), item.date)
        icon = entry['transparent-icon'].replace("[RESOLUTION]", "256x-")
        itemlist.append(
            support.Item(channel=item.channel,
                         title=support.typo(name, 'bold'),
                         fulltitle=name,
                         show=name,
                         plot=item.title,
                         action='replay',
                         thumbnail=icon,
                         url=schedule_url))
    return itemlist
def set_title(title, language='', quality=''):
    """Apply the typo style embedded in *title* (the "{...}" marker,
    always including 'bold') and append quality/language tags."""
    support.log()
    style = support.match(title, patron=r'\{([^\}]+)\}').match
    if 'bold' not in style:
        style += ' bold'
    stripped = re.sub(r'(\{[^\}]+\})', '', title)
    result = support.typo(stripped, style)
    if quality:
        result += support.typo(quality, '_ [] color kod bold')
    if language:
        # normalise to a list so a single tag and a tag list share one path
        langs = language if isinstance(language, list) else [language]
        for lang in langs:
            result += support.typo(lang.upper(), '_ [] color kod bold')
    return result
def dooplayer(item):
    # Resolve the dooplay-style embedded player: follow the
    # mondolunatico.tk iframe, POST to its /api/source endpoint and build
    # playable fvs.io links from the response.
    support.log()
    itemlist = []
    url = item.url
    data = httptools.downloadpage(url, headers=headers).data
    link = scrapertools.find_single_match(
        data, r'(https://mondolunatico.tk/./[^"]+)')
    data = httptools.downloadpage(link, headers=headers).data
    # an episode-list panel means this is a multi-part page
    if "panel_toggle toggleable" in data:
        item.url = link
        return player_list(item)
    # Fix the link with the POST link
    link1 = link.replace("/v/", "/api/source/").replace("/p/", "/api/source/")
    # NOTE(review): "modolunatico.tk" (missing 'n') looks like a typo for
    # "mondolunatico.tk" — confirm whether the endpoint validates this field
    postData = urllib.urlencode({
        "r": link,
        "d": "modolunatico.tk",
    })
    block = httptools.downloadpage(link1, post=postData).data
    # each "file" entry holds the token of an fvs.io redirect url
    patron = r'"file":".*?\/(r[^"]+)'
    matches = re.compile(patron, re.DOTALL).findall(block)
    for scrapedurl in matches:
        scrapedurl = "https://fvs.io/" + scrapedurl
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 contentType=item.contentType,
                 title=item.title,
                 thumbnail=item.thumbnail,
                 fulltitle=item.title,
                 url=scrapedurl,
                 show=item.show))
    autoplay.start(itemlist, item)
    support.videolibrary(itemlist, item, 'color kod')
    return itemlist
def findvideos(item):
    # Inspect the page category: anime/series are routed to episodios(),
    # movies fall through to link extraction (base64-protected iframes,
    # shortener resolution) and end in support.server.
    log()
    listurl = set()
    # itemlist = []
    support.log("ITEMLIST: ", item)
    ## if item.args == 'anime':
    ##     data = item.url
    ## else:
    ##     data = httptools.downloadpage(item.url, headers=headers).data
    data = httptools.downloadpage(item.url, headers=headers).data
    # flatten whitespace so the category regex can match across lines
    data = re.sub('\n|\t', ' ', data)
    data = re.sub(r'>\s+<', '> <', data)
    check = scrapertools.find_single_match(
        data, r'<div class="category-film">\s+<h3>\s+(.*?)\s+</h3>\s+</div>')
    if 'sub' in check.lower():
        item.contentLanguage = 'Sub-ITA'
    support.log("CHECK : ", check)
    if 'anime' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        support.log('select = ### è una anime ###')
        return episodios(item)
    elif 'serie' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        return episodios(item)
    if 'protectlink' in data:
        urls = scrapertools.find_multiple_matches(
            data, r'<iframe src="[^=]+=(.*?)"')
        support.log("SONO QUI: ", urls)
        for url in urls:
            # Py2-only: decode the base64-obfuscated url
            url = url.decode('base64')
            # drop the part that does not belong to the url
            url, c = unshorten_only(url)
            if 'nodmca' in url:
                page = httptools.downloadpage(url, headers=headers).data
                url = '\t' + scrapertools.find_single_match(
                    page, '<meta name="og:url" content="([^=]+)">')
            if url:
                listurl.add(url)
    # append the resolved urls so support.server can pick them all up
    data += '\n'.join(listurl)
    return support.server(item, data)  #, headers=headers)
def episodios(item):
    # Scrape the episode table: each row yields title, episode title,
    # plot, series name, season/episode numbers, up to three embed urls
    # and one of two possible thumbnail attributes.
    log()
    itemlist = []
    patron = r'<div\sclass="[^"]+">\s([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><p[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>'
    patron += r'[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s'
    patron += r'.*?embed="([^"]+)"\s.*?embed2="([^"]+)?"\s.*?embed3="([^"]+)?"?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s?'
    patron += r'(?:<img\sclass="[^"]+" meta-src="([^"]+)"[^>]+>|<img\sclass="[^"]+" src="" data-original="([^"]+)"[^>]+>)?'
    matches = support.match(item, patron, headers=headers)[0]
    for scrapedtitle, scrapedepisodetitle, scrapedplot, scrapedserie, scrapedseason, scrapedepisode, scrapedurl, scrapedurl2, scrapedurl3, scrapedthumbnail, scrapedthumbnail2 in matches:
        scrapedtitle = cleantitle(scrapedtitle)
        # zero-pad so titles sort correctly ("1x02 ...")
        scrapedepisode = scrapedepisode.zfill(2)
        scrapedepisodetitle = cleantitle(scrapedepisodetitle)
        title = str(
            "%sx%s %s" %
            (scrapedseason, scrapedepisode, scrapedepisodetitle)).strip()
        if 'SUB-ITA' in scrapedtitle:
            title += " " + support.typo("Sub-ITA", '_ [] color kod')
        infoLabels = {}
        infoLabels['season'] = scrapedseason
        infoLabels['episode'] = scrapedepisode
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=support.typo(title, 'bold'),
                 fulltitle=scrapedtitle,
                 # all three embeds joined so findvideos can try each one
                 url=scrapedurl + "\r\n" + scrapedurl2 + "\r\n" + scrapedurl3,
                 contentType="episode",
                 plot=scrapedplot,
                 contentSerieName=scrapedserie,
                 contentLanguage='Sub-ITA' if 'Sub-ITA' in title else '',
                 infoLabels=infoLabels,
                 thumbnail=scrapedthumbnail2 if scrapedthumbnail2 != '' else scrapedthumbnail,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    support.videolibrary(itemlist, item)
    return itemlist
def pagination(item, itemlist=None):
    """Slice *itemlist* (or the items serialized in item.itemlist) to the
    configured page size and append a "next page" entry when more items
    remain.

    The default changed from a mutable [] to None (same semantics: an
    empty or omitted list is rebuilt from item.itemlist) to avoid the
    shared-mutable-default pitfall.
    """
    support.log()
    import json  # NOTE(review): appears unused here — confirm before removing
    itlist = []
    if not itemlist:
        itemlist = [Item().fromurl(it) for it in item.itemlist]
    encoded_itemlist = [it.tourl() for it in itemlist]
    # page size only applies outside the library-maintenance entry points
    if inspect.stack()[1][3] not in [
            'add_tvshow', 'get_episodes', 'update', 'find_episodes', 'search'
    ]:
        Pagination = int(defp) if defp.isdigit() else ''
    else:
        Pagination = ''
    pag = item.page if item.page else 1
    # NOTE: the loop deliberately rebinds `item`; the "next page" entry
    # below reuses the fields of the last processed item
    for i, item in enumerate(itemlist):
        if Pagination and (pag - 1) * Pagination > i: continue  # pagination
        if Pagination and i >= pag * Pagination: break  # pagination
        itlist.append(item)
    if Pagination and len(itemlist) >= Pagination:
        if inspect.stack()[1][3] != 'get_newest':
            itlist.append(
                Item(channel=item.channel,
                     action='pagination',
                     contentType=item.contentType,
                     title=support.typo(config.get_localized_string(30992),
                                        'color kod bold'),
                     fulltitle=item.fulltitle,
                     show=item.show,
                     url=item.url,
                     args=item.args,
                     page=pag + 1,
                     path=item.path,
                     media_type=item.media_type,
                     thumbnail=support.thumb(),
                     itemlist=encoded_itemlist))
    return itlist
def findvideos(item):
    # Variant of the category-dispatch findvideos built on support.match:
    # anime/series go to episodios(), movies fall through to protected
    # link extraction and end in support.server (including item.otherLinks).
    log()
    listurl = set()
    itemlist = []
    support.log("ITEMLIST: ", item)
    data = support.match(item.url, headers=headers).data
    check = support.match(
        data, patron=r'<div class="category-film">(.*?)</div>').match
    if 'sub' in check.lower():
        item.contentLanguage = 'Sub-ITA'
    support.log("CHECK : ", check)
    if 'anime' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        support.log('select = ### è una anime ###')
        # best effort: fall through to the movie path if the episode
        # scrape fails
        try:
            return episodios(item)
        except:
            pass
    elif 'serie' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        return episodios(item)
    if 'protectlink' in data:
        urls = scrapertools.find_multiple_matches(
            data, r'<iframe src="[^=]+=(.*?)"')
        support.log("SONO QUI: ", urls)
        for url in urls:
            # Py2-only: decode the base64-obfuscated url
            url = url.decode('base64')
            # drop the part that does not belong to the url
            url, c = unshorten_only(url)
            if 'nodmca' in url:
                page = httptools.downloadpage(url, headers=headers).data
                url = '\t' + scrapertools.find_single_match(
                    page, '<meta name="og:url" content="([^=]+)">')
            if url:
                listurl.add(url)
    data += '\n'.join(listurl)
    itemlist = support.server(item, data + item.otherLinks,
                              patronTag='Keywords:\s*<span>([^<]+)')
    return itemlist
def player_list(item):
    """Build the episode list of a multi-part page; pages without the
    episode panel are delegated to player()."""
    support.log()
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data
    if "panel_toggle toggleable" not in data:
        return player(item)
    # Grab the episode-list block
    block = scrapertools.find_single_match(
        data, r'panel_toggle toggleable.*?(<div.*?)<!-- Javascript -->')
    patron = r'data-url="([^"]+)">.*?([A-Z].*?) '
    rows = re.compile(patron, re.DOTALL).findall(block)
    # release-name noise stripped from titles, in the original order
    junk_patterns = (
        'mp4|avi|mkv',
        'WebRip|WEBRip|x264|AC3|1080p|DLMux|XviD-|BDRip|BluRay|HD|WEBMux|H264|BDMux|720p|TV|NFMux|DVDRip|DivX|DVDip|Ac3|Dvdrip|Mux|NovaRip|DVD|SAT|Divx',
        'ITA|ENG|Italian|SubITA|SUBITA|iTALiAN|LiAN|Ita',
        'Pir8|UBi|M L|BEDLAM|REPACK|DD5.1|bloody|SVU',
    )
    for episode_url, episode_title in rows:
        for junk in junk_patterns:
            episode_title = re.sub(junk, '', episode_title)
        episode_title = episode_title.replace(".", " ").replace(
            " - ", " ").replace(" -", "").replace(" ", "")
        itemlist.append(
            Item(channel=__channel__,
                 action="halfplayer",
                 contentType=item.contentType,
                 title=episode_title,
                 thumbnail=item.thumbnail,
                 fulltitle=episode_title,
                 url="https://mondolunatico.tk" + episode_url,
                 show=item.show))
    support.videolibrary(itemlist, item, 'color kod')
    return itemlist
def peliculas(item):
    # Listing for search results, movie archive/cinema/genre pages and tv
    # series: selects the regex set per item.args/contentType and returns
    # locals() for the shared scraper (local names are the contract, do
    # not rename them). The nested itemHook re-routes search results.
    support.log()
    if item.args == 'search':
        action = ''
        patron = r'<div class="cnt">.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>\s+(?P<title>.+?)(?:\[(?P<lang>Sub-ITA|SUB-ITA|SUB)\])?\s?(?:\[?(?P<quality>HD).+\]?)?\s?(?:\(?(?P<year>\d+)?\)?)?\s+<[^>]+>[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)"[^<]+<'
        patronBlock = r'<div class="container">(?P<block>.*?)</main>'
    elif item.contentType == 'movie':
        if not item.args:  # menu entry: Film
            patronBlock = r'<h1>Film streaming ita in alta definizione</h1>(?P<block>.*?)<div class="content-sidebar">'
            patron = r'<div class="timeline-right">[^>]+>\s<a href="(?P<url>.*?)".*?src="(?P<thumb>.*?)".*?<h3 class="timeline-post-title">(?:(?P<title>.+?)\s\[?(?P<lang>Sub-ITA)?\]?\s?\[?(?P<quality>HD)?\]?\s?\(?(?P<year>\d+)?\)?)<'
            patronNext = r'<a class="page-link" href="([^"]+)">>'
        elif item.args == 'cinema':
            patronBlock = r'<div class="owl-carousel" id="postCarousel">(?P<block>.*?)<section class="main-content">'
            patron = r'background-image: url\((?P<thumb>.*?)\).*?<h3.*?>(?:(?P<title>.+?)\s\[?(?P<lang>Sub-ITA)?\]?\s?\[?(?P<quality>HD)?\]?\s?\(?(?P<year>\d+)?\)?)<.+?<a.+?<a href="(?P<url>[^"]+)"[^>]+>'
        elif item.args == 'genres':
            # some titles have ' replaced with " by support
            data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
            data = re.sub('\n|\t', ' ', data)
            patron = r'<div class="cnt">\s.*?src="([^"]+)".+?title="((?P<title>.+?)(?:[ ]\[(?P<lang>Sub-ITA|SUB-ITA)\])?(?:[ ]\[(?P<quality>.*?)\])?(?:[ ]\((?P<year>\d+)\))?)"\s*[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s+<a href="(?P<url>[^"]+)"'
            patronBlock = r'<div class="container">(?P<block>.*?)</main>'
            pagination = ''
            patronNext = '<a class="page-link" href="([^"]+)">>>'
    else:
        # tv series: same card layout, but entries open the episode list
        action = 'episodios'
        patron = r'<div class="cnt">\s.*?src="([^"]+)".+?title="((?P<title>.+?)(?:[ ]\[(?P<lang>Sub-ITA|SUB-ITA)\])?(?:[ ]\[(?P<quality>.*?)\])?(?:[ ]\((?P<year>\d+)\))?)"\s*[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s+<a href="(?P<url>[^"]+)"
        ## if item.args == 'search':
        ##     patron = r'<div class="cnt">.*?src="([^"]+)".+?[^>]+>[^>]+>[^>]+>\s+((?P<title>.+?)(?:[ ]\[(?P<lang>Sub-ITA|SUB-ITA)\])?(?:[ ]\[(?P<quality>.*?)\])?(?:[ ]\((?P<year>\d+)\))?)\s+<[^>]+>[^>]+>[^>]+>[ ]<a href="(?P<url>[^"]+)"'
        patronBlock = r'<div class="container">(?P<block>.*?)</main>'

    def itemHook(item):
        # search results mix movies and series: route by the url shape
        if item.args == 'search':
            if 'series' in item.url:
                item.action = 'episodios'
                item.contentType = 'tvshow'
            else:
                item.action = 'findvideos'
                item.contentType = 'movie'
        return item

    #debug = True
    return locals()
def filter_thread(filter, key, item, description):
    # Build the menu Item for one filter value (e.g. one actor/director/
    # genre), enriching actors/directors with a TMDb person lookup and
    # applying any extra values declared in the channel json.
    thumbnail = plot = fanart = ''
    if item.filterkey in ['actors', 'director']:
        dict_ = {
            'url': 'search/person',
            'language': lang,
            'query': filter,
            'page': 1
        }
        tmdb_inf = tmdb.discovery(item, dict_=dict_)
        id = None
        if tmdb_inf.results:
            results = tmdb_inf.results[0]
            id = results['id']
        if id:
            # NOTE: ternary binds to the whole concatenation — falls back
            # to item.thumbnail when profile_path is empty
            thumbnail = 'http://image.tmdb.org/t/p/original' + results[
                'profile_path'] if results['profile_path'] else item.thumbnail
            json_file = httptools.downloadpage(
                'http://api.themoviedb.org/3/person/' + str(id) + '?api_key=' +
                tmdb_api + '&language=en',
                use_requests=True).data
            support.log(json_file)
            plot += jsontools.load(json_file)['biography']
    if description:
        if filter in description:
            # channel-declared extras override the TMDb data
            extra = set_extra_values(item, description[filter], item.path)
            thumbnail = extra.thumb if extra.thumb else item.thumbnail
            fanart = extra.fanart if extra.fanart else item.fanart
            plot = extra.plot if extra.plot else item.plot
    item = Item(channel=item.channel,
                title=support.typo(filter, 'bold'),
                url=item.url,
                media_type=item.media_type,
                action='peliculas',
                thumbnail=thumbnail if thumbnail else item.thumbnail,
                fanart=fanart if fanart else item.fanart,
                plot=plot if plot else item.plot,
                path=item.path,
                filterkey=item.filterkey,
                filter=filter,
                key=key)
    return item
def newest(categoria):
    """Return the latest additions for *categoria* ('series' or
    'peliculas').

    On any scraping error the exception info is logged and [] is
    returned so the global newest aggregation keeps going.
    """
    support.log(categoria)
    item = support.Item()
    try:
        if categoria == "series":
            item.contentType = 'tvshow'
            item.url = host + '/ultimi-episodi-aggiunti'
            item.args = "lastep"
        if categoria == "peliculas":
            item.contentType = 'movie'
            item.url = host + '/ultimi-film-aggiunti'
            item.args = "last"
        return peliculas(item)
    # keep the search going on error; narrowed from a bare `except:`
    # so SystemExit/KeyboardInterrupt pass through
    except Exception:
        import sys
        for line in sys.exc_info():
            support.logger.error("{0}".format(line))
        return []
def findvideos(item):
    # Resolve the direct stream of an episode page: normalise 'updated'
    # urls to the streaming page, then for every episodioN.php link build
    # a direct url carrying Referer and the site cookies as header blob.
    support.log(item)
    itemlist = []
    if item.args == 'updated':
        # e.g. "Episodio 12" extracted from the full title
        ep = support.match(item.fulltitle, r'(Episodio\s*\d+)')[0][0]
        item.url = support.re.sub(r'episodio-\d+-|oav-\d+-', '', item.url)
        if 'streaming' not in item.url:
            item.url = item.url.replace('sub-ita', 'sub-ita-streaming')
        # pick the link of this episode's row
        item.url = support.match(
            item,
            r'<a href="([^"]+)"[^>]+>',
            ep + '(.*?)</tr>',
        )[0][0]
    urls = support.match(item.url, r'(episodio\d*.php.*)')[0]
    for url in urls:
        url = host + '/' + url
        headers['Referer'] = url
        data = support.match(item, headers=headers, url=url)[1]
        cookies = ""
        # pull this host's cookies out of Kodi's cookie jar (tab-separated
        # Netscape format: fields 5/6 are name and value)
        matches = support.re.compile(
            '(.%s.*?)\n' % host.replace("http://", "").replace("www.", ""),
            support.re.DOTALL).findall(support.config.get_cookie_data())
        for cookie in matches:
            cookies += cookie.split('\t')[5] + "=" + cookie.split(
                '\t')[6] + ";"
        headers['Cookie'] = cookies[:-1]
        # direct url + '|' + urlencoded headers (Kodi playback convention)
        url = support.match(data, r'<source src="([^"]+)"[^>]+>'
                            )[0][0] + '|' + support.urllib.urlencode(headers)
        itemlist.append(
            support.Item(channel=item.channel,
                         action="play",
                         title='diretto',
                         quality='',
                         url=url,
                         server='directo',
                         fulltitle=item.fulltitle,
                         show=item.show))
    # NOTE(review): `url` is the loop variable — this raises NameError when
    # `urls` is empty; confirm intended behaviour against the full file
    return support.server(item, url, itemlist)
def set_extra_values(item, json, path):
    # Map the keys of a community-channel json node onto a fresh Item,
    # falling back to *item*'s thumb/fanart/plot when the json does not
    # provide them.
    support.log()
    ret = Item()
    for key in json:
        if key == 'quality':
            ret.quality = json[key].upper()
        elif key == 'language':
            ret.language = json[key].upper()
        elif key == 'plot':
            ret.plot = json[key]
        elif key in ['poster', 'thumbnail']:
            # absolute url -> as is; relative path -> join with the channel
            # path; bare name -> named resource thumb
            ret.thumb = json[key] if ':/' in json[key] else filetools.join(path,json[key]) if '/' in json[key] else get_thumb(json[key])
        elif key == 'fanart':
            ret.fanart = json[key] if ':/' in json[key] else filetools.join(path,json[key])
        elif key in ['url', 'link']:
            ret.url = json[key] if ':/' in json[key] or type(json[key]) == dict else filetools.join(path,json[key])
        elif key == 'seasons_list':
            # NOTE: each of these branches resets ret.url to a new dict
            ret.url = {}
            ret.url['seasons_list'] = json['seasons_list']
        elif key == 'episodes_list':
            ret.url = {}
            ret.url['episodes_list'] = json['episodes_list']
        elif key == 'links':
            ret.url={}
            ret.url['links'] = json[key]
        elif key == 'filter':
            # Py2: dict.keys() is a list, keys()[0] takes the only entry
            filterkey = json[key].keys()[0]
            ret.filter = json[key][filterkey]
            ret.filterkey = filterkey
        elif key == 'description':
            ret.description = json[key]
    if not ret.thumb:
        # the search menu gets its dedicated icon
        if 'get_search_menu' in inspect.stack()[1][3]:
            ret.thumb = get_thumb('search.png')
        else:
            ret.thumb = item.thumbnail
    if not ret.fanart:
        ret.fanart = item.fanart
    if not ret.plot:
        ret.plot = item.plot
    return ret