def menu(item):
    """Build the channel root menu: full listing, genres, A-Z index and search."""
    support.log()
    # (label, typo style, action, optional args)
    entries = [
        ('Tutti', 'bullet bold', 'peliculas', None),
        ('Generi', 'submenu', 'submenu', 'genre'),
        ('A-Z', 'submenu', 'submenu', 'az'),
        ('Cerca', 'submenu', 'search', None),
    ]
    itemlist = []
    for label, style, action, args in entries:
        kwargs = dict(channel=item.channel,
                      title=support.typo(label, style),
                      url=item.url,
                      action=action)
        if args is not None:
            kwargs['args'] = args
        itemlist.append(support.Item(**kwargs))
    return support.thumb(itemlist)
def newest(categoria):
    """Global-news hook: return the 'last added' anime listing.

    Other categories are ignored (implicitly returning None, as before).
    """
    support.log(categoria)
    if categoria != "anime":
        return None
    item = support.Item()
    try:
        item.contentType = 'tvshow'
        item.url = host
        item.args = "last"
        return peliculas(item)
    # on error, give back an empty list so the global search continues
    except:
        import sys
        for trace_part in sys.exc_info():
            support.logger.error("{0}".format(trace_part))
        return []
def learning(item):
    """List the 'learning' sections exposed by the JSON API as browsable items."""
    support.log()
    sections = current_session.get(item.url).json()['contents']
    itemlist = []
    for section in sections:
        name = section['name']
        support.log(name)
        itemlist.append(
            support.Item(channel=item.channel,
                         title=support.typo(name, 'bold'),
                         fulltitle=name,
                         show=name,
                         url=section['contents'],
                         thumbnail=item.thumbnail,
                         action='peliculas',
                         args=item.args))
    return itemlist
def newest(categoria):
    """Global-news hook: return the freshly updated anime episodes."""
    support.info()
    item = support.Item()
    try:
        if categoria == "anime":
            item.url = host + '/fetch_pages.php?request=episodes&d=1'
            item.args = "updated"
            return peliculas(item)
    # on failure, let the global news search continue with other channels
    except:
        import sys
        for trace_part in sys.exc_info():
            support.logger.error("{0}".format(trace_part))
        return []
    # unknown category: nothing to contribute
    return []
def newest(categoria):
    """Return the newest anime items; empty list for any other category."""
    support.log(categoria)
    results = []
    query = support.Item()
    try:
        if categoria == "anime":
            query.url = host
            query.args = 'newest'
            results = peliculas(query)
    # swallow errors so the global news search can keep going
    except:
        import sys
        for trace_part in sys.exc_info():
            support.logger.error("{0}".format(trace_part))
        return []
    return results
def select(item):
    """Expose the show's block/season choices; jump straight in when only one."""
    support.log()
    blocks = current_session.get(item.url).json()['blocks']
    itemlist = [
        support.Item(channel=item.channel,
                     title=support.typo(block['name'], 'bold'),
                     fulltitle=item.fulltitle,
                     show=item.show,
                     thumbnail=item.thumbnail,
                     url=block['sets'],
                     action='episodios',
                     args=item.args)
        for block in blocks
    ]
    # with a single block there is nothing to choose: open it directly
    if len(itemlist) == 1:
        return episodios(itemlist[0])
    return itemlist
def replay_channels(item):
    """List the live channels whose replay schedule for item.date can be browsed."""
    support.log()
    channels = current_session.get(item.url).json()['dirette']
    itemlist = []
    for chan in channels:
        name = chan['channel']
        # per-channel daily schedule JSON
        schedule_url = '%s/palinsesto/app/old/%s/%s.json' % (
            host, name.lower().replace(' ', '-'), item.date)
        itemlist.append(
            support.Item(channel=item.channel,
                         title=support.typo(name, 'bold'),
                         fulltitle=name,
                         show=name,
                         plot=item.title,
                         action='replay',
                         thumbnail=chan['transparent-icon'].replace(
                             "[RESOLUTION]", "256x-"),
                         url=schedule_url))
    return itemlist
def newest(categoria):
    """Global-news hook for series; trims the trailing pagination entry."""
    support.log(categoria)
    results = []
    query = support.Item()
    try:
        if categoria == "series":
            query.url = host
            query.contentType = 'tvshow'
            results = peliculas(query)
            # drop the 'next page' item appended by peliculas()
            results.pop()
    # keep the global search alive when this channel errors out
    except:
        import sys
        for trace_part in sys.exc_info():
            support.logger.error("{0}".format(trace_part))
        return []
    return results
def findvideos(item):
    """Resolve an episode page into playable 'directo' links.

    For 'updated' items the episode page URL is first rebuilt from the
    fulltitle, then each episodio*.php player page is fetched with the
    site cookies attached so the <source> stream URL is reachable.
    """
    support.log(item)
    itemlist = []
    if item.args == 'updated':
        # recover the episode label (e.g. 'Episodio 12') from the title
        ep = support.match(item.fulltitle, r'(Episodio\s*\d+)')[0][0]
        # strip the per-episode slug so item.url points at the show page
        item.url = support.re.sub(r'episodio-\d+-|oav-\d+-', '', item.url)
        if 'streaming' not in item.url:
            item.url = item.url.replace('sub-ita', 'sub-ita-streaming')
        # pick the link from the table row that follows the episode label
        item.url = support.match(
            item,
            r'<a href="([^"]+)"[^>]+>',
            ep + '(.*?)</tr>',
        )[0][0]
    urls = support.match(item.url, r'(episodio\d*.php.*)')[0]
    for url in urls:
        url = host + '/' + url
        headers['Referer'] = url
        data = support.match(item, headers=headers, url=url)[1]
        cookies = ""
        # rebuild the Cookie header from the stored jar for this host; the
        # stream host validates it on the video request
        matches = support.re.compile(
            '(.%s.*?)\n' % host.replace("http://", "").replace("www.", ""),
            support.re.DOTALL).findall(support.config.get_cookie_data())
        for cookie in matches:
            cookies += cookie.split('\t')[5] + "=" + cookie.split(
                '\t')[6] + ";"
        headers['Cookie'] = cookies[:-1]
        # final playable URL: direct <source> plus the url-encoded headers
        url = support.match(data, r'<source src="([^"]+)"[^>]+>'
                            )[0][0] + '|' + support.urllib.urlencode(headers)
        itemlist.append(
            support.Item(channel=item.channel,
                         action="play",
                         title='diretto',
                         quality='',
                         url=url,
                         server='directo',
                         fulltitle=item.fulltitle,
                         show=item.show))
    # NOTE(review): support.server is called with `url` positionally here,
    # unlike the itemlist= form used elsewhere; also `url` is unbound when
    # the loop body never ran — confirm against support.server's signature.
    return support.server(item, url, itemlist)
def findvideos(item):
    """Resolve the page's download/streaming button into direct video links."""
    support.log()
    page_urls = support.match(
        item,
        r'<a href="([^"]+)"><div class="downloadestreaming">',
        headers=headers)[0]
    itemlist = []
    if page_urls:
        video_links = support.match(
            item,
            r'(?:<source type="[^"]+"\s*src=|file:\s*)"([^"]+)"',
            url=page_urls[0],
            headers=headers)[0]
        itemlist = [
            support.Item(channel=item.channel,
                         action="play",
                         title='Diretto',
                         quality='',
                         url=video_link,
                         server='directo',
                         fulltitle=item.fulltitle,
                         show=item.show,
                         contentType=item.contentType,
                         folder=False)
            for video_link in video_links
        ]
    return support.server(item, itemlist=itemlist)
def newest(categoria):
    """Map the global-news category onto the site's 'last added' listings."""
    support.log(categoria)
    # category -> (contentType, site path, peliculas args)
    routes = {
        "series": ('tvshow', '/ultimi-episodi-aggiunti', "lastep"),
        "peliculas": ('movie', '/ultimi-film-aggiunti', "last"),
    }
    item = support.Item()
    try:
        if categoria in routes:
            content_type, path, args = routes[categoria]
            item.contentType = content_type
            item.url = host + path
            item.args = args
        return peliculas(item)
    # let the global search carry on if this channel errors out
    except:
        import sys
        for trace_part in sys.exc_info():
            support.logger.error("{0}".format(trace_part))
        return []
def newest(categoria):
    """Global-news hook: list the latest additions from '/aggiornamenti'.

    Only 'series' is served; other categories fall through to the empty
    itemlist, as before.
    """
    support.info(categoria)
    itemlist = []
    item = support.Item()
    item.url = host + '/aggiornamenti'
    item.args = 'last'
    try:
        if categoria == "series":
            item.contentType = 'tvshow'
            return peliculas(item)
    # keep the global news search going when this channel fails
    except:
        import sys
        for line in sys.exc_info():
            # BUG FIX: was `support.infoger.error(...)` — `infoger` does not
            # exist and raised AttributeError inside the error handler itself.
            support.info("{0}".format(line))
        return []
    return itemlist
def newest(categoria):
    """Novedades hook: newest series, or the last 100 movies added."""
    support.info(categoria)
    item = support.Item()
    is_series = categoria == "series"
    try:
        item.contentType = 'tvshow' if is_series else 'movie'
        # aggiornamento-quotidiano-serie-tv/' was the previous series path
        item.url = host + ('/serietv/' if is_series
                           else '/lista-film-ultimi-100-film-aggiunti/')
        item.args = "newest"
        return peliculas(item)
    # a failure here must not abort the global news search
    except:
        import sys
        for trace_part in sys.exc_info():
            logger.error("{0}".format(trace_part))
        return []
def findvideos(item):
    """Turn an episode/movie page into playable items.

    Handles shortened URLs (unshortenit, adf.ly, bit.ly), schema-less or
    site-relative URLs, and delegates vvvvid links straight to play.
    """
    support.log(item)
    itemlist = []
    if item.episode:
        from lib import unshortenit
        # resolve the shortener first, then pick the link inside the
        # 'Episodio N' table row
        url, c = unshortenit.unshorten(item.url)
        url = support.match(item, r'<a href="([^"]+)"[^>]*>',
                            patronBlock=r'Episodio %s(.*?)</tr>' % item.episode,
                            url=url)[0]
        item.url = url[0] if url else ''
    if 'vvvvid' in item.url:
        # vvvvid links are handled by the player directly
        item.action = 'play'
        itemlist.append(item)
    if 'http' not in item.url:
        # normalize protocol-relative ('//...') or site-relative URLs
        if '//' in item.url[:2]:
            item.url = 'http:' + item.url
        elif host not in item.url:
            item.url = host + item.url
    if 'adf.ly' in item.url:
        item.url = adfly.get_long_url(item.url)
    elif 'bit.ly' in item.url:
        # bit.ly: read the redirect target without downloading the body
        item.url = support.httptools.downloadpage(
            item.url, only_headers=True,
            follow_redirects=False).headers.get("location")
    matches = support.match(item, r'button"><a href="([^"]+)"')[0]
    for video in matches:
        itemlist.append(
            support.Item(channel=item.channel,
                         action="play",
                         title='diretto',
                         url=video,
                         server='directo'))
    return support.server(item, itemlist=itemlist)
def newest(categoria):
    """Collect the site's news feed, dropping a trailing pagination item."""
    support.info(categoria)
    entry = support.Item()
    entry.url = host
    results = []
    try:
        results = news(entry)
        # the final entry is the 'next page' link when its action is 'news'
        if results[-1].action == 'news':
            results.pop()
    # keep the global news search alive on errors
    except:
        import sys
        for trace_part in sys.exc_info():
            support.info(trace_part)
        return []
    return results
def dirette(item):
    """Pair each live channel with its currently-airing programme details."""
    support.log()
    channels = current_session.get(item.url).json()['dirette']
    now_airing = current_session.get(onair).json()['on_air']
    itemlist = []
    for idx, chan in enumerate(channels):
        # on-air feed is index-aligned with the channel list
        current = now_airing[idx]['currentItem']
        name = chan['channel']
        itemlist.append(
            support.Item(channel=item.channel,
                         title=support.typo(name, 'bold'),
                         fulltitle=name,
                         show=name,
                         thumbnail=chan['transparent-icon'].replace(
                             "[RESOLUTION]", "256x-"),
                         fanart=getUrl(current['image']),
                         url=chan['video']['contentUrl'],
                         plot=support.typo(current['name'], 'bold') + '\n\n' +
                         current['description'],
                         action='play'))
    return itemlist
def newest(categoria):
    """Global-news hook: list fresh items, trimming the trailing pager entry."""
    support.log(categoria)
    itemlist = []
    item = support.Item()
    item.url = host
    item.args = 'news'
    item.action = 'peliculas'
    try:
        itemlist = peliculas(item)
        # the last item is the 'next page' link when its action is 'peliculas'
        if itemlist[-1].action == 'peliculas':
            itemlist.pop()
    # keep the global news search alive on errors
    except:
        import sys
        for line in sys.exc_info():
            # BUG FIX: was `support.log({0}.format(line))` — `{0}` is a set
            # literal, which has no .format() and raised AttributeError inside
            # the error handler itself.
            support.log("{0}".format(line))
        return []
    return itemlist
def newest(categoria):
    """Global-news hook for anime updates; trims the 'ultimiep' pager entry."""
    support.log(categoria)
    results = []
    query = support.Item()
    try:
        if categoria == "anime":
            query.url = host
            query.args = "updated"
            results = peliculas(query)
            # drop the trailing 'latest episodes' navigation item
            if results[-1].action == "ultimiep":
                results.pop()
    # keep running on error so the global search is not interrupted
    except:
        import sys
        for trace_part in sys.exc_info():
            support.logger.error("{0}".format(trace_part))
        return []
    return results
def replay(item):
    """List the replayable programmes of one channel's daily schedule."""
    support.log()
    programmes = current_session.get(
        item.url).json()[item.fulltitle][0]['palinsesto'][0]['programmi']
    itemlist = []
    for prog in programmes:
        support.log('KEY=', prog)
        # skip empty slots and programmes without an on-demand path
        if not (prog and prog['pathID']):
            continue
        itemlist.append(
            support.Item(channel=item.channel,
                         thumbnail=getUrl(prog['images']['landscape']),
                         fanart=getUrl(prog['images']['landscape']),
                         url=getUrl(prog['pathID']),
                         title=support.typo(prog['timePublished'],
                                            'color kod bold') +
                         support.typo(' | ' + prog['name'], ' bold'),
                         fulltitle=prog['name'],
                         show=prog['name'],
                         plot=prog['testoBreve'],
                         action='findvideos'))
    return itemlist
def findvideos(item):
    """Resolve an animetubeita episode page into a direct mp4 link.

    The site serves the video only with the session cookies and Referer
    attached, so both are folded into the final URL after the '|'.
    """
    itemlist = []
    if item.args == 'last':
        # from the 'last added' listing, first reach the episode page
        match = support.match(item, patron=r'href="(?P<url>[^"]+)"[^>]+><strong>DOWNLOAD & STREAMING</strong>').match
        if match:
            patronBlock = r'<h6>Episodio</h6>(?P<block>.*?)(?:<!--|</table>)'
            patron = r'<a href="http://link\.animetubeita\.com/2361078/(?P<url>[^"]+)"'
            match = support.match(match, patron=patron, patronBlock=patronBlock, headers=headers).match
        else:
            return itemlist
        if match:
            # the last link of the episode block is the newest mirror
            item.url = match[-1]
        else:
            return itemlist
    data = support.httptools.downloadpage(item.url, headers=headers).data
    cookies = ""
    # rebuild the Cookie header from the stored jar for this domain
    matches = re.compile('(.animetubeita.com.*?)\n', re.DOTALL).findall(support.config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers['Referer'] = item.url
    headers['Cookie'] = cookies[:-1]
    url = support.scrapertools.find_single_match(data, """<source src="([^"]+)" type='video/mp4'>""")
    if not url:
        # some pages embed the link in a JS player config instead
        url = support.scrapertools.find_single_match(data, 'file: "([^"]+)"')
    if url:
        # NOTE(review): urllib.urlencode is Python 2 only — confirm runtime
        url += '|' + urllib.urlencode(headers)
        itemlist.append(
            support.Item(channel=item.channel,
                         action="play",
                         title='diretto',
                         server='directo',
                         quality='',
                         url=url,
                         thumbnail=item.thumbnail,
                         fulltitle=item.fulltitle,
                         show=item.show,
                         contentType=item.contentType,
                         folder=False))
    return support.server(item, itemlist=itemlist)
def search(item, text):
    """Search a programme by name.

    With item.url set, the search is delegated to peliculas(); otherwise
    the full A-Z programme index is downloaded and filtered by *text*.
    """
    support.log()
    itemlist = []
    try:
        if item.url:
            item.search = text
            itemlist = peliculas(item)
        else:
            json = current_session.get(
                host + '/dl/RaiTV/RaiPlayMobile/Prod/Config/programmiAZ-elenco.json'
            ).json()
            # NOTE(review): the inner loop rebinds `key`; it works because
            # json[key] is evaluated before the rebinding, but it is fragile.
            for key in json:
                for key in json[key]:
                    if 'PathID' in key and (text.lower() in key['name'].lower()):
                        itemlist.append(
                            support.Item(
                                channel=item.channel,
                                title=support.typo(key['name'], 'bold'),
                                fulltitle=key['name'],
                                show=key['name'],
                                url=key['PathID'].replace('/?json', '.json'),
                                action='Type',
                                # art preference: portrait, then 4:3, then landscape
                                thumbnail=getUrl(
                                    key['images']['portrait']
                                    if 'portrait' in key['images'] else
                                    key['images']
                                    ['portrait43'] if 'portrait43' in
                                    key['images'] else key['images']
                                    ['landscape']),
                                fanart=getUrl(key['images']['landscape']
                                              if 'landscape' in key['images']
                                              else key['images']['landscape43'])))
    # a failing channel must not break the global search
    except:
        import sys
        for line in sys.exc_info():
            support.logger.error("%s" % line)
        return []
    return itemlist
def findvideos(item):
    """Decode the page's base64-encoded link list into playable items.

    Two patterns are scraped: the `ilinks` JS array (base64 URLs) and the
    server/quality labels of the option tabs; they are zipped by position.
    """
    info()
    import ast
    import base64
    matches = support.match(item,
                            patron=[
                                r'var ilinks\s?=\s?([^;]+)',
                                r' href="#option-\d">([^\s]+)\s*([^\s]+)'
                            ]).matches
    itemlist = []
    list_url = []
    list_quality = []
    list_servers = []
    for match in matches:
        # IDIOM FIX: isinstance() instead of `type(match) == tuple`;
        # imports hoisted out of the loop (they were re-executed per item).
        if isinstance(match, tuple):
            # (server, quality) pair from the option tabs
            list_servers.append(match[0])
            list_quality.append(match[1])
        else:
            # the JS array literal: each entry is a base64-encoded URL
            encLinks = ast.literal_eval(match)
            for link in encLinks:
                linkDec = base64.b64decode(link.encode()).decode()
                if 'player.php' in linkDec:
                    # the player page is just a redirect to the real host
                    linkDec = support.httptools.downloadpage(
                        linkDec,
                        only_headers=True,
                        follow_redirects=False).headers.get('Location')
                if linkDec:
                    list_url.append(linkDec)
    if list_servers:
        for i, url in enumerate(list_url):
            itemlist.append(
                support.Item(channel=item.channel,
                             title=list_servers[i],
                             url=url,
                             action='play',
                             quality=list_quality[i],
                             infoLabels=item.infoLabels))
    return support.server(item, itemlist=itemlist)
def news(item):
    """List the latest episode updates from the page's items-json attribute."""
    support.info()
    item.contentType = 'tvshow'
    itemlist = []
    # BUG FIX: the JSON payload lives inside an HTML attribute, so its quotes
    # are entity-escaped; the original `.replace('"', '"')` was a no-op left
    # over from entity-decoding of the source — restore the &quot; unescape.
    fullJs = json.loads(
        support.match(item, headers=headers,
                      patron=r'items-json="([^"]+)"').match.replace('&quot;', '"'))
    js = fullJs['data']
    for it in js:
        itemlist.append(
            support.Item(channel=item.channel,
                         title=support.typo(
                             it['anime']['title'] + ' - EP. ' + it['number'],
                             'bold'),
                         fulltitle=it['anime']['title'],
                         server='directo',
                         thumbnail=it['anime']['imageurl'],
                         forcethumb=True,
                         url=it['link'],
                         plot=it['anime']['plot'],
                         action='play'))
    # pagination entry when the API reports more pages
    if 'next_page_url' in fullJs:
        itemlist.append(
            item.clone(title=support.typo(
                support.config.get_localized_string(30992), 'color kod bold'),
                       thumbnail=support.thumb(),
                       url=fullJs['next_page_url']))
    return itemlist
def newest(category):
    """Global-news hook for films/series; trims the trailing pager entry."""
    logger.debug(category)
    results = []
    query = support.Item()
    query.args = 1
    query.url = host + ('/film' if category == 'peliculas' else '/serie-tv')
    try:
        results = peliculas(query)
        # the last item is the 'next page' link when its action is 'peliculas'
        if results[-1].action == 'peliculas':
            results.pop()
    # keep the global news search running on errors
    except:
        import sys
        for trace_part in sys.exc_info():
            logger.error(trace_part)
        return []
    return results
def episodios(item):
    """Build the episode (or movie-part) list for a show page."""
    support.info()
    if item.type.lower() == 'movie':
        prefix = 'Parte '
    else:
        prefix = 'Episodio '
    itemlist = [
        support.Item(channel=item.channel,
                     title=support.typo(prefix + ep['number'], 'bold'),
                     episode=ep['number'],
                     fulltitle=item.title,
                     show=item.title,
                     contentTitle='',
                     contentSerieName=item.contentSerieName,
                     thumbnail=item.thumbnail,
                     plot=item.plot,
                     action='findvideos',
                     contentType='episode',
                     url=ep['link'])
        for ep in item.episodes
    ]
    # renumbering plus library/download entries are handled by the helpers
    autorenumber.start(itemlist, item)
    support.videolibrary(itemlist, item)
    support.download(itemlist, item)
    return itemlist
def findvideos(item):
    """Play the direct video, or divert TV pages to the episode listing."""
    support.log()
    page = support.match(item, patron=r'TIPO:\s*</b>\s*([A-Za-z]+)')
    # a 'TV' page that is not already an episode is a show: list its episodes
    if page.match == 'TV' and item.contentType != 'episode':
        item.contentType = 'tvshow'
        item.data = page.data
        return episodios(item)
    if item.contentType != 'episode':
        item.contentType = 'movie'
    video_url = support.match(page.data, patron=r'<source src="([^"]+)"').match
    itemlist = [
        support.Item(channel=item.channel,
                     action="play",
                     title='Diretto',
                     quality='',
                     url=video_url,
                     server='directo',
                     fulltitle=item.fulltitle,
                     show=item.show,
                     contentType=item.contentType,
                     folder=False)
    ]
    return support.server(item, itemlist=itemlist)
def findvideos(item):
    """Resolve an 'updated' episode entry into a direct stream URL.

    Rebuilds the episode page URL from the episode number, then fetches
    the episodio*.php player page attaching the site cookies as headers.
    """
    support.log(item)
    itemlist = []
    if item.args == 'updated':
        # episode number extracted from the title
        ep = support.match(item.fulltitle, patron=r'(\d+)').match
        item.url = support.re.sub(r'episodio-\d+-|oav-\d+-' + ep, '', item.url)
        if 'streaming' not in item.url:
            item.url = item.url.replace('sub-ita', 'sub-ita-streaming')
        # the link sits in the table row right after the episode number
        item.url = support.match(item, patron=ep + r'[^>]+>[^>]+>[^>]+><a href="([^"]+)"').match
    # post
    url = host + '/' + support.match(
        item.url, patron=r'(episodio\d*.php.*?)"').match.replace(
            '%3F', '?').replace('%3D', '=')
    headers['Referer'] = url
    cookies = ""
    # rebuild the Cookie header from the stored jar for this host
    matches = support.re.compile(
        '(.%s.*?)\n' % host.replace("http://", "").replace("www.", ""),
        support.re.DOTALL).findall(support.config.get_cookie_data())
    for cookie in matches:
        cookies += cookie.split('\t')[5] + "=" + cookie.split('\t')[6] + ";"
    headers['Cookie'] = cookies[:-1]
    url = support.match(url, patron=r'<source src="([^"]+)"[^>]+>').match
    itemlist.append(
        support.Item(channel=item.channel,
                     action="play",
                     title='Diretto',
                     # headers appended so the player forwards Referer+Cookie
                     url=url + '|' + support.urllib.urlencode(headers),
                     server='directo'))
    return support.server(item, itemlist=itemlist)
def addinfo(key, item):
    """Build a browsable Item for catalogue entry *key*.

    Honours an active search filter on item.search; returns None (as the
    original did implicitly) when the entry does not match.
    """
    support.log()
    # PERF FIX: check the search filter BEFORE issuing the info request —
    # the original fetched info_url unconditionally, wasting one HTTP
    # request for every filtered-out entry. Observable results unchanged.
    if item.search and item.search.lower() not in key['name'].lower():
        return None
    info = current_session.get(getUrl(key['info_url'])).json()
    it = support.Item(
        channel=item.channel,
        title=support.typo(key['name'], 'bold'),
        fulltitle=key['name'],
        show=key['name'],
        thumbnail=getUrl(
            key['images']['portrait_logo'] if key['images']
            ['portrait_logo'] else key['images']['landscape']),
        fanart=getUrl(key['images']['landscape']),
        url=getUrl(key['path_id']),
        plot=info['description'])
    # single-layout entries are movies; everything else is a series with sets
    if 'layout' not in key or key['layout'] == 'single':
        it.action = 'findvideos'
        it.contentType = 'movie'
        it.contentTitle = it.fulltitle
    else:
        it.action = 'select'
        it.contentType = 'tvshow'
        it.contentSerieName = it.fulltitle
    return it
def findvideos(item):
    """Collect the streaming links of a page's LINK STREAMING section.

    Special-cases keepem.online redirect farms and keepsetsu/woof embeds.
    """
    support.log()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # flatten the markup so the block regexes can match across lines
    data = re.sub(r'\n|\t', ' ', data)
    data = re.sub(r'>\s\s*<', '><', data)
    patronBlock = r'LINK STREAMING(?P<block>.*?)LINK DOWNLOAD'
    patron = r'href="(.+?)"'
    block = scrapertoolsV2.find_single_match(data, patronBlock)
    urls = scrapertoolsV2.find_multiple_matches(block, patron)
    for url in urls:
        lang = ''
        if 'sub_ita' in url.lower():
            lang = 'Sub-ITA'
        else:
            lang = 'ITA'
        if 'keepem.online' in data:
            # resolve every keepem link and let servertools detect players
            urls = scrapertoolsV2.find_multiple_matches(
                data, r'(https://keepem\.online/f/[^"]+)"')
            for url in urls:
                url = httptools.downloadpage(url).url
                itemlist += servertools.find_video_items(data=url)
        elif 'keepsetsu' in url.lower() or 'woof' in url.lower():
            if 'keepsetsu' in url.lower():
                support.log("keepsetsu url -> ", url)
                # keepsetsu double-hops: follow the redirect, then fetch
                data = httptools.downloadpage(url).url
                support.log("LINK-DATA :", data)
                data = httptools.downloadpage(data).data
                support.log("LINK-DATA2 :", data)
                # the target URL is parked in the meta description
                video_urls = scrapertoolsV2.find_single_match(
                    data, r'<meta name="description" content="([^"]+)"')
            else:
                data = httptools.downloadpage(url).data
                host_video = scrapertoolsV2.find_single_match(
                    data, r'var thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
                link = scrapertoolsV2.find_single_match(data, r'<video src="([^"]+)"')
                video_urls = host_video + link
        # NOTE(review): `video_urls` is unbound here when neither branch above
        # ran for the first URL — confirm the intended placement of this append.
        title = support.typo(item.fulltitle, '_ bold') + support.typo(
            lang, '_ [] color kod')
        itemlist.append(
            support.Item(
                channel=item.channel,
                action="play",
                contentType=item.contentType,
                title=title,
                fulltitle=title,
                show=title,
                url=video_urls,
                infoLabels=item.infoLabels,
                thumbnail=item.thumbnail,
                contentSerieName=item.contentSerieName,
                # NOTE(review): `lang == []` can never be true (lang is a
                # string), so this always evaluates to `lang` — confirm intent
                contentLanguage='ITA' if lang == [] else lang,
                args=item.args,
                server='directo',
            ))
    return itemlist
def findvideos(item):
    """Resolve item.url into playable entries.

    Four URL families are handled: vvvvid shows (via the vvvvid ondemand
    API and the embed-info decoder), adf.ly and bit.ly shorteners, and
    plain site URLs whose player page is scraped for the <source>.
    """
    support.log(item)
    # try:
    #     from urlparse import urljoin
    # except:
    #     from urllib.parse import urljoin
    # support.dbg()
    itemlist = []
    if 'vvvvid' in item.url:
        import requests
        from lib import vvvvid_decoder
        if support.match(item.url, string=True, patron=r'(\d+/\d+)').match:
            # URL already carries show/episode ids: playable as-is
            item.action = 'play'
            itemlist.append(item)
        else:
            # VVVVID vars
            vvvvid_host = 'https://www.vvvvid.it/vvvvid/ondemand/'
            vvvvid_headers = {
                'User-Agent':
                'Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0'
            }
            # VVVVID session: the conn_id obtained from the login endpoint
            # must accompany every subsequent API call
            current_session = requests.Session()
            login_page = 'https://www.vvvvid.it/user/login'
            conn_id = current_session.get(
                login_page, headers=vvvvid_headers).json()['data']['conn_id']
            payload = {'conn_id': conn_id}
            # collect parameters
            show_id = support.match(item.url, string=True, patron=r'(\d+)').match
            ep_number = support.match(item.title, patron=r'(\d+)').match
            json_file = current_session.get(vvvvid_host + show_id + '/seasons/',
                                            headers=vvvvid_headers,
                                            params=payload).json()
            season_id = str(json_file['data'][0]['season_id'])
            json_file = current_session.get(vvvvid_host + show_id + '/season/' +
                                            season_id + '/',
                                            headers=vvvvid_headers,
                                            params=payload).json()
            # select the correct episode
            for episode in json_file['data']:
                support.log('Number', int(episode['number']), int(ep_number))
                if int(episode['number']) == int(ep_number):
                    # NOTE(review): `x or x` is redundant — both operands are
                    # episode['embed_info']; confirm the second was meant to be
                    # a fallback field
                    url = vvvvid_decoder.dec_ei(episode['embed_info']
                                                or episode['embed_info'])
                    if 'youtube' in url:
                        item.url = url
                    item.url = url.replace('manifest.f4m',
                                           'master.m3u8').replace(
                                               'http://', 'https://').replace(
                                                   '/z/', '/i/')
                    if 'https' not in item.url:
                        # stream path without scheme: query the playlist for
                        # the variant name and build the final mp4 URL
                        url = support.match(
                            item,
                            url='https://or01.top-ix.org/videomg/_definst_/mp4:'
                            + item.url + '/playlist.m3u')[1]
                        url = url.split()[-1]
                        itemlist.append(
                            support.Item(
                                action='play',
                                url=
                                'https://or01.top-ix.org/videomg/_definst_/mp4:'
                                + item.url + '/' + url,
                                server='directo'))
    elif 'adf.ly' in item.url:
        from servers.decrypters import adfly
        # NOTE(review): `url` is computed in this branch but never appended
        # to itemlist — confirm whether this branch is dead code
        url = adfly.get_long_url(item.url)
    elif 'bit.ly' in item.url:
        # bit.ly: read the redirect target without downloading the body
        url = support.httptools.downloadpage(
            item.url, only_headers=True,
            follow_redirects=False).headers.get("location")
    else:
        # rebuild a clean site URL from the path fragments
        url = host
        for u in item.url.split('/'):
            # support.log(i)
            if u and 'animeforce' not in u and 'http' not in u:
                url += '/' + u
        if 'php?' in url:
            # php redirector: follow it, then pick the button link
            url = support.httptools.downloadpage(
                url, only_headers=True,
                follow_redirects=False).headers.get("location")
            url = support.match(
                url,
                patron=r'class="button"><a href=(?:")?([^" ]+)',
                headers=headers).match
        else:
            url = support.match(url,
                                patron=[
                                    r'<source src=(?:")?([^" ]+)',
                                    r'name="_wp_http_referer" value="([^"]+)"'
                                ]).match
        # normalize protocol-relative / root-relative results
        if url.startswith('//'):
            url = 'https:' + url
        elif url.startswith('/'):
            url = 'https:/' + url
        itemlist.append(
            support.Item(channel=item.channel,
                         action="play",
                         title='Diretto',
                         url=url,
                         server='directo'))
    return support.server(item, itemlist=itemlist)