def episodios(item):
    """List every episode of a series: resolve the player iframe, then walk
    each season page and collect its episode links."""
    log()
    itemlist = []
    data = httptools.downloadpage(item.url).data

    # The show page only embeds a player iframe; the real season/episode
    # navigation lives on the iframe's own URL (strip the tracking suffix).
    patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'
    url = scrapertoolsV2.find_single_match(data, patron).replace("?seriehd", "")

    seasons = support.match(item, r'<a href="([^"]+)">(\d+)<',
                            r'<h3>STAGIONE</h3><ul>(.*?)</ul>', headers, url)[0]
    for season_url, season in seasons:
        season_url = urlparse.urljoin(url, season_url)
        episodes = support.match(item, r'<a href="([^"]+)">(\d+)<',
                                 '<h3>EPISODIO</h3><ul>(.*?)</ul>', headers, season_url)[0]
        for episode_url, episode in episodes:
            episode_url = urlparse.urljoin(url, episode_url)
            # Titles follow the usual "SxEE" convention, e.g. "2x05"
            title = season + "x" + episode.zfill(2)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType="episode",
                     title=support.typo(title + ' - ' + item.show, 'bold'),
                     url=episode_url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail))

    support.videolibrary(itemlist, item, 'color kod bold')
    return itemlist
def episodios(item):
    """Build the episode list from the JSON embedded in the page's
    ``seasons="..."`` HTML attribute.

    Fix: the original called ``.replace('"', '"')`` — a no-op. The JSON is
    embedded inside a double-quoted HTML attribute, so its inner quotes are
    HTML-escaped as ``&quot;`` and must be unescaped before ``json.loads``.
    """
    support.info()
    itemlist = []
    js = json.loads(
        support.match(item.url, patron=r'seasons="([^"]+)').match.replace('&quot;', '"'))
    support.info(js)
    for episodes in js:
        for it in episodes['episodes']:
            support.info(it)
            itemlist.append(
                support.Item(
                    channel=item.channel,
                    # "SxEE - name", e.g. "1x03 - Pilot"
                    title=support.typo(
                        str(episodes['number']) + 'x' + str(it['number']).zfill(2)
                        + ' - ' + it['name'], 'bold'),
                    episode=it['number'],
                    season=episodes['number'],
                    thumbnail='https://image.tmdb.org/t/p/w1280' + it['images'][0]['url'],
                    fanart='https://image.tmdb.org/t/p/w1280' + it['images'][0]['url'],
                    plot=it['plot'],
                    action='findvideos',
                    contentType='episode',
                    url=host + '/watch/' + str(episodes['title_id']) + '?e=' + str(it['id'])))
    support.videolibrary(itemlist, item)
    support.download(itemlist, item)
    return itemlist
def episodios(item):
    """Scrape the episode list of a show on filmigratis."""
    logger.info("[filmigratis.py] episodios")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data

    # Episode anchors live between the row div and the main-content section
    block = scrapertools.find_single_match(
        data, r'<div class="row">(.*?)<section class="main-content">')
    patron = r'href="(.*?)".*?(S[^<]+) <'
    for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(block):
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # Normalise "S01 - EP 02" style labels into "1x02"
        scrapedtitle = scrapedtitle.replace("S0", "").replace(" - EP ", "x")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType='episode',
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumb,
                 args=item.args,
                 show=item.title))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    support.videolibrary(itemlist, item, 'color kod')
    return itemlist
def player_list(item):
    """List the episodes exposed by the toggleable player panel; fall back to
    the plain player when the panel is absent."""
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data
    if "panel_toggle toggleable" in data:
        # Extract the block holding the episode list
        block = scrapertools.find_single_match(
            data, r'panel_toggle toggleable.*?(<div.*?)<!-- Javascript -->')
        patron = r'data-url="([^"]+)">.*?([A-Z].*?) '
        for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(block):
            # Strip release-name noise (container, codec, language, group tags)
            scrapedtitle = re.sub('mp4|avi|mkv', '', scrapedtitle)
            scrapedtitle = re.sub('WebRip|WEBRip|x264|AC3|1080p|DLMux|XviD-|BDRip|BluRay|HD|WEBMux|H264|BDMux|720p|TV|NFMux|DVDRip|DivX|DVDip|Ac3|Dvdrip|Mux|NovaRip|DVD|SAT|Divx', '', scrapedtitle)
            scrapedtitle = re.sub('ITA|ENG|Italian|SubITA|SUBITA|iTALiAN|LiAN|Ita', '', scrapedtitle)
            scrapedtitle = re.sub('Pir8|UBi|M L|BEDLAM|REPACK|DD5.1|bloody|SVU', '', scrapedtitle)
            scrapedtitle = scrapedtitle.replace(".", " ").replace(" - ", " ").replace(" -", "").replace(" ", "")
            itemlist.append(
                Item(channel=__channel__,
                     action="halfplayer",
                     contentType=item.contentType,
                     title=scrapedtitle,
                     thumbnail=item.thumbnail,
                     fulltitle=scrapedtitle,
                     url="https://mondolunatico.tk" + scrapedurl,
                     show=item.show))
        support.videolibrary(itemlist, item, 'color kod')
        return itemlist
    else:
        return player(item)
def episodios(item):
    """Three-way episode listing: season sub-menus, threaded block loading,
    or a flat JSON item feed (with St/Ep title parsing and date renumbering)."""
    support.info()
    itemlist = []
    if type(item.data) in [list, dict] and len(item.data) > 1 \
            and ('name' in item.data[0] and 'stagione' not in item.data[0]['name'].lower()):
        # Multiple non-season entries: expose each one as its own show menu
        for key in item.data:
            itemlist.append(item.clone(title=support.typo(key['name'], 'bold'),
                                       url=getUrl(key['path_id']),
                                       contentType='tvshow',
                                       action='episodios'))
    elif type(item.data) in [list, dict]:
        # NOTE(review): this serial pass duplicates the threaded pass below and
        # its results are discarded — looks like leftover code; confirm before
        # removing (load_episodes may have needed side effects).
        for key in item.data:
            load_episodes(key, item)
        with futures.ThreadPoolExecutor() as executor:
            itlist = [executor.submit(load_episodes, key, item) for key in item.data]
            for res in futures.as_completed(itlist):
                if res.result():
                    itemlist += res.result()
        if itemlist and itemlist[0].VL:
            itemlist = sorted(itemlist, key=lambda it: it.order)
            item.action = 'episodios'
            support.videolibrary(itemlist, item)
        else:
            itemlist = sorted(itemlist, key=lambda it: it.title)
    else:
        date = ''
        if type(item.data) in [list, dict]:
            item.data = getUrl(item.url[0]['path_id'])
        json = current_session.get(item.url).json()['items']
        for key in json:
            # "St 2 Ep 5" / "Ep 5" in the subtitle gives season/episode numbers
            ep = support.match(key['subtitle'], patron=r'(?:St\s*(\d+))?\s*Ep\s*(\d+)').match
            if ep:
                season = '1' if not ep[0] else ep[0]
                episode = ep[1].zfill(2)
                title = support.re.sub(r'(?:St\s*\d+)?\s*Ep\s*\d+', '', key['subtitle'])
                title = season + 'x' + episode + (' - ' + title if not title.startswith(' ') else title if title else '')
            elif item.season and support.match(item.title.lower(), patron=r'(puntate)').match:
                title = key['subtitle'].strip()
                if not title:
                    title = key['name']
                # Normalise dd/mm/yyyy into a sortable yy/mm/dd key
                date = support.match(title, patron=r'(\d+/\d+/\d+)').match
                if date:
                    date = title.split('/')
                    # NOTE(review): date[2][-2] takes a single character of the
                    # year — [-2:] looks intended; confirm before changing.
                    date = date[2][-2] + '/' + date[1] + '/' + date[0]
            else:
                title = key['subtitle'].strip()
                if not title:
                    title = key['name']
            itemlist.append(item.clone(title=support.typo(title, 'bold'),
                                       action='findvideos',
                                       VL=True if ep else False,
                                       plot=key['description'],
                                       fanart=getUrl(key['images']['landscape']),
                                       url=key['video_url'],
                                       contentType='episode',
                                       date=date))
        if item.season and support.match(item.title.lower(), patron=r'(puntate)').match:
            # Dateless feeds: sort chronologically, then renumber 1..N
            itemlist = sorted(itemlist, key=lambda it: it.date)
            for i, it in enumerate(itemlist):
                episode = str(i + 1)
                it.title = support.typo(item.season + 'x' + episode, 'bold') + (' - ' + it.title)
        if itemlist and itemlist[0].VL:
            support.videolibrary(itemlist, item)
    if itemlist and not support.match(itemlist[0].title, patron=r'[Ss]?(\d+)(?:x|_|\.|\s+)[Ee]?[Pp]?(\d+)').match \
            and inspect.stack()[1][3] not in ['find_episodes']:
        autorenumber.start(itemlist, item)
    return itemlist
def episodios(item):
    """Collect episode links from the servers widget of the show page."""
    log()
    itemlist = []
    patron_block = r'<div class="widget servers".*?>(.*?)<div id="download"'
    patron = r'<li><a [^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+" href="([^"]+)"[^>]+>([^<]+)<'
    for scrapedurl, scrapedtitle in support.match(item, patron, patron_block)[0]:
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 title='[B] Episodio ' + scrapedtitle + '[/B]',
                 url=urlparse.urljoin(host, scrapedurl),
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 plot=item.plot,
                 fanart=item.thumbnail,
                 thumbnail=item.thumbnail))
    autorenumber.renumber(itemlist, item, 'bold')
    support.videolibrary(itemlist, item)
    return itemlist
def videoplayer(item):
    """Turn the dooplay link list into playable items; mondolunatico links go
    through the local 'player' action instead of direct play."""
    support.log()
    itemlist = []
    for link in support.dooplay_get_links(item, host):
        # Server name is the host up to the first dot; empty means self-hosted
        server = link['server'][:link['server'].find(".")]
        if server == "":
            server = "mondolunatico"
        itemlist.append(
            Item(channel=item.channel,
                 action="player" if "mondolunatico" in server else "play",
                 title=server + " [COLOR blue][" + link['title'] + "][/COLOR]",
                 url=link['url'],
                 server=server,
                 fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 quality=link['title'],
                 contentType=item.contentType,
                 folder=False))
    support.videolibrary(itemlist, item, 'color kod', function_level=2)
    autoplay.start(itemlist, item)
    return itemlist
def episodios(item):
    """List a Mediaset programme's episodes from the theplatform feed,
    keeping only entries that expose at least one media URL."""
    support.info()
    itemlist = []
    episode = ''
    json = current_session.get(
        'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs?byCustomValue={subBrandId}{'
        + item.url + '}').json()['entries']
    for it in json:
        urls = []
        if 'media' in it:
            for key in it['media']:
                urls.append(key['publicUrl'])
        if urls:
            title = it['title'].split('-')[-1].strip()
            if it['tvSeasonNumber'] and it['tvSeasonEpisodeNumber']:
                item.infoLabels['season'] = it['tvSeasonNumber']
                item.infoLabels['episode'] = it['tvSeasonEpisodeNumber']
                # "SxEE - " prefix for the visible title
                episode = '%dx%02d - ' % (it['tvSeasonNumber'], it['tvSeasonEpisodeNumber'])
            itemlist.append(
                item.clone(action='findvideos',
                           title=support.typo(episode + title, 'bold'),
                           contentType='episode',
                           thumbnail=it['thumbnails']['image_vertical-264x396']['url'] if 'image_vertical-264x396' in it['thumbnails'] else '',
                           fanart=it['thumbnails']['image_keyframe_poster-1280x720']['url'] if 'image_keyframe_poster-1280x720' in it['thumbnails'] else '',
                           plot=it['longDescription'] if 'longDescription' in it else it['description'],
                           urls=urls,
                           url=it['mediasetprogram$pageUrl']))
    # Only numbered episodes are worth adding to the video library
    if episode:
        support.videolibrary(itemlist, item)
    return sorted(itemlist, key=lambda it: it.title)
def anime(item):
    """Episode listing for anime pages: each sp-body block is a season whose
    anchors may carry extra mirror links appended newline-separated."""
    log()
    itemlist = []
    seasons = support.match(item, r'<div class="sp-body[^"]+">(.*?)<\/div>')[0]
    for season in seasons:
        episodes = scrapertools.find_multiple_matches(
            season, r'<a.*?href="([^"]+)"[^>]+>([^<]+)<\/a>(.*?)<(:?br|\/p)')
        for url, title, urls, none in episodes:
            # Append any extra mirror anchors to the primary url, one per line
            urls = scrapertools.find_multiple_matches(urls, '<a.*?href="([^"]+)"[^>]+>')
            for url2 in urls:
                url += url2 + '\n'
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title=support.typo(title + ' - ' + item.fulltitle, 'bold'),
                     url=url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail,
                     args=item.args))
    autorenumber.renumber(itemlist, item, 'bold')
    support.videolibrary(itemlist, item, 'color kod bold')
    return itemlist
def episodios(item):
    """Synthesise one item per episode from the counters encoded in the URL
    (``...-S<season>-<episodes>``).

    Fix: ``range(1, int(episode))`` excluded the last episode because the
    stop value of ``range`` is exclusive; use ``int(episode) + 1``.
    """
    domain, id, season, episode = scrapertoolsV2.find_single_match(
        item.url, r'(https?://[a-z0-9.-]+).*?/([^-/]+)-S([0-9]+)-([0-9]+)$')
    itemlist = []
    for n in range(1, int(episode) + 1):
        url = domain + '/play_s.php?s=' + id + '-S' + season + '&e=' + str(n)
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=str(int(season)) + 'x' + str(n) + support.typo(item.quality, '-- [] color kod'),
                 url=url,
                 fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 quality=item.quality,
                 contentType=item.contentType,
                 folder=False,
                 args={'id': id, 'season': season, 'episode': episode}))
    support.videolibrary(itemlist, item)
    return itemlist
def episodios(item):
    """Episode list from the ``seasons="..."`` JSON attribute of the page.

    Fix: the original called ``.replace('"', '"')`` — a no-op. The JSON sits
    inside a double-quoted HTML attribute, so its inner quotes are escaped as
    ``&quot;`` and must be unescaped before ``json.loads``.
    """
    # getHeaders()
    logger.debug()
    itemlist = []
    js = json.loads(
        support.match(item.url, patron=r'seasons="([^"]+)').match.replace('&quot;', '"'))
    for episodes in js:
        for it in episodes['episodes']:
            itemlist.append(
                support.Item(
                    channel=item.channel,
                    # "SxEE - name", e.g. "2x07 - Title"
                    title=support.typo(
                        str(episodes['number']) + 'x' + str(it['number']).zfill(2)
                        + ' - ' + it['name'], 'bold'),
                    episode=it['number'],
                    season=episodes['number'],
                    thumbnail=it['images'][0]['original_url'] if 'images' in it and 'original_url' in it['images'][0] else item.thumbnail,
                    fanart=item.fanart,
                    plot=it['plot'],
                    action='findvideos',
                    contentType='episode',
                    contentSerieName=item.fulltitle,
                    url=host + '/watch/' + str(episodes['title_id']),
                    episodeid='?e=' + str(it['id'])))
    support.videolibrary(itemlist, item)
    support.download(itemlist, item)
    return itemlist
def episodios(item):
    """Resolve an anime's seasons/episodes from its JSON API payload; when
    invoked by the video library, expand every season in parallel."""
    logger.debug()
    itemlist = []
    json = httptools.downloadpage(item.url, CF=False).json
    if type(json) == list:
        # Flat payload: a plain list of episodes
        item.show_renumber = False
        itemlist = list_episodes(item, json)
    elif json.get('seasons'):
        seasons = json['seasons']
        seasons.sort(key=lambda s: s['episodeStart'])
        for it in seasons:
            title = it['name']
            itemlist.append(
                item.clone(title=title,
                           id='{}/season/{}'.format(it['animeId'], it['id']),
                           contentType='season',
                           action='list_episodes',
                           plot=json['storyline'],
                           year=it['yearStart'],
                           show_renumber=True))
        # When called by the videolibrary/autorenumber machinery, flatten the
        # seasons into their episodes right away (fetched concurrently)
        if stack()[1][3] in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
            itlist = []
            with futures.ThreadPoolExecutor() as executor:
                eplist = []
                for ep in itemlist:
                    ep.show_renumber = False
                    eplist.append(executor.submit(list_episodes, ep))
                for res in futures.as_completed(eplist):
                    if res.result():
                        itlist.extend(res.result())
            itemlist = itlist
    elif json.get('episodes'):
        itemlist = list_episodes(item, json)
    # Offer renumbering for plain episode lists
    if stack()[1][3] not in ['find_episodes'] and itemlist and itemlist[0].contentType == 'episode':
        autorenumber.start(itemlist, item)
    # Offer the add-to-videolibrary entry for interactive calls only
    if stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
        support.videolibrary(itemlist, item)
    return itemlist
def episodios(item):
    """Dispatch a mondolunatico show page to the right handler: inline
    season list, single file, keeplinks gate, or dooplayer episode list."""
    support.log()
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    if "<h2>Stagioni ed Episodi</h2>" in data:
        # The page carries the seasons with their episodes directly
        block = scrapertools.find_single_match(
            data, r'<h2>Stagioni ed Episodi</h2>(.*?)<div class=\'sbox\'>')
        patron = r'episodiotitle.*?href=\'([^\']+)\'>([^<]+)'
        for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(block):
            itemlist.append(
                Item(channel=__channel__,
                     action="videoplayer",
                     contentType=item.contentType,
                     title=scrapedtitle,
                     thumbnail=item.thumbnail,
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     args=item.args,
                     show=item.show))
        support.videolibrary(itemlist, item, 'color kod')
        return itemlist
    if "File Unico..." in data:
        # Single-file release
        return dooplayer(item)
    if "http://mondolunatico.org/stream/wp-content/uploads/2017/08/hand.gif" in data:
        # Keeplinks-protected page
        return keeplink(item)
    else:
        # Dooplayer page possibly listing several episodes
        patron = r'<div class="sp-head" title="Espandi">([^<]+).*?<iframe.*?src="([^"]+)'
        matches = re.compile(patron, re.DOTALL).findall(data)
        if len(matches) > 1:
            for scrapedtitle, scrapedurl in matches:
                itemlist.append(
                    Item(channel=__channel__,
                         action="player_list",
                         contentType=item.contentType,
                         title=scrapedtitle,
                         thumbnail=item.thumbnail,
                         fulltitle=scrapedtitle,
                         url=scrapedurl,
                         show=item.show))
            return itemlist
        else:
            return dooplayer(item)
def episodios(item):
    """Episode listing via the embedded player: detect non-series pages
    first, then walk the player's season and episode nav menus."""
    log()
    itemlist = []
    if item.args == 'anime':
        return anime(item)
    data = httptools.downloadpage(item.url).data
    # Check the category label to confirm this really is a series page
    check = scrapertoolsV2.find_single_match(
        data.replace('\t', '').replace('\n', ''),
        r'<div class="category-film"><h3>([^<]+)<\/h3>')
    if 'serie tv' not in check.lower():
        return findvideos(item)
    elif 'anime' in check.lower():
        return findvideos(item)
    patron = r'<iframe src="([^"]+)" scrolling="no" frameborder="0" width="626" height="550" allowfullscreen="true" webkitallowfullscreen="true" mozallowfullscreen="true">'
    url = scrapertoolsV2.find_single_match(data, patron)
    log('URL =', url)
    seasons = support.match(
        item, r'<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>',
        r'Stagioni<\/a>.*?<ul class="nav navbar-nav">(.*?)<\/ul>',
        headers=headers, url=url)[0]
    for season_url, season in seasons:
        season_url = urlparse.urljoin(url, season_url)
        episodes = support.match(
            item, r'<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>',
            r'Episodio<\/a>.*?<ul class="nav navbar-nav">(.*?)<\/ul>',
            headers=headers, url=season_url)[0]
        for episode_url, episode in episodes:
            episode_url = urlparse.urljoin(url, episode_url)
            title = season + "x" + episode.zfill(2)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title=support.typo(title + ' - ' + item.fulltitle, 'bold'),
                     url=episode_url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail))
    support.videolibrary(itemlist, item, 'color kod bold')
    return itemlist
def findvideos(item):
    """Resolve the playable servers via the hdpass helper, optionally
    validating the links before handing them to autoplay."""
    support.log()
    itemlist = support.hdpass_get_servers(item)
    if checklinks:
        itemlist = servertools.check_list_links(itemlist, checklinks_number)
    autoplay.start(itemlist, item)
    support.videolibrary(itemlist, item, 'color kod bold')
    return itemlist
def episodios(item):
    """Collect episodes season by season from the show's JSON descriptor."""
    support.log()
    itemlist = []
    data = support.match(item.url, headers=headers).data
    json_object = jsontools.load(data)
    for season in json_object['seasons']:
        # Each season exposes its releases under <host><@id>/releases
        seas_url = host + season['@id'] + '/releases'
        itemlist_season = get_season(item, seas_url, season['seasonNumber'])
        if itemlist_season:
            itemlist.extend(itemlist_season)
    support.videolibrary(itemlist, item, 'color kod bold')
    return itemlist
def episodios(item):
    """Find every season of a series via search, then synthesise one item per
    episode from the episode counters embedded in the result URLs.

    Fix: ``range(1, int(episode))`` dropped the last (newest) episode because
    ``range``'s stop value is exclusive; use ``int(episode) + 1``.
    """
    url = item.url
    item.cercaSerie = True
    itemlist = search(item, item.fulltitle.replace("'", ""))
    # Map each season tag to the highest episode number seen in the results
    stagioni = {}
    for i in itemlist[:-1]:
        spl1 = i.url.split('-')
        if len(spl1) > 3:
            st = spl1[1] + '-' + spl1[2]
        else:
            st = spl1[-2]
        nEp = int(spl1[-1])
        if st not in stagioni.keys():
            stagioni[st] = nEp
        elif nEp > stagioni[st]:
            stagioni[st] = nEp
    itemlist = []
    domain, id = scrapertools.find_single_match(
        url, r'(https?://[a-z0-9.-]+)/[^/]+/([^-/]+)')
    for st in sorted(stagioni.keys()):
        season = st[1:]
        episode = stagioni[st]
        for n in range(1, int(episode) + 1):
            url = domain + '/play_s.php?s=' + id + '-S' + season + '&e=' + str(n)
            if '-' in season:
                # see e.g. https://stpgs.ml/SerieTv/Atypical-S01-8-8.html
                season = season.split('-')[0]
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=str(int(season)) + 'x' + str(n) + support.typo(item.quality, '-- [] color kod'),
                     url=url,
                     fulltitle=item.fulltitle,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     quality=item.quality,
                     contentType='episode',
                     folder=True,
                     args={'id': id, 'season': season, 'episode': episode}))
    support.videolibrary(itemlist, item)
    return itemlist
def episodios(item):
    """Scrape the episode list through the site's XHR loader; redirect to
    findvideos when the title is actually a movie.

    Fix: the movie-detection regex was ``r'\\Episodi:...'`` — ``\\E`` is an
    invalid escape in Python's ``re`` (raises ``re.error: bad escape``), so
    the check could never match; the stray backslash is removed.
    """
    log()
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
    anime_id = scrapertools.find_single_match(data, r'\?anime_id=(\d+)')
    # movie or series
    movie = scrapertools.find_single_match(data, r'Episodi:</b>\s(\d*)\sMovie')
    # The episode table is served by an AJAX endpoint
    data = httptools.downloadpage(host + "/loading_anime?anime_id=" + anime_id,
                                  headers={'X-Requested-With': 'XMLHttpRequest'}).data
    patron = r'<td style="[^"]+"><b><strong" style="[^"]+">(.+?)</b></strong></td>\s*'
    patron += r'<td style="[^"]+"><a href="([^"]+)"'
    for scrapedtitle, scrapedurl in scrapertools.find_multiple_matches(data, patron):
        scrapedtitle = cleantitle(scrapedtitle)
        scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
        scrapedtitle = '[B]' + scrapedtitle + '[/B]'
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 title=scrapedtitle,
                 url=urlparse.urljoin(host, scrapedurl),
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 plot=item.plot,
                 fanart=item.thumbnail,
                 thumbnail=item.thumbnail))
    # A single "Movie" entry (or a positive movie flag) means this is a film
    if ((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie) and item.contentType != 'movie':
        item.url = itemlist[0].url
        item.contentType = 'movie'
        return findvideos(item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist, item)
    support.videolibrary(itemlist, item, 'bold color kod')
    return itemlist
def episodios(item):
    """Build episode items from the show's JSON seasons feed, labelling them
    either "SxEE - show" (parsed from the title) or "<prefix><n> - title".

    Fix: the trailing ``support.match(itemlist[0].title, ...)`` raised
    IndexError whenever ``itemlist`` came out empty and ``autorenumber.check``
    was falsy; the probe is now guarded with ``itemlist and``.
    """
    itemlist = []
    json_file = current_session.get(item.url, headers=headers, params=payload).json()
    for i, block in enumerate(json_file['data']):
        # With several season blocks, prefix with "<season>x"; otherwise label
        if len(json_file['data']) > 1:
            prepend = str(i + 1) + 'x'
        else:
            prepend = 'Episodio '
        show_id = str(block['show_id'])
        season_id = str(block['season_id'])
        episodes = []
        support.info('SEASON ID= ', season_id)
        for episode in json_file['data']:
            episodes.append(episode['episodes'])
        for episode in episodes:
            for key in episode:
                if 'stagione' in encode(key['title']).lower():
                    # Season/episode numbers are written inside the title
                    season = support.match(encode(key['title']),
                                           patron=r'[Ss]tagione\s*(\d+)').match
                    episode = support.match(encode(key['title']),
                                            patron=r'[Ee]pisodio\s*(\d+)').match
                    if season and episode:
                        title = season + 'x' + episode + ' - ' + item.fulltitle
                        make_item = True
                elif int(key['season_id']) == int(season_id):
                    try:
                        title = prepend + key['number'] + ' - ' + key['title'].encode('utf8')
                    except:
                        title = prepend + key['number'] + ' - ' + key['title']
                    make_item = True
                else:
                    make_item = False
                if make_item == True:
                    if type(title) == tuple:
                        title = title[0]
                    itemlist.append(
                        item.clone(title=title,
                                   url=host + show_id + '/season/' + str(key['season_id']),
                                   action='findvideos',
                                   video_id=key['video_id']))
    autorenumber.start(itemlist, item)
    if autorenumber.check(item) == True \
            or (itemlist and support.match(itemlist[0].title, patron=r"(\d+x\d+)").match):
        support.videolibrary(itemlist, item)
    return itemlist
def get_seasons(item):
    """Produce one navigable item per season; when called by the video
    library machinery (or season view is off), expand seasons into episodes.

    Fix: the expansion loop did ``itlist = episodios(item)``, discarding all
    but the last season's episodes; it now accumulates with ``+=`` so the
    video library receives every episode of every season.
    """
    logger.debug()
    itemlist = []
    infoLabels = item.infoLabels
    json = item.url if type(item.url) == dict else item.url
    if 'seasons_list' in json:
        json = json['seasons_list']
    elif 'tvshows_list' in json:
        return show_menu(item)
    for option in json:
        infoLabels['season'] = option['season']
        title = config.get_localized_string(60027) % option['season']
        extra = set_extra_values(item, option, item.path)
        itemlist.append(
            Item(channel=item.channel,
                 title=set_title(title),
                 fulltitle=item.fulltitle,
                 show=item.show,
                 thumbnail=extra.thumb,
                 filterseason=int(option['season']),
                 url=extra.url,
                 action='episodios',
                 contentSeason=option['season'],
                 infoLabels=infoLabels,
                 contentType='season' if show_seasons else 'tvshow',
                 path=extra.path))
    if inspect.stack()[2][3] in ['add_tvshow', 'get_episodes', 'update', 'find_episodes', 'get_newest'] \
            or show_seasons == False:
        itlist = []
        for item in itemlist:
            # accumulate every season's episodes (plain assignment here would
            # keep only the last season)
            itlist += episodios(item)
        itemlist = itlist
    if inspect.stack()[2][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes', 'get_newest'] \
            and defp and not item.disable_pagination:
        itemlist = pagination(item, itemlist)
    # if show_seasons:
    support.videolibrary(itemlist, item)
    support.download(itemlist, item)
    return itemlist
def episodios(item):
    """Build "Episodio n - title" items from the first season block of the
    show's JSON feed.

    Fix: the trailing ``support.match(itemlist[0].title, ...)`` raised
    IndexError when ``itemlist`` was empty and ``autorenumber.check`` was
    falsy; the probe is now guarded with ``itemlist and``.
    """
    itemlist = []
    json_file = current_session.get(item.url, headers=headers, params=payload).json()
    show_id = str(json_file['data'][0]['show_id'])
    season_id = str(json_file['data'][0]['season_id'])
    episodes = []
    support.log('SEASON ID= ', season_id)
    for episode in json_file['data']:
        episodes.append(episode['episodes'])
    for episode in episodes:
        for key in episode:
            if 'stagione' in encode(key['title']).lower():
                # Season/episode numbers are spelled out inside the title
                match = support.match(
                    encode(key['title']),
                    patron=r'[Ss]tagione\s*(\d+) - [Ee]pisodio\s*(\d+)').match
                title = match[0] + 'x' + match[1] + ' - ' + item.fulltitle
                make_item = True
            elif int(key['season_id']) == int(season_id):
                try:
                    title = 'Episodio ' + key['number'] + ' - ' + key['title'].encode('utf8')
                except:
                    title = 'Episodio ' + key['number'] + ' - ' + key['title']
                make_item = True
            else:
                make_item = False
            if make_item == True:
                if type(title) == tuple:
                    title = title[0]
                itemlist.append(
                    Item(channel=item.channel,
                         title=title,
                         fulltitle=item.fulltitle,
                         show=item.show,
                         url=host + show_id + '/season/' + str(key['season_id']) + '/',
                         action='findvideos',
                         video_id=key['video_id'],
                         thumbnail=item.thumbnail,
                         fanart=item.fanart,
                         plot=item.plot,
                         contentType=item.contentType))
    autorenumber.renumber(itemlist, item, 'bold')
    if autorenumber.check(item) == True \
            or (itemlist and support.match(itemlist[0].title, patron=r"(\d+x\d+)").match):
        support.videolibrary(itemlist, item)
    return itemlist
def episodios(item):
    """Walk the season selector's option values and scrape each season's
    episode list block."""
    log()
    itemlist = []
    patron = r'<option value="(\d+)"[\sselected]*>.*?</option>'
    matches, data = support.match(item, patron, headers=headers)
    for value in matches:
        # Episode block belonging to this season number
        patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % value
        blocco = scrapertools.find_single_match(data, patron)
        log(blocco)
        patron = r'(<a data-id="\d+[^"]*" data-href="([^"]+)"(?:\sdata-original="([^"]+)")?\sclass="[^"]+">)[^>]+>[^>]+>([^<]+)<'
        for scrapedextra, scrapedurl, scrapedimg, scrapedtitle in scrapertools.find_multiple_matches(blocco, patron):
            contentlanguage = ''
            if 'sub-ita' in scrapedtitle.lower():
                contentlanguage = 'Sub-ITA'
                scrapedtitle = scrapedtitle.replace(contentlanguage, '')
            number = cleantitle(scrapedtitle.replace("Episodio", "")).strip()
            title = value + "x" + number.zfill(2)
            title += " " + support.typo(contentlanguage, '_ [] color kod') if contentlanguage else ''
            infoLabels = {}
            infoLabels['episode'] = number.zfill(2)
            infoLabels['season'] = value
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title,
                     fulltitle=scrapedtitle,
                     contentType="episode",
                     url=scrapedurl,
                     thumbnail=scrapedimg,
                     extra=scrapedextra,
                     infoLabels=infoLabels,
                     folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    support.videolibrary(itemlist, item, 'bold color kod')
    return itemlist
def episodios(item):
    """Scrape episodes whose metadata (series, season, episode, embeds,
    thumbnails) is carried as data attributes in one big markup pattern."""
    log()
    itemlist = []
    patron = r'<div\sclass="[^"]+">\s([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><p[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>'
    patron += r'[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s'
    patron += r'.*?embed="([^"]+)"\s.*?embed2="([^"]+)?"\s.*?embed3="([^"]+)?"?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s?'
    patron += r'(?:<img\sclass="[^"]+" meta-src="([^"]+)"[^>]+>|<img\sclass="[^"]+" src="" data-original="([^"]+)"[^>]+>)?'
    for scrapedtitle, scrapedepisodetitle, scrapedplot, scrapedserie, scrapedseason, scrapedepisode, \
            scrapedurl, scrapedurl2, scrapedurl3, scrapedthumbnail, scrapedthumbnail2 in support.match(item, patron, headers=headers)[0]:
        scrapedtitle = cleantitle(scrapedtitle)
        scrapedepisode = scrapedepisode.zfill(2)
        scrapedepisodetitle = cleantitle(scrapedepisodetitle)
        # "SxEE episode-title"
        title = str("%sx%s %s" % (scrapedseason, scrapedepisode, scrapedepisodetitle)).strip()
        if 'SUB-ITA' in scrapedtitle:
            title += " " + support.typo("Sub-ITA", '_ [] color kod')
        infoLabels = {}
        infoLabels['season'] = scrapedseason
        infoLabels['episode'] = scrapedepisode
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=support.typo(title, 'bold'),
                 fulltitle=scrapedtitle,
                 # The three embed mirrors travel together, newline-separated
                 url=scrapedurl + "\r\n" + scrapedurl2 + "\r\n" + scrapedurl3,
                 contentType="episode",
                 plot=scrapedplot,
                 contentSerieName=scrapedserie,
                 contentLanguage='Sub-ITA' if 'Sub-ITA' in title else '',
                 infoLabels=infoLabels,
                 thumbnail=scrapedthumbnail2 if scrapedthumbnail2 != '' else scrapedthumbnail,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    support.videolibrary(itemlist, item)
    return itemlist
def dooplayer(item):
    """Resolve a dooplayer embed into fvs.io play links (or delegate to
    player_list when the page is a toggleable episode panel).

    Fix: the POST ``d`` field (referer domain) was misspelled
    ``modolunatico.tk``; corrected to ``mondolunatico.tk``, the domain used
    everywhere else in this channel.
    """
    support.log()
    itemlist = []
    url = item.url
    data = httptools.downloadpage(url, headers=headers).data
    link = scrapertools.find_single_match(data, r'(https://mondolunatico.tk/./[^"]+)')
    data = httptools.downloadpage(link, headers=headers).data
    if "panel_toggle toggleable" in data:
        item.url = link
        return player_list(item)
    # Rewrite the embed link into the API endpoint queried via POST
    link1 = link.replace("/v/", "/api/source/").replace("/p/", "/api/source/")
    postData = urllib.urlencode({
        "r": link,
        "d": "mondolunatico.tk",
    })
    block = httptools.downloadpage(link1, post=postData).data
    patron = r'"file":".*?\/(r[^"]+)'
    for scrapedurl in re.compile(patron, re.DOTALL).findall(block):
        scrapedurl = "https://fvs.io/" + scrapedurl
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 contentType=item.contentType,
                 title=item.title,
                 thumbnail=item.thumbnail,
                 fulltitle=item.title,
                 url=scrapedurl,
                 show=item.show))
    autoplay.start(itemlist, item)
    support.videolibrary(itemlist, item, 'color kod')
    return itemlist
def findvideos(item):
    """Extract playable video items from the page and decorate them with the
    parent item's metadata, then run the link check / filter / autoplay
    pipeline."""
    logger.info("%s mainlist findvideos_film log: %s" % (__channel__, item))
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data
    # Pull the raw data-link attributes (logged for debugging)
    patron = '<a href="#" data-link="(.*?)">'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        logger.info("altadefinizione01_club scrapedurl log: %s" % scrapedurl)
        # NOTE(review): find_video_items is recomputed on the same data for
        # every match — looks redundant; confirm before simplifying.
        try:
            itemlist = servertools.find_video_items(data=data)
            for videoitem in itemlist:
                logger.info("Videoitemlist2: %s" % videoitem)
                videoitem.title = "%s [%s]" % (item.contentTitle, videoitem.title)
                videoitem.show = item.show
                videoitem.contentTitle = item.contentTitle
                videoitem.contentType = item.contentType
                videoitem.channel = item.channel
                videoitem.year = item.infoLabels['year']
                videoitem.infoLabels['plot'] = item.infoLabels['plot']
        except AttributeError:
            logger.error("data doesn't contain expected URL")
    # Validate the links
    if checklinks:
        itemlist = servertools.check_list_links(itemlist, checklinks_number)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    # Offer the add-to-videolibrary entry
    if item.extra != 'findvideos' and item.extra != "library" \
            and config.get_videolibrary_support() and len(itemlist) != 0:
        support.videolibrary(itemlist, item)
    return itemlist
def episodios(item):
    """Parse the expandable season blocks ("Espandi" headers) into episode
    items; each paragraph holds one episode plus its link block."""
    itemlist = []
    data = httptools.downloadpage(item.url).data
    matches = scrapertoolsV2.find_multiple_matches(
        data,
        r'(<div class="sp-head[a-z ]*?" title="Espandi">[^<>]*?</div>.*?)<div class="spdiv">\[riduci\]</div>')
    for match in matches:
        support.log(match)
        blocks = scrapertoolsV2.find_multiple_matches(match, '(?:<p>)(.*?)(?:</p>|<br)')
        # Season label taken from the expander header; may be refined below
        season = scrapertoolsV2.find_single_match(
            match, r'title="Espandi">.*?STAGIONE\s+\d+([^<>]+)').strip()
        for block in blocks:
            episode = scrapertoolsV2.find_single_match(
                block, r'([0-9]+(?:×|×)[0-9]+)').strip()
            seasons_n = scrapertoolsV2.find_single_match(
                block, r'<strong>STAGIONE\s+\d+([^<>]+)').strip()
            if seasons_n:
                season = seasons_n
            if not episode:
                continue
            season = re.sub(r'–|–', "-", season)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType='episode',
                     title="[B]" + episode + "[/B] " + season,
                     fulltitle=episode + " " + season,
                     show=episode + " " + season,
                     # The raw block is passed on so findvideos can dig links
                     url=block,
                     extra=item.extra,
                     thumbnail=item.thumbnail,
                     infoLabels=item.infoLabels))
    support.videolibrary(itemlist, item)
    return itemlist
def episodios(item):
    """Episode list from the ``seasons="..."`` JSON attribute, with optional
    TMDB enrichment and Trakt checking for interactive calls.

    Fix: the original called ``.replace('"', '"')`` — a no-op. The JSON sits
    inside a double-quoted HTML attribute, so its inner quotes are escaped as
    ``&quot;`` and must be unescaped before ``json.loads``.
    """
    # getHeaders()
    logger.debug()
    itemlist = []
    js = json.loads(
        support.match(item.url, patron=r'seasons="([^"]+)').match.replace('&quot;', '"'))
    for episodes in js:
        logger.debug(jsontools.dump(js))
        for it in episodes['episodes']:
            itemlist.append(
                item.clone(
                    # "SxEE - name", e.g. "1x04 - Title"
                    title=support.typo(
                        str(episodes['number']) + 'x' + str(it['number']).zfill(2)
                        + ' - ' + support.cleantitle(it['name']), 'bold'),
                    episode=it['number'],
                    season=episodes['number'],
                    contentSeason=episodes['number'],
                    contentEpisodeNumber=it['number'],
                    thumbnail=it['images'][0]['original_url'] if 'images' in it and 'original_url' in it['images'][0] else item.thumbnail,
                    contentThumbnail=item.thumbnail,
                    fanart=item.fanart,
                    contentFanart=item.fanart,
                    plot=it['plot'],
                    action='findvideos',
                    contentType='episode',
                    contentSerieName=item.fulltitle,
                    url=host + '/watch/' + str(episodes['title_id']),
                    episodeid='?e=' + str(it['id'])))
    # Only enrich interactively; skip during library scans / newest feeds
    if config.get_setting('episode_info') and not support.stackCheck(['add_tvshow', 'get_newest']):
        support.tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
        support.check_trakt(itemlist)
    support.videolibrary(itemlist, item)
    support.download(itemlist, item)
    return itemlist
def episodios(item):
    """List episodes of a season; with several seasons in the feed, first
    emit one navigable item per season (carrying its episodes inline)."""
    itemlist = []
    if item.episodes:
        # A season item built below: its episode payload travels with it
        episodes = item.episodes
        show_id = item.show_id
        season_id = item.season_id
    else:
        json_file = current_session.get(item.url, headers=headers, params=payload).json()['data']
        if len(json_file) > 1:
            # Multiple seasons: expose a sub-menu, one entry per season
            for key in json_file:
                itemlist.append(
                    item.clone(title=support.typo(key['name'], 'bold'),
                               show_id=str(key['show_id']),
                               season_id=str(key['season_id']),
                               episodes=key['episodes']))
            return itemlist
        else:
            episodes = json_file[0]['episodes']
            show_id = str(json_file[0]['show_id'])
            season_id = str(json_file[0]['season_id'])
    for episode in episodes:
        try:
            title = 'Episodio ' + episode['number'] + ' - ' + episode['title'].encode('utf8')
        except:
            title = 'Episodio ' + episode['number'] + ' - ' + episode['title']
        if type(title) == tuple:
            title = title[0]
        itemlist.append(
            item.clone(title=support.typo(title, 'bold'),
                       url=main_host + show_id + '/season/' + str(season_id),
                       action='findvideos',
                       video_id=episode['video_id']))
    if inspect.stack()[1][3] not in ['find_episodes']:
        autorenumber.start(itemlist, item)
    support.videolibrary(itemlist, item)
    return itemlist
def findvideos(item):
    """Resolve the direct video source for a movie or an episode and add a
    matching download item.

    Fix: removed a leftover ``support.dbg()`` call — a developer debugger
    breakpoint that must not run in production.
    """
    domain = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+')
    if item.contentType == 'movie':
        id = item.url.split('/')[-1]
        url = domain + '/play_f.php?f=' + id
    else:
        url = item.url
        id = item.args['id']
        season = item.args['season']
        episode = item.args['episode']
    res = support.match(item, 'src="([^"]+)">.*?</video>', url=url,
                        headers=[['Referer', domain]])
    itemlist = []
    if res[0]:
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title='stpgs.ml' + support.typo(item.quality, '-- [] color kod'),
                 url=res[0][0],
                 server='directo',
                 fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 quality=item.quality,
                 contentType=item.contentType,
                 folder=False))
        # Companion download entry pointing at the download prefix
        download = itemlist[0].clone()
        if item.contentType == 'movie':
            download.url = downPrefix + id
        else:
            download.url = downPrefix + id + 'S' + season + '-' + episode
        itemlist.append(download)
    else:
        # google drive...
        pass
    support.videolibrary(itemlist, item)
    return support.controls(itemlist, item, True, True)
def findvideos(item):
    """Find playable servers in the page and decorate them with the parent
    item's metadata.

    Fix: ``videoitem.contentType = item.content`` was a typo — ``item.content``
    does not exist (Item yields '' for unknown attributes), so contentType was
    silently blanked; corrected to ``item.contentType``.
    """
    logger.info('[filmigratis.py] findvideos')
    data = httptools.downloadpage(item.url, headers=headers).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title + '[COLOR green][B] - ' + videoitem.title + '[/B][/COLOR]'
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
    if item.args == "film":
        support.videolibrary(itemlist, item, 'color kod')
    autoplay.start(itemlist, item)
    return itemlist