def findvideostv(item):
    """Build the list of episode links (online + download sections) for a TV season.

    Assumes the season number is the second word of ``item.title`` — TODO confirm
    against the caller that builds season items.  Returns playable Items sorted
    by episode number, then title.
    """
    logger.info("pelisalacarta.channels.allpeliculas findvideostv")
    itemlist = []
    # Season number taken from the second word of the title.
    season = item.title.split(" ")[1]
    thumbnail = item.thumbnail
    # Fill the language and quality lookup dictionaries.
    idiomas_videos, calidad_videos = dict_videos()
    data = scrapertools.cachePage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    # Best effort: TMDB handle for per-episode plot/thumbnail; ignored on failure.
    try:
        from core.tmdb import Tmdb
        otmdb = Tmdb(texto_buscado=item.fulltitle, tipo="tv")
    except:
        pass
    # Online links for this season.
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)".*?episode="([^"]+)" season="'+season+'" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, episode, language, url in matches:
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = "[COLOR sandybrown][B]Episodio "+episode+"[/B][/COLOR] "
            titulo += "Enlace encontrado en [COLOR green][B]"+enlaces[0][0]+"[/B][/COLOR] [COLOR magenta]["+idioma+"][/COLOR] ["+calidad_videos.get(quality)+"]"
            servidor = enlaces[0][2]
            # Best effort: per-episode plot/thumbnail from TMDB.
            try:
                item.plot, thumbnail = infoepi(otmdb, season, episode)
            except:
                pass
            itemlist.append(Item(channel=__channel__, action="play", server=servidor,
                                 title=titulo, url=enlaces[0][1], fulltitle=item.fulltitle,
                                 thumbnail=thumbnail, fanart=item.fanart,
                                 plot=str(item.plot), extra=episode, folder=False))
    # Download links for this season.
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)".*?episode="([^"]+)" season="'+season+'" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, episode, language, url in matches:
        mostrar_server = True
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            servidor = enlaces[0][2]
            # Optionally hide premium servers according to the user setting.
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "[COLOR sandybrown][B]Episodio "+episode+"[/B][/COLOR] "
                titulo += "Enlace encontrado en [COLOR green][B]"+enlaces[0][0]+"[/B][/COLOR] ["+idioma+"] ["+calidad_videos.get(quality)+"]"
                try:
                    item.plot, thumbnail = infoepi(otmdb, season, episode)
                except:
                    pass
                itemlist.append(Item(channel=__channel__, action="play", server=servidor,
                                     title=titulo, url=enlaces[0][1], fulltitle=item.fulltitle,
                                     thumbnail=thumbnail, fanart=item.fanart,
                                     plot=str(item.plot), extra=episode, folder=False))
    # Sort by episode number (stored in extra), then by title.
    itemlist.sort(key=lambda item: (int(item.extra), item.title))
    return itemlist
def findvideos(item):
    """List torrent, online and download links for a descargasmix movie page.

    Returns playable Items (torrent and resolved online links) plus, for the
    download section, folder Items that open the per-server link list via the
    "enlaces" action.
    """
    logger.info("pelisalacarta.channels.descargasmix findvideos")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    sinopsis = scrapertools.find_single_match(data, '<strong>SINOPSIS</strong>:(.*?)</p>')
    fanart = item.fanart
    # Best effort: enrich plot/fanart from an external info source.
    try:
        sinopsis, fanart = info(item.fulltitle, "movie", sinopsis)
    except:
        pass
    # Torrent links
    patron = 'class="separate3 magnet".*?href="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        itemlist.append(Item(channel=__channel__, action="play", server="torrent",
                             title="[COLOR green][Enlace en Torrent][/COLOR]",
                             fulltitle=item.fulltitle, url=scrapedurl,
                             thumbnail=item.thumbnail, fanart=fanart,
                             plot=str(sinopsis), context="0",
                             contentTitle=item.fulltitle, folder=False))
    # Online links
    data_online = scrapertools.find_single_match(data, 'Enlaces para ver online(.*?)<div class="section-box related-posts">')
    if len(data_online) > 0:
        patron = 'dm\(c.a\(\'([^\']+)\''
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for code in matches:
            enlace = dm(code)
            enlaces = servertools.findvideos(data=enlace)
            # FIX: only read enlaces[0] after confirming the list is non-empty;
            # the original built the title first and raised IndexError whenever
            # no server connector recognised the decoded link.
            if len(enlaces) > 0:
                titulo = "Enlace encontrado en [COLOR sandybrown]"+enlaces[0][0]+"[/COLOR]"
                itemlist.append(Item(channel=__channel__, action="play", server=enlaces[0][2],
                                     title=titulo, url=enlaces[0][1],
                                     fulltitle=item.fulltitle, thumbnail=item.thumbnail,
                                     fanart=fanart, plot=str(sinopsis), context="0",
                                     contentTitle=item.fulltitle,
                                     viewmode="movie_with_plot", folder=False))
    # Download links
    data_descarga = scrapertools.find_single_match(data, 'Enlaces de descarga(.*?)<script>')
    patron = '<div class="fondoenlaces".*?id=".*?_([^"]+)".*?textContent=nice=dm\(c.a\(\'([^\']+)\''
    matches = scrapertools.find_multiple_matches(data_descarga, patron)
    for scrapedserver, scrapedurl in matches:
        # Normalise server aliases used by the site (idiomatic `or` instead of
        # the original bitwise `|`; same result on booleans).
        if scrapedserver == "ul" or scrapedserver == "uploaded":
            scrapedserver = "uploadedto"
        scrapedserver = scrapedserver.replace("abelhas", "lolabits")
        titulo = scrapedserver.capitalize()
        if titulo == "Magnet":
            continue
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(scrapedserver)
        if mostrar_server:
            try:
                # Raises (and skips the server) when no connector module exists.
                servers_module = __import__("servers."+scrapedserver)
                # Count how many links this server's block contains.
                patron = "(dm\(c.a\('"+scrapedurl.replace("+", "\+")+"'.*?)</div>"
                data_enlaces = scrapertools.find_single_match(data_descarga, patron)
                patron = 'dm\(c.a\(\'([^\']+)\''
                matches_enlaces = scrapertools.find_multiple_matches(data_enlaces, patron)
                numero = str(len(matches_enlaces))
                itemlist.append(Item(channel=__channel__, action="enlaces",
                                     title=titulo+" - Nº enlaces:"+numero, url=item.url,
                                     fulltitle=item.fulltitle, thumbnail=item.thumbnail,
                                     fanart=fanart, plot=str(sinopsis), extra=scrapedurl,
                                     context="0", contentTitle=item.fulltitle,
                                     viewmode="movie_with_plot", folder=True))
            except:
                pass
    return itemlist
def epienlaces(item):
    """List the links of one episode block (download or online section).

    ``item.extra`` holds the heading that delimits the episode block: headings
    starting with "Ver" select the online section, "Descargar" the download
    section.
    """
    logger.info("pelisalacarta.channels.descargasmix epienlaces")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    # Escape parentheses so the heading can be embedded in a regex.
    delimitador = item.extra.replace("(", "\(").replace(")", "\)")
    if delimitador.startswith("Ver"):
        patron = delimitador+'</h3>(<div class="episode-server">.*?)(?:</li>|<div class="episode-title">)'
    else:
        patron = delimitador+'</h3><div class="episode-title">(.*?)(?:<h3 style="text-transform: uppercase;font-size: 18px;">|</li>)'
    bloque = scrapertools.find_single_match(data, patron)
    patron = '<div class="episode-server">.*?href="([^"]+)"'
    patron += '.*?data-server="([^"]+)"'
    patron += '.*?<div style="float:left;width:140px;">(.*?)</div>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedserver, scrapedcalidad in matches:
        # Normalise server aliases used by the site.
        if scrapedserver == "ul":
            scrapedserver = "uploadedto"
        scrapedserver = scrapedserver.replace("abelhas", "lolabits")
        titulo = scrapedserver.capitalize()+" ["+scrapedcalidad+"]"
        # Download links
        if delimitador.startswith("Descargar"):
            if scrapedserver == "magnet":
                titulo = titulo.replace("Magnet", "[COLOR green][Enlace en Torrent][/COLOR]")
                itemlist.append(Item(channel=__channel__, action="play", title=titulo,
                                     server="torrent", url=scrapedurl,
                                     fulltitle=item.fulltitle, thumbnail=item.thumbnail,
                                     fanart=item.fanart, plot=item.plot, folder=False))
            else:
                mostrar_server = True
                if config.get_setting("hidepremium") == "true":
                    mostrar_server = servertools.is_server_enabled(scrapedserver)
                if mostrar_server:
                    try:
                        # Raises (and skips) when no connector module exists.
                        servers_module = __import__("servers."+scrapedserver)
                        itemlist.append(Item(channel=__channel__, action="play_episodios",
                                             title=titulo, fulltitle=item.fulltitle,
                                             url=scrapedurl, thumbnail=item.thumbnail,
                                             fanart=item.fanart, plot=item.plot,
                                             extra=item.url, folder=True))
                    except:
                        pass
            itemlist.sort(key=lambda item: item.title, reverse=True)
        # Online links
        else:
            if "http://descargasmix" in scrapedurl:
                # Internal links are redirects; resolve the real target first.
                DEFAULT_HEADERS.append(["Referer", item.url])
                scrapedurl = scrapertools.get_header_from_response(scrapedurl, header_to_get="location", headers=DEFAULT_HEADERS)
            enlaces = servertools.findvideos(data=scrapedurl)
            if len(enlaces) > 0:
                # FIX: the original indexed enlaces[0] inside this loop, so every
                # iteration appended the same first link; use each found link.
                for enlace in enlaces:
                    titulo = "Enlace encontrado en [COLOR sandybrown]"+enlace[0]+"[/COLOR] ["+scrapedcalidad+"]"
                    itemlist.append(Item(channel=__channel__, action="play", server=enlace[2],
                                         title=titulo, url=enlace[1],
                                         fulltitle=item.fulltitle, thumbnail=item.thumbnail,
                                         fanart=item.fanart, plot=item.plot, folder=False))
    return itemlist
def epienlaces(item):
    """List the links of one episode, delimited by the episode heading.

    The heading is derived from ``item.title`` with the show name stripped.
    Magnet links are inserted first; other servers are resolved to playable
    Items.  A library entry is appended when library support is enabled.
    """
    logger.info("pelisalacarta.channels.descargasmix epienlaces")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    # Episode block is delimited by the title minus the show name.
    delimitador = item.title.replace(item.show, "")
    patron = delimitador+'\s*</strong>(.*?)(?:</strong>|<div class="section-box related-posts")'
    bloque = scrapertools.find_single_match(data, patron)
    logger.info(bloque)
    patron = '<div class="episode-server">.*?href="([^"]+)"'
    patron += '.*?data-server="([^"]+)"'
    patron += '.*?<div style="float:left;width:140px;">(.*?)</div>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedserver, scrapedcalidad in matches:
        # Normalise server aliases used by the site.
        if scrapedserver == "ul":
            scrapedserver = "uploadedto"
        if scrapedserver == "streamin":
            scrapedserver = "streaminto"
        titulo = scrapedserver.capitalize()+" ["+scrapedcalidad+"]"
        # Magnet/torrent links go first in the list.
        if scrapedserver == "magnet":
            titulo = titulo.replace("Magnet", "[COLOR green][Enlace en Torrent][/COLOR]")
            itemlist.insert(0, Item(channel=__channel__, action="play", title=titulo,
                                    server="torrent", url=scrapedurl,
                                    fulltitle=item.fulltitle, thumbnail=item.thumbnail,
                                    fanart=item.fanart, plot=item.plot, folder=False))
        else:
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    # Raises (and skips the server) when no connector exists.
                    servers_module = __import__("servers."+scrapedserver)
                    if "http://descargasmix" in scrapedurl:
                        # Internal links wrap the real target in an iframe.
                        DEFAULT_HEADERS.append(["Referer", item.url])
                        data = scrapertools.cache_page(scrapedurl, headers=DEFAULT_HEADERS)
                        scrapedurl = scrapertools.find_single_match(data, 'iframe src="([^"]+)"')
                    enlaces = servertools.findvideos(data=scrapedurl)
                    if len(enlaces) > 0:
                        # FIX: the original indexed enlaces[0] inside this loop,
                        # appending the same first link repeatedly; use each link.
                        for enlace in enlaces:
                            titulo = "Enlace encontrado en [COLOR sandybrown]"+enlace[0]+"[/COLOR] ["+scrapedcalidad+"]"
                            itemlist.append(Item(channel=__channel__, action="play",
                                                 server=enlace[2], title=titulo,
                                                 url=enlace[1], fulltitle=item.fulltitle,
                                                 thumbnail=item.thumbnail, fanart=item.fanart,
                                                 plot=item.plot, folder=False))
                except:
                    pass
    if config.get_library_support() and item.category == "":
        itemlist.append(Item(channel=__channel__,
                             title="[COLOR green]Añadir enlaces a la biblioteca[/COLOR]",
                             url=item.url+"|", action="add_pelicula_to_library",
                             extra="epienlaces", fulltitle=item.title, show=item.title))
    return itemlist
def findvideos(item):
    """List online and download video links for an allpeliculas movie.

    Resolves each scraped link through servertools, labels it with language
    and quality, sorts the results, and optionally appends an
    "add to library" entry.
    """
    logger.info("pelisalacarta.channels.allpeliculas findvideos")
    itemlist = []
    sinopsis = item.plot
    fanart = item.fanart
    # Fill the language and quality lookup dictionaries.
    idiomas_videos, calidad_videos = dict_videos()
    data = scrapertools.cachePage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    # Best effort: enrich plot/fanart from an external info source.
    try:
        sinopsis, fanart = info(item.fulltitle, "movie", scrapertools.find_single_match(sinopsis, "plot:'([^']+)'"))
    except:
        pass
    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)".*?id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, language, url in matches:
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = "Enlace encontrado en [COLOR green][B]"+enlaces[0][0]+"[/B][/COLOR] [COLOR sandybrown]["+idioma+"][/COLOR] ["+calidad_videos.get(calidad)+"]"
            servidor = enlaces[0][2]
            itemlist.append(Item(channel=__channel__, action="play", server=servidor,
                                 title=titulo, url=enlaces[0][1], fulltitle=item.fulltitle,
                                 thumbnail=item.thumbnail, fanart=fanart,
                                 plot=str(sinopsis), extra=idioma, folder=False))
    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)".*?id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, language, url in matches:
        mostrar_server = True
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            servidor = enlaces[0][2]
            # Optionally hide premium servers according to the user setting.
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "Enlace encontrado en [COLOR blue][B]"+enlaces[0][0]+"[/B][/COLOR] [COLOR sandybrown]["+idioma+"][/COLOR] ["+calidad_videos.get(calidad)+"]"
                itemlist.append(Item(channel=__channel__, action="play", server=servidor,
                                     title=titulo, url=enlaces[0][1], fulltitle=item.fulltitle,
                                     thumbnail=item.thumbnail, fanart=fanart,
                                     plot=str(sinopsis), extra=idioma, folder=False))
    itemlist.sort(key=lambda item: (item.extra, item.server))
    # FIX: parenthesise the category test — `and` binds tighter than `or`, so
    # the original offered "add to library" even for an empty result list when
    # item.category was "Buscador".
    if len(itemlist) > 0 and (item.category == "" or item.category == "Buscador"):
        if config.get_library_support():
            itemlist.append(Item(channel=__channel__,
                                 title="[COLOR green]Añadir enlaces a la biblioteca[/COLOR]",
                                 url=item.url, action="add_pelicula_to_library",
                                 fulltitle=item.fulltitle, show=item.fulltitle))
    return itemlist
def findvideostv(item):
    """Build the list of episode links (online + download sections) for a TV season.

    Assumes the season number is the second word of ``item.title`` — TODO confirm
    against the caller that builds season items.  Returns playable Items sorted
    by episode number, then title.
    """
    logger.info("pelisalacarta.channels.allpeliculas findvideostv")
    itemlist = []
    # Season number taken from the second word of the title.
    season = item.title.split(" ")[1]
    thumbnail = item.thumbnail
    # Fill the language and quality lookup dictionaries.
    idiomas_videos, calidad_videos = dict_videos()
    data = scrapertools.cachePage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    # Best effort: TMDB handle for per-episode plot/thumbnail; ignored on failure.
    try:
        from core.tmdb import Tmdb
        otmdb = Tmdb(texto_buscado=item.fulltitle, tipo="tv")
    except:
        pass
    # Online links for this season.
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)".*?episode="([^"]+)" season="' + season + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, episode, language, url in matches:
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = "[COLOR sandybrown][B]Episodio " + episode + "[/B][/COLOR] "
            titulo += "Enlace encontrado en [COLOR green][B]" + enlaces[0][
                0] + "[/B][/COLOR] [COLOR magenta][" + idioma + "][/COLOR] [" + calidad_videos.get(
                    quality) + "]"
            servidor = enlaces[0][2]
            # Best effort: per-episode plot/thumbnail from TMDB.
            try:
                item.plot, thumbnail = infoepi(otmdb, season, episode)
            except:
                pass
            itemlist.append(
                Item(channel=__channel__, action="play", server=servidor,
                     title=titulo, url=enlaces[0][1], fulltitle=item.fulltitle,
                     thumbnail=thumbnail, fanart=item.fanart,
                     plot=str(item.plot), extra=episode, folder=False))
    # Download links for this season.
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)".*?episode="([^"]+)" season="' + season + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, episode, language, url in matches:
        mostrar_server = True
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            servidor = enlaces[0][2]
            # Optionally hide premium servers according to the user setting.
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "[COLOR sandybrown][B]Episodio " + episode + "[/B][/COLOR] "
                titulo += "Enlace encontrado en [COLOR green][B]" + enlaces[0][
                    0] + "[/B][/COLOR] [" + idioma + "] [" + calidad_videos.get(
                        quality) + "]"
                try:
                    item.plot, thumbnail = infoepi(otmdb, season, episode)
                except:
                    pass
                itemlist.append(
                    Item(channel=__channel__, action="play", server=servidor,
                         title=titulo, url=enlaces[0][1], fulltitle=item.fulltitle,
                         thumbnail=thumbnail, fanart=item.fanart,
                         plot=str(item.plot), extra=episode, folder=False))
    # Sort by episode number (stored in extra), then by title.
    itemlist.sort(key=lambda item: (int(item.extra), item.title))
    return itemlist
def findvideos(item):
    """List online and download video links for an allpeliculas movie.

    Resolves each scraped link through servertools, labels it with language
    and quality, sorts the results, and optionally appends an
    "add to library" entry.
    """
    logger.info("pelisalacarta.channels.allpeliculas findvideos")
    itemlist = []
    sinopsis = item.plot
    fanart = item.fanart
    # Fill the language and quality lookup dictionaries.
    idiomas_videos, calidad_videos = dict_videos()
    data = scrapertools.cachePage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    # Best effort: enrich plot/fanart from an external info source.
    try:
        sinopsis, fanart = info(
            item.fulltitle, "movie",
            scrapertools.find_single_match(sinopsis, "plot:'([^']+)'"))
    except:
        pass
    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)".*?id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, language, url in matches:
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = "Enlace encontrado en [COLOR green][B]" + enlaces[0][
                0] + "[/B][/COLOR] [COLOR sandybrown][" + idioma + "][/COLOR] [" + calidad_videos.get(
                    calidad) + "]"
            servidor = enlaces[0][2]
            itemlist.append(
                Item(channel=__channel__, action="play", server=servidor,
                     title=titulo, url=enlaces[0][1], fulltitle=item.fulltitle,
                     thumbnail=item.thumbnail, fanart=fanart,
                     plot=str(sinopsis), extra=idioma, folder=False))
    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)".*?id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, language, url in matches:
        mostrar_server = True
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            servidor = enlaces[0][2]
            # Optionally hide premium servers according to the user setting.
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "Enlace encontrado en [COLOR blue][B]" + enlaces[0][
                    0] + "[/B][/COLOR] [COLOR sandybrown][" + idioma + "][/COLOR] [" + calidad_videos.get(
                        calidad) + "]"
                itemlist.append(
                    Item(channel=__channel__, action="play", server=servidor,
                         title=titulo, url=enlaces[0][1], fulltitle=item.fulltitle,
                         thumbnail=item.thumbnail, fanart=fanart,
                         plot=str(sinopsis), extra=idioma, folder=False))
    itemlist.sort(key=lambda item: (item.extra, item.server))
    # FIX: parenthesise the category test — `and` binds tighter than `or`, so
    # the original offered "add to library" even for an empty result list when
    # item.category was "Buscador".
    if len(itemlist) > 0 and (item.category == "" or item.category == "Buscador"):
        if config.get_library_support():
            itemlist.append(
                Item(channel=__channel__,
                     title="[COLOR green]Añadir enlaces a la biblioteca[/COLOR]",
                     url=item.url, action="add_pelicula_to_library",
                     fulltitle=item.fulltitle, show=item.fulltitle))
    return itemlist
def findvideos(item):
    """List torrent, streaming and download links for a newpct1 detail page.

    Scrapes the "descarga-torrent" variant of the page, emits one torrent
    Item plus one Item per streaming/download link found in the tab blocks.
    """
    logger.info("[newpct1.py] findvideos")
    itemlist = []
    ## Any of the three URL variants would work; torrent page is used.
    #item.url = item.url.replace("1.com/","1.com/ver-online/")
    #item.url = item.url.replace("1.com/","1.com/descarga-directa/")
    item.url = item.url.replace("1.com/", "1.com/descarga-torrent/")
    # Download and normalise the page (site is iso-8859-1 encoded).
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", scrapertools.cache_page(item.url))
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)</strong>[^<]+</h1>")
    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+</strong>([^<]+)</h1>")
    caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
    # Torrent button link.
    patron = '<a href="([^"]+)" title="[^"]+" class="btn-torrent" target="_blank">'
    url = scrapertools.find_single_match(data, patron)
    if url != "":
        itemlist.append(Item(channel=__channel__, action="play", server="torrent",
                             title=title+" [torrent]", fulltitle=title, url=url,
                             thumbnail=caratula, plot=item.plot, folder=False))
    # Strip the site's javascript wrappers so raw links remain.
    data = data.replace("'", '"')
    data = data.replace('javascript:;" onClick="popup("http://www.newpct1.com/pct1/library/include/ajax/get_modallinks.php?links=', "")
    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "")
    data = data.replace("$!", "#!")
    patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    patron_ver = '<div id="tab3"[^>]+>.*?</ul>'
    match_ver = scrapertools.find_single_match(data, patron_ver)
    match_descargar = scrapertools.find_single_match(data, patron_descargar)
    patron = '<div class="box1"><img src="([^"]+)".*?'   # logo
    patron += '<div class="box2">([^<]+)</div>'          # server
    patron += '<div class="box3">([^<]+)</div>'          # language
    patron += '<div class="box4">([^<]+)</div>'          # quality
    patron += '<div class="box5"><a href="([^"]+)".*?'   # link
    patron += '<div class="box6">([^<]+)</div>'          # title
    enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
    enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)
    # Streaming links.
    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        servidor = servidor.replace("streamin", "streaminto")
        titulo = titulo+" ["+servidor+"]"
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(servidor)
        if mostrar_server:
            try:
                servers_module = __import__("servers."+servidor)
                server_module = getattr(servers_module, servidor)
                devuelve = server_module.find_videos(enlace)
                if devuelve:
                    enlace = devuelve[0][1]
                    itemlist.append(Item(fanart=item.fanart, channel=__channel__,
                                         action="play", server=servidor, title=titulo,
                                         fulltitle=item.title, url=enlace,
                                         thumbnail=logo, plot=item.plot, folder=False))
            except:
                pass
    # Download links; a single entry may carry several space-separated parts.
    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        servidor = servidor.replace("uploaded", "uploadedto")
        partes = enlace.split(" ")
        p = 1
        for enlace in partes:
            parte_titulo = titulo+" (%s/%s)" % (p, len(partes)) + " ["+servidor+"]"
            p += 1
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                try:
                    servers_module = __import__("servers."+servidor)
                    server_module = getattr(servers_module, servidor)
                    devuelve = server_module.find_videos(enlace)
                    if devuelve:
                        enlace = devuelve[0][1]
                        # FIX: use the per-part title — the original passed the
                        # bare `titulo`, so parte_titulo was computed but never
                        # used and all parts showed an identical label.
                        itemlist.append(Item(fanart=item.fanart, channel=__channel__,
                                             action="play", server=servidor,
                                             title=parte_titulo, fulltitle=item.title,
                                             url=enlace, thumbnail=logo,
                                             plot=item.plot, folder=False))
                except:
                    pass
    return itemlist
def findvideos(item):
    """List "watch online" links for a peliserie movie or episode.

    ``item.extra`` selects which contribution list to scrape ('peliculas'
    vs. 'series'/'play_from_library').  Only online links are listed.
    """
    logger.info("[peliserie.py] findvideos extra: " + item.extra)
    itemlist=[]
    if item.extra=='peliculas':
        # Only show "watch online" links.
        patron= 'id="contribution-view">(.*?)</ul>'
        # To show all of them instead: patron= 'id="contribution-view">(.*?)class="list-end"'
        # Fanart lookup in TMDB (currently disabled).
        #year=item.show.split('|')[1]
        #item.show = item.show.split('|')[0]
        #item.fanart= get_fanart_tmdb(item.show, year= year)
    else:
        # 'series' and 'play_from_library': only show "watch online" links.
        patron= 'id="view-list">(.*?)</ul>'
        # To show all of them instead: patron= 'id="id="view-list">(.*?)class="list-end"'
    # Download the page and cut out the selected list.
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)","",scrapertools.cache_page(item.url))
    data= scrapertools.get_match(data,patron)
    patron = '<li data-id="(.*?)</li>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    ''' <li data-id="53885"> <div class="column"><strong>Allmyvideos</strong></div> <div class="column" style="width:15%"> <img src="/images/flags/lang/flag_0.png"/> </div> <div class="column">BrScreener/Line</div> <div class="column">bibiamorant</div> <div class="column" style="width:25%"> <div class="btn s"> <a href="/external?action=movie&id=53885" class="" target="_blank">Ver online</a> </div> <div class="actions"> <i id="report-contribution" data-id="53885" class="iconx16 icon3"></i> </div> </div> </li> '''
    for i in matches:
        servidor = scrapertools.get_match(i,'<div class="column"><strong>([^<]+)</strong>')
        logger.info(servidor)
        # Normalise server names to the connector module names.
        if servidor=="Streamin": servidor="Streaminto"
        if servidor=="Netu": servidor="Netutv"
        mostrar_server= True
        # Optionally hide premium servers according to the user setting.
        if config.get_setting("hidepremium")=="true":
            mostrar_server= servertools.is_server_enabled (servidor)
        if mostrar_server:
            # Map the flag image to a language label.
            idioma = scrapertools.get_match(i,'<img src="(.*?)"/>')
            if 'flag_0.png' in idioma: idioma ='Es'
            elif 'flag_1.png' in idioma: idioma ='Lat'
            elif 'flag_2.png' in idioma: idioma ='VO'
            elif 'flag_3.png' in idioma: idioma ='VOSE'
            calidad= scrapertools.get_match(i,'<div class="column">([^<]+)</div>')
            url= __url_base__ + scrapertools.get_match(i,'<a href="([^"]+)"')
            title= 'Ver en ' + servidor + ' [' + calidad + '] (' + idioma + ')'
            itemlist.append( Item(channel=__channel__, action="play", server=servidor, title=title , thumbnail=item.thumbnail, fanart= item.fanart, fulltitle = item.title, url=url , extra=item.extra, folder=False) )
    return itemlist
def findvideos(item):
    """List "watch online" links for a peliserie movie or episode.

    ``item.extra`` selects which contribution list to scrape ('peliculas'
    vs. 'series'/'play_from_library').  Movie items also get their fanart
    looked up in TMDB.  Only online links are listed.
    """
    logger.info("[peliserie.py] findvideos extra: " + item.extra)
    itemlist = []
    if item.extra == 'peliculas':
        # Only show "watch online" links.
        patron = 'id="contribution-view">(.*?)</ul>'
        # To show all of them instead: patron= 'id="contribution-view">(.*?)class="list-end"'
        # Fanart lookup in TMDB; item.show packs "title|year".
        year = item.show.split('|')[1]
        item.show = item.show.split('|')[0]
        item.fanart = get_fanart_tmdb(item.show, year=year)
    else:
        # 'series' and 'play_from_library': only show "watch online" links.
        patron = 'id="view-list">(.*?)</ul>'
        # To show all of them instead: patron= 'id="id="view-list">(.*?)class="list-end"'
    # Download the page and cut out the selected list.
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", scrapertools.cache_page(item.url))
    data = scrapertools.get_match(data, patron)
    patron = '<li data-id="(.*?)</li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    ''' <li data-id="53885"> <div class="column"><strong>Allmyvideos</strong></div> <div class="column" style="width:15%"> <img src="/images/flags/lang/flag_0.png"/> </div> <div class="column">BrScreener/Line</div> <div class="column">bibiamorant</div> <div class="column" style="width:25%"> <div class="btn s"> <a href="/external?action=movie&id=53885" class="" target="_blank">Ver online</a> </div> <div class="actions"> <i id="report-contribution" data-id="53885" class="iconx16 icon3"></i> </div> </div> </li> '''
    for i in matches:
        servidor = scrapertools.get_match(i, '<div class="column"><strong>([^<]+)</strong>')
        # FIX (consistency): apply the same server-name fixups as the sibling
        # findvideos variant, so connector lookup works for these servers.
        if servidor == "Streamin":
            servidor = "Streaminto"
        if servidor == "Netu":
            servidor = "Netutv"
        mostrar_server = True
        # Optionally hide premium servers according to the user setting.
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(servidor)
        if mostrar_server:
            # Map the flag image to a language label.
            idioma = scrapertools.get_match(i, '<img src="(.*?)"/>')
            if 'flag_0.png' in idioma:
                idioma = 'Es'
            elif 'flag_1.png' in idioma:
                idioma = 'Lat'
            elif 'flag_2.png' in idioma:
                idioma = 'VO'
            elif 'flag_3.png' in idioma:
                idioma = 'VOSE'
            calidad = scrapertools.get_match(i, '<div class="column">([^<]+)</div>')
            url = __url_base__ + scrapertools.get_match(i, '<a href="([^"]+)"')
            title = 'Ver en ' + servidor + ' [' + calidad + '] (' + idioma + ')'
            itemlist.append(Item(channel=__channel__, action="play", server=servidor,
                                 title=title, thumbnail=item.thumbnail,
                                 fanart=item.fanart, fulltitle=item.title,
                                 url=url, extra=item.extra, folder=False))
    return itemlist
def findvideos(item):
    """List the playable links embedded in a cultmoviez player iframe.

    Series episodes (``item.extra`` packs "serie|id") are resolved through the
    site's wordpress plugin first; movies fall back to scraping ``item.url``
    directly.  The iframe query string carries one ``server=video_id`` pair per
    hoster, which is mapped to a connector via server_label/server_link.
    """
    logger.info("pelisalacarta.cultmoviez findvideos")
    if item.fanart == "":
        item.fanart = fanart
    itemlist=[]
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:22.0) Gecko/20100101 Firefox/22.0', 'Accept-Encoding': 'none', 'Host':'www.cultmoviez.info'}
    try:
        # Series path: item.extra is "serie|id"; ask the wordpress plugin for
        # the episode page, then follow its first link to the player page.
        serie = item.extra.split("|")[0]
        id = item.extra.split("|")[1]
        url_wp_plugin = urlparse.urljoin(host,wp_plugin)
        data = agrupa_datos( scrapertools.cache_page(url_wp_plugin,post="serie="+serie+"&episodios=1&id="+id) )
        # The plugin response contains a poster, a synopsis block and a single
        # <a href='...'> pointing at the episode's player page.
        url_for_servers_data = scrapertools.get_match(data,"<a href='([^']+)'>")
        data = agrupa_datos( scrapertools.cache_page(url_for_servers_data) )
    except:
        # Movie path (or plugin failure): scrape the item page directly.
        data = agrupa_datos( scrapertools.cache_page(item.url, headers=headers) )
    # Normalise HD parameter names ("uphd=" -> "up=") and query separators.
    data = re.sub(r"hd=","=",data)
    data = data.replace("?&","?")
    # The player iframe src looks like:
    #   .../play.php?up=<id>&bs=<id>&fk=<id>...&id=<code>&sub=,ES&sub_pre=ES
    # or ends in &v=<version> instead of &id= for some pages.
    try:
        search_data_for_servers = scrapertools.get_match(data,"<iframe[^\?]+\?(.*?)&id=(.*?)&")
    except:
        search_data_for_servers = scrapertools.get_match(data,"<iframe[^\?]+\?(.*?)&v=(.*?)&")
    # Subtitle id for the player.
    id = search_data_for_servers[1] + "_ES"
    # Split the query string into (server_id, video_id) pairs.
    servers_data_list = []
    for serverdata in search_data_for_servers[0].split("&"):
        server_id = scrapertools.get_match(serverdata,"(^\w+)=")
        video_id = scrapertools.get_match(serverdata,"^\w+=(.*?$)")
        servers_data_list.append( [server_id, video_id] )
    for server_id, video_id in servers_data_list:
        # "oid" is a vk auxiliary parameter, not a server entry.
        if server_id != "oid":
            server = server_label(server_id)
            mostrar_server = True
            # Optionally hide premium servers according to the user setting.
            if config.get_setting("hidepremium")=="true":
                mostrar_server= servertools.is_server_enabled (server)
            if mostrar_server:
                try:
                    # Raises (and skips) when no connector module exists;
                    # "uptostream" is allowed through without a connector.
                    if server != "uptostream":
                        servers_module = __import__("servers."+server)
                    video_link = server_link(server_id) % (video_id.replace("|","&"))
                    # For direct links, check the link is not dead before listing.
                    if server == "directo":
                        post = "fv=20&url="+video_link+"&sou=pic"
                        data = scrapertools.cache_page("http://www.cultmoviez.info/playercult/pk/pk/plugins/player_p2.php", post=post)
                        if data == "":
                            continue
                    title = item.title + " [" + server + "]"
                    itemlist.append( Item(channel=__channel__, title =title, url=video_link, action="play", thumbnail=item.thumbnail, fanart=item.fanart, plot=item.plot, extra=id ) )
                except:
                    pass
    return itemlist
def findvideos(item):
    """Build the playable items (torrent / online / download) for a movie page.

    Series pages are delegated to epienlaces(). Returns a list of Item objects.
    """
    logger.info("pelisalacarta.channels.descargasmix findvideos")
    # Series use the per-episode link scraper instead
    if item.category == "Series" or item.show != "":
        return epienlaces(item)

    itemlist = []
    data = scrapertools.cachePage(item.url)
    fanart = item.fanart
    sinopsis = scrapertools.find_single_match(
        data, '<strong>SINOPSIS</strong>:(.*?)</p>')
    if item.category == "":
        # Best effort: enrich plot/fanart from the external info source
        try:
            sinopsis, fanart = info(item.fulltitle, "movie", sinopsis)
        except:
            pass

    # --- Torrent links ---
    patron = 'class="separate3 magnet".*?href="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        title = urllib.unquote(scrapedurl)
        # Derive the search token from the last word of the title; fall back
        # to the whole stripped title when it contains no spaces.
        try:
            if item.fulltitle != "":
                titulo = item.fulltitle.strip().rsplit(" ", 1)[1]
            else:
                titulo = item.title.strip().rsplit(" ", 1)[1]
        except:
            if item.fulltitle != "":
                titulo = item.fulltitle.strip()
            else:
                titulo = item.title.strip()
        title = "[" + scrapertools.find_single_match(
            title, titulo + "(?:\.|)(.*?)(?:\.|[wW])") + "]"
        itemlist.append(
            Item(channel=__channel__, action="play", server="torrent",
                 title="[COLOR green][Enlace en Torrent][/COLOR] " + title,
                 fulltitle=item.fulltitle, url=scrapedurl,
                 thumbnail=item.thumbnail, fanart=fanart, plot=str(sinopsis),
                 context="0", contentTitle=item.fulltitle, folder=False))

    # --- Online links ---
    data_online = scrapertools.find_single_match(
        data, 'Enlaces para ver online(.*?)<div class="section-box related-posts">')
    if len(data_online) > 0:
        patron = 'dm\(c.a\(\'([^\']+)\''
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for code in matches:
            enlace = dm(code)
            enlaces = servertools.findvideos(data=enlace)
            # FIX: guard BEFORE indexing. The original built `titulo` from
            # enlaces[0][0] before testing len(enlaces), raising IndexError
            # whenever a link could not be resolved to any server.
            if len(enlaces) > 0:
                titulo = "Enlace encontrado en [COLOR sandybrown]" + enlaces[0][0] + "[/COLOR]"
                itemlist.append(
                    Item(channel=__channel__, action="play",
                         server=enlaces[0][2], title=titulo,
                         url=enlaces[0][1], fulltitle=item.fulltitle,
                         thumbnail=item.thumbnail, fanart=fanart,
                         plot=str(sinopsis), context="0",
                         contentTitle=item.fulltitle,
                         viewmode="movie_with_plot", folder=False))

    # --- Download links ---
    data_descarga = scrapertools.find_single_match(
        data, 'Enlaces de descarga(.*?)<script>')
    patron = '<div class="fondoenlaces".*?id=".*?_([^"]+)".*?textContent=nice=dm\(c.a\(\'([^\']+)\''
    matches = scrapertools.find_multiple_matches(data_descarga, patron)
    for scrapedserver, scrapedurl in matches:
        # Normalize server aliases (idiomatic membership test instead of the
        # original bitwise `|` between comparisons)
        if scrapedserver in ("ul", "uploaded"):
            scrapedserver = "uploadedto"
        titulo = scrapedserver.capitalize()
        if titulo == "Magnet":
            continue
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(scrapedserver)
        if mostrar_server:
            try:
                # Only list servers whose connector module exists
                servers_module = __import__("servers." + scrapedserver)
                # Count how many links this server block carries
                patron = "(dm\(c.a\('" + scrapedurl.replace("+", "\+") + "'.*?)</div>"
                data_enlaces = scrapertools.find_single_match(
                    data_descarga, patron)
                patron = 'dm\(c.a\(\'([^\']+)\''
                matches_enlaces = scrapertools.find_multiple_matches(
                    data_enlaces, patron)
                numero = str(len(matches_enlaces))
                if item.category == "":
                    itemlist.append(
                        Item(channel=__channel__, action="enlaces", server="",
                             title=titulo + " - Nº enlaces:" + numero,
                             url=item.url, fulltitle=item.fulltitle,
                             thumbnail=item.thumbnail, fanart=fanart,
                             plot=str(sinopsis), extra=scrapedurl,
                             context="0", contentTitle=item.fulltitle,
                             viewmode="movie_with_plot", folder=True))
            except:
                pass

    if config.get_library_support() and item.category == "":
        itemlist.append(
            Item(channel=__channel__,
                 title="[COLOR green]Añadir enlaces a la biblioteca[/COLOR]",
                 url=item.url, action="add_pelicula_to_library",
                 extra="findvideos", fulltitle=item.fulltitle.strip()))
    return itemlist
def epienlaces(item):
    """List the per-episode links (torrent + hoster) for a series page."""
    logger.info("pelisalacarta.channels.descargasmix epienlaces")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = data.replace("\n", "").replace("\t", "")

    # Narrow the page down to the block belonging to this episode; the
    # delimiter is the episode label left after removing the show name.
    delimitador = item.title.replace(item.show, "")
    patron = delimitador + '\s*</strong>(.*?)(?:</strong>|<div class="section-box related-posts")'
    bloque = scrapertools.find_single_match(data, patron)
    logger.info(bloque)

    patron = '<div class="episode-server">.*?href="([^"]+)"'
    patron += '.*?data-server="([^"]+)"'
    patron += '.*?<div style="float:left;width:140px;">(.*?)</div>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedserver, scrapedcalidad in matches:
        # Normalize server aliases to the connector-module names
        if scrapedserver == "ul":
            scrapedserver = "uploadedto"
        if scrapedserver == "streamin":
            scrapedserver = "streaminto"
        titulo = scrapedserver.capitalize() + " [" + scrapedcalidad + "]"
        if scrapedserver == "magnet":
            # Torrent links are inserted first in the listing
            titulo = titulo.replace(
                "Magnet", "[COLOR green][Enlace en Torrent][/COLOR]")
            itemlist.insert(
                0, Item(channel=__channel__, action="play", title=titulo,
                        server="torrent", url=scrapedurl,
                        fulltitle=item.fulltitle, thumbnail=item.thumbnail,
                        fanart=item.fanart, plot=item.plot, folder=False))
        else:
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    # Only list servers whose connector module exists
                    servers_module = __import__("servers." + scrapedserver)
                    if "http://descargasmix" in scrapedurl:
                        # Intermediate page: follow it to the real iframe url.
                        # NOTE(review): appending to the module-level
                        # DEFAULT_HEADERS grows it on every call — confirm
                        # whether a copy should be used instead.
                        DEFAULT_HEADERS.append(["Referer", item.url])
                        data = scrapertools.cache_page(
                            scrapedurl, headers=DEFAULT_HEADERS)
                        scrapedurl = scrapertools.find_single_match(
                            data, 'iframe src="([^"]+)"')
                    enlaces = servertools.findvideos(data=scrapedurl)
                    # FIX: the original iterated `enlaces` but always read
                    # enlaces[0], so the first link was listed N times and the
                    # rest were lost; use each resolved link.
                    for enlace in enlaces:
                        titulo = "Enlace encontrado en [COLOR sandybrown]" + enlace[0] + "[/COLOR] [" + scrapedcalidad + "]"
                        itemlist.append(
                            Item(channel=__channel__, action="play",
                                 server=enlace[2], title=titulo,
                                 url=enlace[1], fulltitle=item.fulltitle,
                                 thumbnail=item.thumbnail, fanart=item.fanart,
                                 plot=item.plot, folder=False))
                except:
                    pass

    if config.get_library_support() and item.category == "":
        itemlist.append(
            Item(channel=__channel__,
                 title="[COLOR green]Añadir enlaces a la biblioteca[/COLOR]",
                 url=item.url + "|", action="add_pelicula_to_library",
                 extra="epienlaces", fulltitle=item.title, show=item.title))
    return itemlist
def epienlaces(item):
    """List episode links; item.extra carries the section heading ("Ver ..."
    or "Descargar ...") that delimits which block of the page to scrape."""
    logger.info("pelisalacarta.channels.descargasmix epienlaces")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    # Escape regex metacharacters in the heading before using it as a pattern
    delimitador = item.extra.replace("(", "\(").replace(")", "\)")
    # "Ver..." sections and "Descargar..." sections use different markup
    if delimitador.startswith("Ver"):
        patron = delimitador + '</h3>(<div class="episode-server">.*?)(?:</li>|<div class="episode-title">)'
    else:
        patron = delimitador + '</h3><div class="episode-title">(.*?)(?:<h3 style="text-transform: uppercase;font-size: 18px;">|</li>)'
    bloque = scrapertools.find_single_match(data, patron)
    patron = '<div class="episode-server">.*?href="([^"]+)"'
    patron += '.*?data-server="([^"]+)"'
    patron += '.*?<div style="float:left;width:140px;">(.*?)</div>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedserver, scrapedcalidad in matches:
        # Normalize server alias to the connector-module name
        if scrapedserver == "ul":
            scrapedserver = "uploadedto"
        titulo = scrapedserver.capitalize() + " [" + scrapedcalidad + "]"
        # Download links
        if delimitador.startswith("Descargar"):
            if scrapedserver == "magnet":
                titulo = titulo.replace(
                    "Magnet", "[COLOR green][Enlace en Torrent][/COLOR]")
                itemlist.append(
                    Item(channel=__channel__, action="play", title=titulo,
                         server="torrent", url=scrapedurl,
                         fulltitle=item.fulltitle, thumbnail=item.thumbnail,
                         fanart=item.fanart, plot=item.plot, folder=False))
            else:
                mostrar_server = True
                if config.get_setting("hidepremium") == "true":
                    mostrar_server = servertools.is_server_enabled(
                        scrapedserver)
                if mostrar_server:
                    try:
                        # Only list servers whose connector module exists
                        servers_module = __import__("servers."
                                                    + scrapedserver)
                        itemlist.append(
                            Item(channel=__channel__, action="play_episodios",
                                 title=titulo, fulltitle=item.fulltitle,
                                 url=scrapedurl, thumbnail=item.thumbnail,
                                 fanart=item.fanart, plot=item.plot,
                                 extra=item.url, folder=True))
                    except:
                        pass
            # NOTE(review): this sort runs on every loop iteration and items
            # appended by the online branch afterwards are never re-sorted —
            # confirm whether a single sort after the loop was intended.
            itemlist.sort(key=lambda item: item.title, reverse=True)
        # Online links
        else:
            enlaces = servertools.findvideos(data=scrapedurl)
            if len(enlaces) > 0:
                # Only the first resolved link per entry is listed here
                titulo = "Enlace encontrado en [COLOR sandybrown]" + enlaces[
                    0][0] + "[/COLOR] [" + scrapedcalidad + "]"
                itemlist.append(
                    Item(channel=__channel__, action="play",
                         server=enlaces[0][2], title=titulo,
                         url=enlaces[0][1], fulltitle=item.fulltitle,
                         thumbnail=item.thumbnail, fanart=item.fanart,
                         plot=item.plot, folder=False))
    return itemlist
def findvideos(item):
    """List watch/download links for an hdfull title, plus account actions
    (favorites toggle, trailer, library entry) for movies."""
    logger.info("pelisalacarta.channels.hdfull findvideos")
    itemlist = []
    ## Load per-title status flags for the account
    status = jsontools.load_json(
        scrapertools.cache_page(host + '/a/status/all'))
    url_targets = item.url
    # FIX: give id/type safe defaults. The original left them unbound when the
    # url carried no "###id;type" suffix: `url += "###" + id + ";" + type`
    # raised NameError (swallowed by the bare except, silently dropping every
    # video item) and the final `if type == "2"` raised uncaught.
    id = ""
    type = ""
    ## The url may carry "###id;type" metadata appended by previous menus
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]
    # type "2" = movie; offer favorites toggle for logged-in accounts
    if type == "2" and account and item.category != "Cine":
        title = bbcode_kodi2html(
            " ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )")
        if "Favorito" in item.title:
            title = bbcode_kodi2html(
                " ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )")
        if config.get_library_support():
            title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show +
                                           "[/B][/COLOR] )")
            itemlist.append(
                Item(channel=__channel__, action="findvideos",
                     title=title_label, fulltitle=title_label,
                     url=url_targets, thumbnail=item.thumbnail,
                     show=item.show, folder=False))
        title_label = bbcode_kodi2html(
            " ( [COLOR green][B]Tráiler[/B][/COLOR] )")
        itemlist.append(
            Item(channel=__channel__, action="trailer", title=title_label,
                 fulltitle=title_label, url=url_targets,
                 thumbnail=item.thumbnail, show=item.show))
        itemlist.append(
            Item(channel=__channel__, action="set_status", title=title,
                 fulltitle=title, url=url_targets, thumbnail=item.thumbnail,
                 show=item.show, folder=True))
    data = agrupa_datos(scrapertools.cache_page(item.url))
    patron = '<div class="embed-selector"[^<]+'
    patron += '<h5 class="left"[^<]+'
    patron += '<span[^<]+<b class="key">\s*Idioma.\s*</b>([^<]+)</span[^<]+'
    patron += '<span[^<]+<b class="key">\s*Servidor.\s*</b><b[^>]+>([^<]+)</b[^<]+</span[^<]+'
    patron += '<span[^<]+<b class="key">\s*Calidad.\s*</b>([^<]+)</span[^<]+</h5.*?'
    patron += '<a href="(http[^"]+)".*?'
    patron += '</i>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for idioma, servername, calidad, url, opcion in matches:
        opcion = opcion.strip()
        if opcion != "Descargar":
            opcion = "Ver"
        title = opcion + ": " + servername.strip() + " (" + calidad.strip(
        ) + ")" + " (" + idioma.strip() + ")"
        title = scrapertools.htmlclean(title)
        # Check the connector exists and honor the hide-premium setting
        servername = servername.lower().split(".")[0]
        if servername == "streamin":
            servername = "streaminto"
        if servername == "waaw":
            servername = "netutv"
        if servername == "ul":
            servername = "uploadedto"
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(servername)
        if mostrar_server:
            try:
                servers_module = __import__("servers." + servername)
                thumbnail = item.thumbnail
                plot = item.title + "\n\n" + scrapertools.find_single_match(
                    data, '<meta property="og:description" content="([^"]+)"')
                plot = scrapertools.htmlclean(plot)
                fanart = scrapertools.find_single_match(
                    data, '<div style="background-image.url. ([^\s]+)')
                # Re-attach the id/type marker so "play" can round-trip it
                url += "###" + id + ";" + type
                itemlist.append(
                    Item(channel=__channel__, action="play", title=title,
                         fulltitle=title, url=url, thumbnail=thumbnail,
                         plot=plot, fanart=fanart, show=item.show,
                         folder=True))
            except:
                pass
    ## 2 = movie
    if type == "2" and item.category != "Cine":
        ## STRM entries for all available server links; if no STRM file for
        ## the movie exists yet, show the ">> Añadir a la biblioteca..." item
        try:
            itemlist.extend(file_cine_library(item, url_targets))
        except:
            pass
    return itemlist
def findvideos(item):
    """List watch/download links for an hdfull title; for movies, also add
    favorites/trailer/status actions and the library helper."""
    logger.info("pelisalacarta.channels.hdfull findvideos")
    itemlist = []
    ## Load per-title status flags for the account
    status = jsontools.load_json(scrapertools.cache_page(host + '/a/status/all'))
    url_targets = item.url
    # FIX: default id/type so urls lacking the "###id;type" marker do not
    # raise NameError later (previously swallowed inside the try, which
    # silently dropped every video item; the final type check crashed).
    id = ""
    type = ""
    ## The url may carry "###id;type" metadata appended by previous menus
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]
    # type "2" = movie; offer favorites toggle for logged-in accounts
    if type == "2" and account and item.category != "Cine":
        title = bbcode_kodi2html(" ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )")
        if "Favorito" in item.title:
            title = bbcode_kodi2html(" ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )")
        if config.get_library_support():
            title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )")
            itemlist.append(Item(channel=__channel__, action="findvideos",
                                 title=title_label, fulltitle=title_label,
                                 url=url_targets, thumbnail=item.thumbnail,
                                 show=item.show, folder=False))
        title_label = bbcode_kodi2html(" ( [COLOR green][B]Tráiler[/B][/COLOR] )")
        itemlist.append(Item(channel=__channel__, action="trailer",
                             title=title_label, fulltitle=title_label,
                             url=url_targets, thumbnail=item.thumbnail,
                             show=item.show))
        itemlist.append(Item(channel=__channel__, action="set_status",
                             title=title, fulltitle=title, url=url_targets,
                             thumbnail=item.thumbnail, show=item.show,
                             folder=True))
    data = agrupa_datos(scrapertools.cache_page(item.url))
    patron = '<div class="embed-selector"[^<]+'
    patron += '<h5 class="left"[^<]+'
    patron += '<span[^<]+<b class="key">\s*Idioma.\s*</b>([^<]+)</span[^<]+'
    patron += '<span[^<]+<b class="key">\s*Servidor.\s*</b><b[^>]+>([^<]+)</b[^<]+</span[^<]+'
    patron += '<span[^<]+<b class="key">\s*Calidad.\s*</b>([^<]+)</span[^<]+</h5.*?'
    patron += '<a href="(http[^"]+)".*?'
    patron += '</i>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for idioma, servername, calidad, url, opcion in matches:
        opcion = opcion.strip()
        if opcion != "Descargar":
            opcion = "Ver"
        title = opcion + ": " + servername.strip() + " (" + calidad.strip() + ")" + " (" + idioma.strip() + ")"
        title = scrapertools.htmlclean(title)
        # Check the connector exists and honor the hide-premium setting
        servername = servername.lower().split(".")[0]
        if servername == "streamin":
            servername = "streaminto"
        if servername == "waaw":
            servername = "netutv"
        if servername == "ul":
            servername = "uploadedto"
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(servername)
        if mostrar_server:
            try:
                servers_module = __import__("servers." + servername)
                thumbnail = item.thumbnail
                plot = item.title + "\n\n" + scrapertools.find_single_match(data, '<meta property="og:description" content="([^"]+)"')
                plot = scrapertools.htmlclean(plot)
                fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
                # Re-attach the id/type marker so "play" can round-trip it
                url += "###" + id + ";" + type
                itemlist.append(Item(channel=__channel__, action="play",
                                     title=title, fulltitle=title, url=url,
                                     thumbnail=thumbnail, plot=plot,
                                     fanart=fanart, show=item.show,
                                     folder=True))
            except:
                pass
    ## 2 = movie
    if type == "2" and item.category != "Cine":
        ## STRM entries for all available server links; if no STRM file for
        ## the movie exists yet, show the ">> Añadir a la biblioteca..." item
        try:
            itemlist.extend(file_cine_library(item, url_targets))
        except:
            pass
    return itemlist