def findvideostv(item):
    logger.info("pelisalacarta.channels.allpeliculas findvideostv")
    itemlist = []

    # Fill the language and quality dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = scrapertools.downloadpage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode=' \
             '"([^"]+)" season="' + item.infoLabels['season'] + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, episode, language, url in matches:
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers." + server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = "Episodio " + episode + " ["
            titulo += server.capitalize() + "] [" + idioma + "] (" + calidad_videos.get(quality) + ")"
            item.infoLabels['episode'] = episode
            itemlist.append(item.clone(action="play", title=titulo, url=url))

    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode=' \
             '"([^"]+)" season="' + item.infoLabels['season'] + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, episode, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers." + server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(server)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "Episodio " + episode + " "
                titulo += server.capitalize() + " [" + idioma + "] (" + calidad_videos.get(quality) + ")"
                item.infoLabels['episode'] = episode
                itemlist.append(item.clone(action="play", title=titulo, url=url))

    itemlist.sort(key=lambda item: (int(item.infoLabels['episode']), item.title))
    try:
        from core import tmdb
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass

    return itemlist
def listadoCapitulos(item):
    logger.info("pelisalacarta.channels.tremendaseries capitulos")
    itemlist = []
    conEnlaces = False

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", scrapertools.cache_page(item.url))
    patron = '<div class="tit_enlaces"><ul>(.*?)<div class="addthis_sharing_toolbox"'
    data = scrapertools.find_single_match(data, patron)

    patron = '<a href="([^"]+).*?'                                                        # url
    patron += '<div class="enlaces" style="([^"]+).*?'                                    # links? if background-color in style: no links
    patron += '<span class="icon-forward3">.*?</span>([^<]+).*?'                          # SeasonxEpisode
    patron += '<div class="text_en_boton" [^>]+>(.*?)</div><div class="text_en_boton2"'   # title
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtenlaces, scrapedcapitulo, scrapedtitle in matches:
        temporada, episodio = scrapedcapitulo.split('x')
        if item.extra != "serie_add" and temporada != item.infoLabels['season']:
            # We only want the links for this season
            continue

        if '</span>' in scrapedtitle:
            data = '<' + scrapertools.find_single_match(scrapedtitle, '<([^>]+)') + '>'
            scrapedtitle = scrapedtitle.replace(data, '')
            scrapedtitle = scrapedtitle.replace('</span>', '')

        # Replace S01E01 with 1x01
        scrapedtitle = re.sub(r'(S\d*E\d*)', scrapedcapitulo, scrapedtitle, flags=re.I)

        newItem = item.clone(title=scrapedtitle, url=scrapedurl, text_color=color1, action="findvideos")
        newItem.infoLabels['season'] = temporada
        newItem.infoLabels['episode'] = episodio

        if not 'background-color' in scrapedtenlaces:
            conEnlaces = True
        elif item.extra != "serie_add":
            # There are no links for this episode. Add it as a label (TAG)...
            # ...unless we are adding the series to the library.
            newItem.action = ''
            newItem.text_color = color3
            newItem.thumbnail = thumbnail_host

        itemlist.append(newItem)

    if item.extra != "serie_add":
        # Get the data for all the episodes of the season using multiple threads
        tmdb.set_infoLabels(itemlist, __modo_grafico__)

        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own name, add it to the item title
                i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # If the episode has its own image, replace the poster
                i.thumbnail = i.infoLabels['poster_path']

    '''
    if config.get_library_support() and conEnlaces:
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la biblioteca", url=item.url,
                             action="add_serie_to_library", extra="episodios###serie_add", show=item.show,
                             thumbnail=thumbnail_host, fanart=fanart, text_color=color3))'''

    return itemlist
def findvideos(item):
    logger.info()
    itemlist = []

    try:
        filtro_idioma = config.get_setting("filterlanguages", item.channel)
        filtro_enlaces = config.get_setting("filterlinks", item.channel)
    except:
        filtro_idioma = 3
        filtro_enlaces = 2

    dict_idiomas = {'Español': 2, 'Latino': 1, 'Subtitulado': 0}

    # Look for the synopsis
    data = httptools.downloadpage(item.url).data
    year = scrapertools.find_single_match(data, '<h1><span>.*?rel="tag">([^<]+)</a>')

    if year and item.extra != "library":
        item.infoLabels['year'] = int(year)
        # Extend the data via tmdb
        if not item.infoLabels['plot']:
            try:
                tmdb.set_infoLabels(item, __modo_grafico__)
            except:
                pass

    if not item.infoLabels.get('plot'):
        plot = scrapertools.find_single_match(data, '<div class="sinopsis"><p>(.*?)</p>')
        item.infoLabels['plot'] = plot

    if filtro_enlaces != 0:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item)
        if list_enlaces:
            itemlist.append(item.clone(action="", title="Enlaces Online", text_color=color1, text_blod=True))
            itemlist.extend(list_enlaces)

    if filtro_enlaces != 1:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "descarga", item)
        if list_enlaces:
            itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color=color1, text_blod=True))
            itemlist.extend(list_enlaces)

    if itemlist:
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer",
                                   context="", text_color="magenta"))

        # "Añadir esta película a la biblioteca de XBMC" option
        if item.extra != "library":
            if config.get_library_support():
                itemlist.append(Item(channel=item.channel, title="Añadir a la biblioteca", text_color="green",
                                     filtro=True, action="add_pelicula_to_library", url=item.url,
                                     infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
                                     extra="library"))
    else:
        itemlist.append(item.clone(title="No hay enlaces disponibles", action="", text_color=color3))

    return itemlist
def episodios(item):
    logger.info("pelisalacarta.channels.pelispedia episodios")
    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, host=CHANNEL_HOST, headers=CHANNEL_DEFAULT_HEADERS)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)

    patron = '<li class="clearfix gutterVertical20"><a href="([^"]+)".*?><small>(.*?)</small>.*?' \
             '<span class.+?>(.*?)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedname in matches:
        logger.info("scrap {}".format(scrapedtitle))
        patron = 'Season\s+(\d),\s+Episode\s+(\d+)'
        match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
        season, episode = match[0]

        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
            continue

        title = "{season}x{episode}: {name}".format(season=season, episode=episode.zfill(2),
                                                    name=scrapertools.unescape(scrapedname))
        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3)
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}
        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)
        itemlist.append(new_item)

    # TODO: skip this when adding to the library
    if not item.extra:
        # Get the data for all the episodes of the season using multiple threads
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own name, add it to the item title
                i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # If the episode has its own image, replace the poster
                i.thumbnail = i.infoLabels['poster_path']

    itemlist.sort(key=lambda item: item.title, reverse=config.get_setting('orden_episodios', __channel__))

    # "Añadir esta serie a la biblioteca de XBMC" option
    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la biblioteca de XBMC", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                             text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

    return itemlist
def get_episodios(item):
    logger.info()
    itemlist = []
    # infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    patron = 'vars.title =(.*?)};'
    try:
        data_dict = jsontools.load_json(scrapertools.get_match(data, patron) + '}')
    except:
        return itemlist  # Return an empty list

    # Group the links by episode (seasonXepisode)
    temXcap_dict = {}
    for link in data_dict['link']:
        try:
            season = str(int(link['season']))
            episode = str(int(link['episode'])).zfill(2)
        except:
            continue

        if int(season) != item.infoLabels["season"] and item.extra != "serie_add":
            # Discard episodes from other seasons, unless we want them all
            continue

        title_id = link['title_id']
        id = season + "x" + episode
        if id in temXcap_dict:
            l = temXcap_dict[id]
            l.append(link)
            temXcap_dict[id] = l
        else:
            temXcap_dict[id] = [link]

    # Sort the link list by season and episode
    temXcap_list = temXcap_dict.items()
    temXcap_list.sort(key=lambda x: (int(x[0].split("x")[0]), int(x[0].split("x")[1])))

    for episodio in temXcap_list:
        title = '%s (%s)' % (item.contentSerieName, episodio[0])
        item.infoLabels['season'], item.infoLabels['episode'] = episodio[0].split('x')
        itemlist.append(item.clone(action="findvideos", title=title, viewmode="movie_with_plot",
                                   text_color="0xFFFFCE9C"))

    if item.extra != "serie_add":
        # Get the data for all the episodes of the season using multiple threads
        tmdb.set_infoLabels(itemlist)
        for i in itemlist:
            # If the episode has its own name, add it to the item title
            title = "%s: %s" % (i.title, i.infoLabels['title'])
            i.title = title

    return itemlist
def findvideos(item):
    logger.info()
    itemlist = []
    item.text_color = color2

    # Download the page
    data = httptools.downloadpage(item.url).data

    sinopsis = scrapertools.find_single_match(data, '<h2>Sinopsis</h2>.*?>(.*?)</p>')
    item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)

    # Look it up on tmdb if it has not been done before
    if item.extra != "eroticas":
        if item.extra != "library":
            year = scrapertools.find_single_match(data, 'Año de lanzamiento.*?"ab">(\d+)')
            if year:
                try:
                    item.infoLabels['year'] = year
                    # Get the basic data for the movie via tmdb
                    tmdb.set_infoLabels(item, __modo_grafico__)
                except:
                    pass

        trailer_url = scrapertools.find_single_match(data, 'id="trailerpro">.*?src="([^"]+)"')
        item.infoLabels["trailer"] = trailer_url

    patron = '<td><a href="([^"]+)".*?title="([^"]+)".*?<td>([^"]+)<\/td><td>([^"]+)<\/td>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, server, idioma, calidad in matches:
        if server == "Embed":
            server = "Nowvideo"
        if server == "Ul":
            server = "Uploaded"
        title = "%s [%s][%s]" % (server, idioma, calidad)
        itemlist.append(item.clone(action="play", title=title, url=url))

    patron = 'id="(embed[0-9]*)".*?<div class="calishow">(.*?)<.*?src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for id_embed, calidad, url in matches:
        title = scrapertools.find_single_match(url, "(?:http://|https://|//)(.*?)(?:embed.|videoembed|)/")
        if re.search(r"(?i)inkapelis|goo.gl", title):
            title = "Directo"
        idioma = scrapertools.find_single_match(data, 'href="#%s".*?>([^<]+)<' % id_embed)
        title = "%s [%s][%s]" % (title.capitalize(), idioma, calidad)
        itemlist.append(item.clone(action="play", title=title, url=url))

    if itemlist:
        if not config.get_setting('menu_trailer', item.channel):
            itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                       text_color="magenta", context=""))

        if item.extra != "library":
            if config.get_library_support():
                itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca",
                                     action="add_pelicula_to_library", url=item.url, fulltitle=item.fulltitle,
                                     infoLabels={'title': item.fulltitle}, text_color="green", extra="library"))

    return itemlist
def listadoSeries(item):
    logger.info("pelisalacarta.channels.tremendaseries listadoSeries")
    itemlist = []

    if __modo_grafico__:
        nItemxPage = 28
    else:
        nItemxPage = 100  # or however many there are on the page

    data0 = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", scrapertools.cache_page(item.url))
    patron = '<div class="nuevos_caps"(.*?)</div></div>'
    data = scrapertools.find_single_match(data0, patron)

    patron = '<div class="portadas_home">.*?href="([^"]+).*?'   # url
    patron += 'title="([^"]+).*?'                               # title
    patron += 'src=series([^&]+).*?'                            # thumbnail
    matches = scrapertools.find_multiple_matches(data, patron)

    item_inicial = int(item.extra)
    items_total = len(matches)
    if (item_inicial + nItemxPage) < items_total:
        item_final = item_inicial + nItemxPage
    else:
        item_final = items_total
    matches = matches[item_inicial:item_final]
    # logger.debug(" %i - %i - %i" % (item_inicial, item_final, items_total))

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        thumbnail = 'http://tremendaseries.com/screen' + scrapedthumbnail
        newItem = Item(channel=__channel__, action="listadoTemporadas", title=scrapedtitle.strip(), url=scrapedurl,
                       thumbnail=thumbnail, text_color=color1, folder=True, fanart=fanart, show=scrapedtitle)
        itemlist.append(newItem)

    logger.debug(str(len(itemlist)))
    # Get the basic data for all the series using multiple threads
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    if itemlist:
        # Paginate only when there are results:
        # - through sub-pages while the result set is bigger than nItemxPage
        # - through the website when there are no more results left to show for this url
        if item_final < items_total:
            # Next sub-page
            itemlist.append(Item(channel=__channel__, action="listadoSeries", title=">> Página siguiente",
                                 url=item.url, text_color=color2, fanart=fanart, thumbnail=thumbnail_host,
                                 extra=str(item_final)))
        else:
            # Next page on the web
            patron = '<span class="current">.*?<a href="([^"]+)'
            url = scrapertools.find_single_match(data0, patron)
            if url:
                itemlist.append(Item(channel=__channel__, action="listadoSeries", title=">> Página siguiente",
                                     url=url, text_color=color2, fanart=fanart, thumbnail=thumbnail_host,
                                     extra='0'))

    return itemlist
def peliculas(item):
    logger.info("pelisalacarta.channels.cinetux peliculas")
    itemlist = []
    item.text_color = color2

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS, host=CHANNEL_HOST)

    # Extract the entries (folders)
    patron = '<div class="item">.*?href="([^"]+)".*?src="([^"]+)"'
    patron += '.*?<h3 class="name"><a.*?>([^<]+)</a>'
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        try:
            fulltitle, year = scrapedtitle.rsplit("(", 1)
            year = scrapertools.get_match(year, "(\d{4})")
        except:
            fulltitle = scrapedtitle
            year = ""
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")

        new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=fulltitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, infoLabels={}, contentTitle=fulltitle, context="05",
                              viewmode="list")
        if year != "":
            new_item.infoLabels["year"] = int(year)
        itemlist.append(new_item)

    try:
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass

    # Extract the pager
    next_page_link = scrapertools.find_single_match(data, '<a href="([^"]+)"\s+><span [^>]+>»</span>')
    if next_page_link != "":
        itemlist.append(item.clone(action="peliculas", title=">> Página siguiente", url=next_page_link,
                                   text_color=color3))

    return itemlist
def temporadas(item):
    logger.info("pelisalacarta.channels.pelispedia temporadas")
    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, host=CHANNEL_HOST, headers=CHANNEL_DEFAULT_HEADERS)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)

    if not item.fanart:
        patron = '<div class="hero-image"><img src="([^"]+)"'
        item.fanart = scrapertools.find_single_match(data, patron)

    patron = '<h3 class="pt15 pb15 dBlock clear seasonTitle">([^<]+).*?'
    patron += '<div class="bpM18 bpS25 mt15 mb20 noPadding"><figure><img src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if len(matches) > 1:
        for scrapedseason, scrapedthumbnail in matches:
            temporada = scrapertools.find_single_match(scrapedseason, '(\d+)')
            newItem = item.clone(text_color=color2, action="episodios", season=temporada,
                                 thumbnail=scrapedthumbnail)
            newItem.infoLabels['season'] = temporada
            newItem.extra = ""
            itemlist.append(newItem)

        # Get the data for all the seasons of the series using multiple threads
        tmdb.set_infoLabels(itemlist, __modo_grafico__)

        for i in itemlist:
            i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
            if i.infoLabels['title']:
                # If the season has its own name, add it to the item title
                i.title += " - %s" % (i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # If the season has its own poster, replace the series poster
                i.thumbnail = i.infoLabels['poster_path']

        itemlist.sort(key=lambda item: item.title)

        # "Añadir esta serie a la biblioteca de XBMC" option
        if config.get_library_support() and len(itemlist) > 0:
            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la biblioteca de XBMC",
                                 url=item.url, action="add_serie_to_library", extra="episodios", show=item.show,
                                 category="Series", text_color=color1, thumbnail=thumbnail_host,
                                 fanart=fanart_host))

        return itemlist
    else:
        return episodios(item)
def get_temporadas(item):
    logger.info()
    itemlist = []
    infoLabels = {}

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    patron = 'vars.title =(.*?)};'
    try:
        data_dict = jsontools.load_json(scrapertools.get_match(data, patron) + '}')
    except:
        return itemlist  # Return an empty list

    if item.extra == "serie_add":
        itemlist = get_episodios(item)
    else:
        if len(data_dict["season"]) == 1:
            # If there is only one season ...
            item.infoLabels['season'] = data_dict["season"][0]["number"]
            itemlist = get_episodios(item)
        else:
            # ... or if there is more than one
            item.viewcontent = "seasons"
            data_dict["season"].sort(key=lambda x: (x['number']))  # sort by season number
            for season in data_dict["season"]:
                # Filter the links by season
                enlaces = filter(lambda l: l["season"] == season['number'], data_dict["link"])
                if enlaces:
                    item.infoLabels['season'] = season['number']
                    title = '%s Temporada %s' % (item.title, season['number'])
                    itemlist.append(item.clone(action="get_episodios", title=title, text_color="0xFFFFCE9C",
                                               viewmode="movie_with_plot"))

            # Get the data for all the seasons using multiple threads
            tmdb.set_infoLabels(itemlist)

        if config.get_library_support() and itemlist:
            infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
                          'tvdb_id': item.infoLabels['tvdb_id'],
                          'imdb_id': item.infoLabels['imdb_id']}
            itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la biblioteca",
                                 text_color="0xFFe5ffcc", action="add_serie_to_library",
                                 extra='get_episodios###serie_add', url=item.url,
                                 contentSerieName=data_dict["title"], infoLabels=infoLabels,
                                 thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png'))

    return itemlist
def busqueda(item):
    logger.info()
    itemlist = []
    item.text_color = color2

    # Download the page
    data = httptools.downloadpage(item.url).data
    from core import jsontools
    data = jsontools.load_json(data)

    for entry in data["results"]:
        try:
            title = entry["richSnippet"]["metatags"]["ogTitle"]
            url = entry["richSnippet"]["metatags"]["ogUrl"]
            thumbnail = entry["richSnippet"]["metatags"]["ogImage"]
        except:
            continue

        try:
            title_split = re.split(r"\s*\((\d)", title, 1)
            year = title_split[1] + scrapertools.find_single_match(title_split[2], '(\d{3})\)')
            fulltitle = title_split[0]
        except:
            fulltitle = title
            year = ""

        if not "DVD" in title and not "HDTV" in title and not "HD-" in title:
            continue

        infolabels = {'year': year}
        new_item = item.clone(action="findvideos", title=title, fulltitle=fulltitle, url=url, thumbnail=thumbnail,
                              infoLabels=infolabels, contentTitle=fulltitle, contentType="movie")
        itemlist.append(new_item)

    try:
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass

    actualpage = int(scrapertools.find_single_match(item.url, 'start=(\d+)'))
    totalresults = int(data["cursor"]["resultCount"])
    if actualpage + 20 <= totalresults:
        url_next = item.url.replace("start=" + str(actualpage), "start=" + str(actualpage + 20))
        itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Página Siguiente", url=url_next))

    return itemlist
def peliculas(item):
    logger.info()
    itemlist = []
    item.text_color = color2

    # Download the page
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, 'finddatepost\((\{.*?\]\}\})\);')
    from core import jsontools
    data = jsontools.load_json(data)["feed"]

    for entry in data["entry"]:
        for link in entry["link"]:
            if link["rel"] == "alternate":
                title = link["title"]
                url = link["href"]
                break
        thumbnail = entry["media$thumbnail"]["url"].replace("s72-c/", "")

        try:
            title_split = re.split(r"\s*\((\d)", title, 1)
            year = title_split[1] + scrapertools.find_single_match(title_split[2], '(\d{3})\)')
            fulltitle = title_split[0]
        except:
            fulltitle = title
            year = ""

        if not "DVD" in title and not "HDTV" in title and not "HD-" in title:
            continue

        infolabels = {'year': year}
        new_item = item.clone(action="findvideos", title=title, fulltitle=fulltitle, url=url, thumbnail=thumbnail,
                              infoLabels=infolabels, contentTitle=fulltitle, contentType="movie")
        itemlist.append(new_item)

    try:
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass

    actualpage = int(scrapertools.find_single_match(item.url, 'start-index=(\d+)'))
    totalresults = int(data["openSearch$totalResults"]["$t"])
    if actualpage + 20 < totalresults:
        url_next = item.url.replace("start-index=" + str(actualpage), "start-index=" + str(actualpage + 20))
        itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página Siguiente", url=url_next))

    return itemlist
def peliculas(item):
    logger.info()
    itemlist = []
    item.text_color = color2

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Extract the entries (folders)
    patron = '<div class="item">.*?<div class="audio">\s*([^<]*)<.*?href="([^"]+)".*?src="([^"]+)"' \
             '.*?<h3 class="name"><a.*?>([^<]+)</a>'
    matches = scrapertools.find_multiple_matches(data, patron)

    for calidad, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        try:
            fulltitle, year = scrapedtitle.rsplit("(", 1)
            year = scrapertools.get_match(year, '(\d{4})')
            if "/" in fulltitle:
                fulltitle = fulltitle.split(" /", 1)[0]
            scrapedtitle = "%s (%s)" % (fulltitle, year)
        except:
            fulltitle = scrapedtitle
            year = ""

        if calidad:
            scrapedtitle += " [%s]" % calidad

        new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=fulltitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, contentTitle=fulltitle, contentType="movie")
        if year:
            new_item.infoLabels['year'] = int(year)
        itemlist.append(new_item)

    try:
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass

    # Extract the pager
    next_page_link = scrapertools.find_single_match(data, '<a href="([^"]+)"\s+><span [^>]+>»</span>')
    if next_page_link:
        itemlist.append(item.clone(action="peliculas", title=">> Página siguiente", url=next_page_link,
                                   text_color=color3))

    return itemlist
def temporadas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    item_id = scrapertools.find_single_match(data, 'data-json=\'\{"item_id": "([^"]+)')

    url = 'https://www.dilo.nu/api/web/seasons.php'
    post = 'item_id=%s' % item_id
    data = jsontools.load(httptools.downloadpage(url, post=post).data)

    for tempo in data:
        itemlist.append(
            item.clone(action='episodios', title='Temporada %s' % tempo['number'], item_id=item_id,
                       contentType='season', contentSeason=tempo['number']))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def series(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    patron = '<img class="portada" src="/([^"]+)"><[^<]+><a href="([^"]+)".*?'
    patron += 'class="link-title"><h2>([^<]+)</h2>'  # title
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie', url=scrapedurl,
                             thumbnail=host + scrapedthumbnail, contentSerieName=scrapedtitle, show=scrapedtitle,
                             action="temporadas", contentType='tvshow'))

    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    pagination = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">')
    if pagination:
        itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination,
                             thumbnail=get_thumb("next.png")))

    return itemlist
def temporadas(item):
    logger.info()
    itemlist = []

    # If it comes from list_latest, clear season and episode
    if item.contentEpisodeNumber:
        item.__dict__['infoLabels'].pop('episode')
    if item.contentSeason:
        item.__dict__['infoLabels'].pop('season')

    data = do_downloadpage(item.url)
    dict_data = jsontools.load(data)
    if 'title' not in dict_data:
        return itemlist

    for element in dict_data['title']['seasons']:
        itemlist.append(
            item.clone(action='episodios', title='Temporada ' + str(element['number']), contentType='season',
                       contentSeason=element['number']))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def destacadas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = 'Películas destacadas(.*?)</section>'
    bloque = scrapertools.find_single_match(data, patron)

    patron = 'href="([^"]+).*?'
    patron += 'title="([^"]+).*?'
    patron += 'data-src="([^"]+).*?'
    patron += 'data-year="([^"]+)'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear in matches:
        scrapedtitle = scrapedtitle.replace("Película ", "")
        itemlist.append(
            Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
                 infoLabels={'year': scrapedyear}, thumbnail=host + scrapedthumbnail,
                 title=scrapedtitle + " (%s)" % scrapedyear, url=host + scrapedurl))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def estrenos(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url, canonical=canonical).data
    patron = 'item-pelicula.*?href="([^"]+).*?'
    patron += 'src="([^"]+).*?'
    patron += 'text-center">([^<]+).*?'
    patron += '<p>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.replace("Película ", "")
        scrapedtitle = quitano(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
                 infoLabels={'year': scrapedyear}, thumbnail=scrapedthumbnail,
                 title=scrapedtitle + " (%s)" % scrapedyear, url=scrapedurl, language='LAT'))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def sub_search(item):
    logger.info()
    itemlist = []

    url = item.url
    data = httptools.downloadpage(item.url, canonical=canonical).data
    token = scrapertools.find_single_match(data, 'csrf-token" content="([^"]+)')
    data_js = httptools.downloadpage(item.url + "&_token=" + token,
                                     headers={'X-Requested-With': 'XMLHttpRequest'}).json

    for js in data_js["data"]["m"]:
        js["title"] = quitano(js["title"])
        itemlist.append(
            Item(channel=item.channel, action="findvideos", contentTitle=js["title"],
                 infoLabels={'year': js["release_year"]}, thumbnail=js["cover"],
                 title=js["title"] + " (%s)" % js["release_year"], url=js["slug"]))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def episodios(item):
    logger.info()
    itemlist = []

    data = do_downloadpage(item.url)

    patron = "<a href='([^']+)'>.*?(\d+)X(\d+) - (.*?)</a> (.*?)(?:<br>|</div)"
    episodes = re.findall(patron, data, re.MULTILINE | re.DOTALL)

    for url, season, episode, title, langs in episodes:
        if item.contentSeason and item.contentSeason != int(season):
            continue

        languages = ', '.join([IDIOMAS.get(lang, lang) for lang in re.findall('banderas/([^\.]+)', langs)])
        titulo = '%sx%s %s [%s]' % (season, episode, title, languages)

        itemlist.append(item.clone(
            action='findvideos',
            url=urlparse.urljoin(HOST, url),
            title=titulo,
            contentType='episode',
            contentSeason=season,
            contentEpisodeNumber=episode
        ))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def temporadas(item):
    logger.info()
    itemlist = []

    data = do_downloadpage(item.url)

    # If it comes from the "novedades" listing, clear season and episode
    if item.contentEpisodeNumber:
        item.__dict__['infoLabels'].pop('episode')
    if item.contentSeason:
        item.__dict__['infoLabels'].pop('season')

    temporadas = re.findall('Temporada (\d+)', data)
    for tempo in sorted(temporadas, key=lambda x: int(x)):
        tempo = int(tempo)
        itemlist.append(
            item.clone(action='episodios', title='Temporada ' + str(tempo), contentType='season',
                       contentSeason=tempo))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def search(item, texto):
    logger.info("texto=%s" % texto)
    itemlist = []

    try:
        item.url = urlparse.urljoin(HOST, 'all.php')
        data = do_downloadpage(item.url)

        matches = re.findall("<a href='([^']+)' target='_blank'>([^<]+)</a>", data)
        for url, title in matches:
            if texto not in title.lower():
                continue
            itemlist.append(item.clone(
                title=title,
                url=urlparse.urljoin(HOST, url),
                action='temporadas',
                contentType='tvshow',
                contentSerieName=title
            ))

        tmdb.set_infoLabels(itemlist)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)

    return itemlist
def shows(item):
    logger.info()
    itemlist = []

    # Fake pagination
    paginacion = 32
    if not item.page:
        item.page = 0
    next_page2 = item.page + paginacion

    # Download the html page
    data = load_data(item.url)
    pattern = '"in"><a href="([^"]+)">(.*?)</a>'
    matches = scrapertools.find_multiple_matches(data, pattern)
    cnt = len(matches)

    for link, title in matches[item.page:item.page + paginacion]:
        itemlist.append(
            Item(channel=item.channel, title=title, contentSerieName=title, url=host + link, action="seasons"))

    if next_page2 < cnt:
        itemlist.append(
            Item(channel=item.channel, title='Siguiente >>', url=item.url, action="shows", page=next_page2))

    tmdb.set_infoLabels(itemlist, True)

    return itemlist
def list_latest(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    data_url = scrapertools.find_single_match(data, '<iframe.*?src=(.*?) ')
    data = get_source(data_url)

    patron = "<div class='online'>.*?<img src=(.*?) class=.*?alt=(.*?) title=.*?"
    patron += "<b><a href=(.*?) target=.*?align=right><div class=s7>(.*?) <"
    matches = re.compile(patron, re.DOTALL).findall(data)

    count = 0
    for thumbnail, title, url, language in matches:
        count += 1
        if count >= item.indexp and count < item.indexp + perpage:
            path = scrapertools.find_single_match(thumbnail, "w\w+(/\w+.....)")
            filtro_list = {"poster_path": path}
            filtro_list = filtro_list.items()
            itemlist.append(
                Item(channel=item.channel, title=title, fulltitle=title, contentTitle=title, url=host + url,
                     thumbnail=thumbnail, language=language, infoLabels={'filtro': filtro_list}, extra="one",
                     action='findvideos'))

    tmdb.set_infoLabels(itemlist)

    item.indexp += perpage
    itemlist.append(
        Item(channel=item.channel, title="Siguiente >>", url=item.url, extra="one", indexp=item.indexp,
             action='list_latest'))

    return itemlist
def search(item, texto):
    logger.info("texto: %s" % texto)
    itemlist = []

    try:
        post = {"n": texto}
        data = httptools.downloadpage(host + 'wp-content/themes/wikiSeries/searchajaxresponse.php',
                                      post=urllib.urlencode(post)).data
        # ~ logger.debug(data)

        matches = re.compile('<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
        for url, data_show in matches:
            title = scrapertools.find_single_match(data_show, '<span class="titleinst">([^<]*)</span>')
            year = scrapertools.find_single_match(data_show, '<span class="titleinst year">([^<]*)</span>')
            thumb = scrapertools.find_single_match(data_show, 'src="([^"]+)"')
            itemlist.append(
                item.clone(action='temporadas', url=url, title=title, thumbnail=thumb, contentType='tvshow',
                           contentSerieName=title, infoLabels={'year': year}))

        tmdb.set_infoLabels(itemlist)
        return itemlist
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def lista(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    if item.title == "Peliculas Animadas":
        data_lista = scrapertools.find_single_match(
            data, '<div id="archive-content" class="animation-2 items">(.*)<a href=\'')
    else:
        data_lista = scrapertools.find_single_match(
            data, '<div class="items">(.+?)<\/div><\/div><div class=.+?>')

    patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
    matches = scrapertools.find_multiple_matches(data_lista, patron)

    for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedplot in matches:
        if item.title == "Peliculas Animadas":
            itemlist.append(
                item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentType="movie",
                           plot=scrapedplot, action="findvideos", show=scrapedtitle))
        else:
            itemlist.append(
                item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                           context=autoplay.context, plot=scrapedplot, action="episodios", show=scrapedtitle))

    if item.title != "Peliculas Animadas":
        tmdb.set_infoLabels(itemlist)

    return itemlist
def lista(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = '<a href="([^"]+)" '
    patron += 'class="link">.+?<img src="([^"]+)".*?'
    patron += 'title="([^"]+)">'
    matches = scrapertools.find_multiple_matches(data, patron)

    # Pagination
    num_items_x_pagina = 30
    min = item.page * num_items_x_pagina
    min = min - item.page
    max = min + num_items_x_pagina - 1

    b = 0
    for link, img, name in matches[min:max]:
        b = b + 1
        if " y " in name:
            title = name.replace(" y ", " & ")
        else:
            title = name
        url = host + link
        scrapedthumbnail = host + img
        context = renumbertools.context(item)
        context2 = autoplay.context
        context.extend(context2)
        itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail,
                                   show=title, contentSerieName=title, context=context))

    if b >= 29:
        itemlist.append(
            Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url,
                 action="lista", page=item.page + 1))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def lista(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = '<div id="mt-\d+".*?<a href="([^"]+)".*?'
    patron += '<img src="([^"]+)" alt="([^"]+)".*?'
    patron += '<span class="year">(\d+)</span>.*?'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        scrapedtitle = scrapedtitle.replace("Ver", "").replace("online", "")
        title = '%s (%s)' % (scrapedtitle, scrapedyear)
        url = scrapedurl
        new_item = Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
                        infoLabels={'year': scrapedyear})
        if '/serie/' in url:
            new_item.action = 'temporadas'
            new_item.contentSerieName = scrapedtitle
        else:
            new_item.action = 'findvideos'
            new_item.contentTitle = scrapedtitle
        itemlist.append(new_item)

    tmdb.set_infoLabels(itemlist, True)

    next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)">Siguiente</a>')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(
            item.clone(channel=item.channel, action="lista", title="Next page >>", text_color="blue",
                       url=next_page_url))

    return itemlist
def last_seasons(item):
    logger.info()
    itemlist = []

    data = do_downloadpage(item.url)

    matches = re.compile(' class="item se seasons"(.*?)</article>', re.DOTALL).findall(data)
    for article in matches:
        url = scrapertools.find_single_match(article, ' href="([^"]+)"')
        thumb = scrapertools.find_single_match(article, ' src="([^"]+)"')
        title = scrapertools.find_single_match(article, '<span class="c">(.*?)</span>')
        numtempo = scrapertools.find_single_match(article, '<span class="b">(\d+)</span>')
        if not url or not title or not numtempo:
            continue

        itemlist.append(
            item.clone(action='episodios', title='%s - Temporada %s' % (title, numtempo), thumbnail=thumb,
                       url=url, contentType='season', contentSeason=numtempo, contentSerieName=title))

    tmdb.set_infoLabels(itemlist)

    next_page_link = scrapertools.find_single_match(data, ' href="([^"]+)" ><span class="icon-chevron-right">')
    if next_page_link:
        itemlist.append(item.clone(title='>> Página siguiente', url=next_page_link, action='list_all'))

    return itemlist
def sub_search(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)

    patron = '<div class="thumbnail animation-2"><a href="([^"]+)">.*?'       # url
    patron += '<img src="([^"]+)" alt="([^"]+)" />.*?'                        # img and title
    patron += '<span class="([^"]+)".*?'                                      # tipo
    patron += '<span class="year">([^<]+)</span>'                             # year
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year in matches:
        itemlist.append(
            item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle, action="findvideos",
                       infoLabels={"year": year}, thumbnail=scrapedthumbnail, text_color=color3, page=0))

    paginacion = scrapertools.find_single_match(data, '<a class="page larger" href="([^"]+)">\d+</a>')
    if paginacion:
        itemlist.append(Item(channel=item.channel, action="sub_search", title="» Siguiente »", url=paginacion))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def peliculas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    token = scrapertools.find_single_match(data, 'csrf-token" content="([^"]+)')
    post = "page=%s&type=%s&_token=%s" % (item.page, item.type, token)
    if item.slug:
        post += "&slug=%s" % item.slug
    data = httptools.downloadpage(host + "/pagination", post=post,
                                  headers={'X-Requested-With': 'XMLHttpRequest'}).data

    patron = '(?s)href="([^"]+)".*?'
    patron += 'src="([^"]+)".*?'
    patron += 'text-center">([^<]+).*?'
    patron += '<p>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
        scrapedtitle = quitano(scrapedtitle)
        itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
                             infoLabels={'year': scrapedyear}, thumbnail=scrapedthumbnail,
                             title=scrapedtitle + " (%s)" % scrapedyear, url=scrapedurl, language='LAT'))

    tmdb.set_infoLabels(itemlist)

    # Pagination
    if len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, action="peliculas", page=item.page + 1,
                             title="Página siguiente >>", type=item.type, slug=item.slug, url=item.url))

    return itemlist
def list_all(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    patron = '<article class=".*?">.*? href="([^"]+)".*?<img src="([^"]+)".*?<h3 class="card-tvshow__title">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        url = scrapedurl
        action = 'seasons'
        new_item = Item(channel=item.channel, action=action, title=title, url=url,
                        contentSerieName=scrapedtitle, thumbnail=thumbnail)
        itemlist.append(new_item)

    tmdb.set_infoLabels(itemlist, seekTmdb=True)

    # Pagination
    url_next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">')
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))

    return itemlist
def episodesxseason(item):
    logger.info()
    itemlist = []

    episodes = item.json_episodios

    for episode in episodes:
        infoLabels = item.infoLabels
        language = item.language if item.language else 'VOSE'
        it = Item(
            action='findvideos',
            channel=item.channel,
            contentType=item.contentType,
            infoLabels=infoLabels,
            language=language,
            thumbnail=item.thumbnail,
            title=item.title,
            urls=episode['players'],
            url=item.url,
            viewType='videos'
        )

        # Dynamic determination of contentType
        if not item.contentType == 'movie':
            it.title = (config.get_localized_string(60036) % episode['episode'])
            it.contentEpisodeNumber = episode['episode']

        itemlist.append(it)

    tmdb.set_infoLabels(itemlist, seekTmdb=True)

    # If it is a movie, go straight to findvideos
    if len(itemlist) == 1 and item.contentType == 'movie':
        return findvideos(itemlist[0])
    else:
        return itemlist
def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    matches = re.compile("<li class='mark-\d+'><div class='imagen'>(.*?)</li>", re.DOTALL).findall(data)
    for data_epi in matches:
        # ~ logger.debug(data_epi)
        try:
            season, episode = scrapertools.find_single_match(data_epi, "<div class='numerando'>(\d+)\s*-\s*(\d+)")
        except:
            continue
        if item.contentSeason and item.contentSeason != int(season):
            continue

        thumb = scrapertools.find_single_match(data_epi, " src='([^']+)")
        url, title = scrapertools.find_single_match(data_epi, " href='([^']+)'>([^<]+)")
        titulo = '%sx%s %s' % (season, episode, title)

        itemlist.append(
            item.clone(action='findvideos', url=url, title=titulo, thumbnail=thumb, contentType='episode',
                       contentSeason=season, contentEpisodeNumber=episode))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def findvideos(item):
    logger.info()
    itemlist = []
    encontrados = []

    data = httptools.downloadpage(item.url).data
    patron = 'hand" rel="([^"]+).*?title="(.*?)".*?<span>([^<]+)</span>.*?</span><span class="q">(.*?)<'
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, server_name, language, quality in matches:
        if scrapedurl in encontrados:
            continue
        encontrados.append(scrapedurl)
        language = language.strip()
        quality = quality.strip()
        mq = "(" + quality + ")"
        if "http" in quality:
            quality = mq = ""
        titulo = "%s (" + language + ") " + mq
        itemlist.append(item.clone(channel=item.channel, action="play", title=titulo, url=scrapedurl,
                                   folder=False, language=language, quality=quality))

    tmdb.set_infoLabels(itemlist, True)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

    if itemlist:
        itemlist.append(Item(channel=item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer",
                                   text_color="magenta"))

        # "Añadir esta película a la biblioteca de KODI" option
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir pelicula a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
                                 fulltitle=item.fulltitle))

    return itemlist
def findvideos(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    if "Próximamente" in data:
        itemlist.append(Item(channel=item.channel, title="Próximamente"))
        return itemlist

    patron = 'data-link="([^"]+).*?'
    patron += '>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)

    for url, calidad in matches:
        calidad = scrapertools.find_single_match(calidad, "\d+") + scrapertools.find_single_match(calidad, "\..+")
        itemlist.append(item.clone(channel=item.channel, action="play", title=calidad, thumbnail=item.thumbnail,
                                   contentThumbnail=item.thumbnail, url=url, language=IDIOMAS['Latino']))

    itemlist = servertools.get_servers_itemlist(itemlist)
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    itemlist.append(Item(channel=item.channel))

    if config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                             action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
                             contentTitle=item.contentTitle))

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
def estrenos(item):
    logger.info()

    data = httptools.downloadpage(HOST).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    if item.extra == "cast":
        patron0 = '<h3>Estreno Español</h3>(.*?)<div class=clearfix></div>'
        language = 'Español'
        site = "estreno-serie-castellano/"
    elif item.extra == "latino":
        patron0 = '<h3>Estreno Español Latino</h3>(.*?)<div class=clearfix></div>'
        language = 'Latino'
        site = "estreno-serie-espanol-latino/"
    else:
        patron0 = '<h3>Estreno Subtitulado</h3>(.*?)<div class=clearfix></div>'
        language = 'VOSE'
        site = "estreno-serie-sub-espanol/"

    patron = 'sidebarestdiv><a title=(.*?\d+X\d+) .*? href=(.*?)>.*?src=(.*?)>'
    match = scrapertools.find_single_match(data, patron0)
    matches = re.compile(patron, re.DOTALL).findall(match)

    itemlist = []
    for title, url, img in matches:
        show = scrapertools.find_single_match(title, '(.*?) \d+X\d+')
        itemlist.append(
            item.clone(action="findvideos", title=title, url=urlparse.urljoin(HOST, url),
                       thumbnail=urlparse.urljoin(HOST, img), language=language, contentSerieName=show))

    itemlist.append(
        item.clone(action="showmore", title="[COLOR blue]>> Mostrar más <<[/COLOR]",
                   url=urlparse.urljoin(HOST, site), thumbnail="", extra=language))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def peliculas(item):
    logger.info()
    itemlist = []
    item.text_color = color2

    data = httptools.downloadpage(item.url).data
    patron = '(?s)class="(?:result-item|item movies)">.*?<img src="([^"]+)'
    patron += '.*?alt="([^"]+)"'
    patron += '(.*?)'
    patron += 'href="([^"]+)"'
    patron += '.*?(?:<span>|<span class="year">)(.+?)<'
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedthumbnail, scrapedtitle, quality, scrapedurl, scrapedyear in matches:
        quality = scrapertools.find_single_match(quality, '.*?quality">([^<]+)')
        try:
            contentTitle = scrapedtitle
            year = scrapertools.find_single_match(scrapedyear, '\d{4}')
            if "/" in contentTitle:
                contentTitle = contentTitle.split(" /", 1)[0]
            scrapedtitle = "%s (%s)" % (contentTitle, year)
        except:
            contentTitle = scrapedtitle

        if quality:
            scrapedtitle += " [%s]" % quality

        new_item = item.clone(action="findvideos", title=scrapedtitle, contentTitle=contentTitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, contentType="movie", quality=quality)
        if year:
            new_item.infoLabels['year'] = int(year)
        itemlist.append(new_item)

    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    # Extract the pager
    next_page_link = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)')
    if next_page_link:
        itemlist.append(item.clone(action="peliculas", title=">> Página siguiente", url=next_page_link,
                                   text_color=color3))

    return itemlist
def sub_search(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)

    patron = '<a class="sres-wrap clearfix" href="([^"]+)">'                       # url
    patron += '<div class="sres-img"><img src="/([^"]+)" alt="([^"]+)" />.*?'      # img, title
    patron += '<div class="sres-desc">(.*?)</div>'                                 # plot
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, plot in matches:
        itemlist.append(
            item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle, action="findvideos",
                       text_color=color3, page=0, plot=plot, thumbnail=host + scrapedthumbnail))

    pagination = scrapertools.find_single_match(data, 'class="pnext"><a href="([^"]+)">')
    if pagination:
        itemlist.append(Item(channel=__channel__, action="sub_search", title="» Siguiente »", url=pagination))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def searchMovies(item):
    itemlist = []

    data = load_data(item.url)
    pattern = 'class="image">.*?href="([^"]+)".*?'
    pattern += 'src="([^"]+)" alt="([^"]+)".*?'
    pattern += 'class="year">(\d+)</span>.*?'
    pattern += '<p>(.*?)</p>'
    matches = scrapertools.find_multiple_matches(data, pattern)

    for link, img, title, year, plot in matches:
        itemTitle = "%s [COLOR blue](%s)[/COLOR]" % (title, year)
        fullimg = img.replace('-150x150', '')
        itemlist.append(
            Item(channel=item.channel, title=itemTitle, contentTitle=title, thumbnail=fullimg, url=link, plot=plot,
                 action="findvideos", language="LAT", infoLabels={'year': year}))

    next_page = scrapertools.find_single_match(data, 'href="([^"]+)" ><span class="icon-chevron-right">')
    if next_page:
        itemlist.append(
            Item(channel=item.channel, title="Siguiente Pagina", url=next_page, action="searchMovies"))

    tmdb.set_infoLabels(itemlist, True)

    return itemlist
def search(item, texto):
    logger.info("texto=%s" % texto)
    itemlist = []

    try:
        item.url = HOST + '?s=' + texto.replace(" ", "+")
        data = do_downloadpage(item.url)

        matches = re.findall('<article (.*?)</article>', data, re.DOTALL)
        for serie_data in matches:
            url = scrapertools.find_single_match(serie_data, ' href="([^"]+)')
            if not url:
                continue
            title = scrapertools.find_single_match(serie_data, ' title="([^"]+)')
            if not title:
                continue
            img = scrapertools.find_single_match(serie_data, ' src="([^"]+)')
            itemlist.append(
                item.clone(title=title, url=url, action='temporadas', contentType='tvshow',
                           contentSerieName=title, thumbnail=img))

        tmdb.set_infoLabels(itemlist)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)

    return itemlist
def lista(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data_lista = scrapertools.find_single_match(
        data, '<div class="su-list su-list-style-"><ul>(.+?)<\/ul><\/div>')

    patron = "<a href='(.+?)'>(.+?)<\/a>"
    matches = scrapertools.find_multiple_matches(data_lista, patron)

    for link, name in matches:
        title = name + " [Latino]"
        url = link
        context1 = [autoplay.context]
        itemlist.append(
            item.clone(title=title, url=url, plot=title, action="episodios", show=title, context=context1))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def sub_search(item):
    logger.info()
    itemlist = []

    headers = {'Referer': host, 'X-Requested-With': 'XMLHttpRequest'}
    dict_data = httptools.downloadpage(item.url, headers=headers).json
    list = dict_data["data"][item.type]

    if item.type == "m":
        action = "findvideos"
    else:
        action = "seasons"

    for dict in list:
        itemlist.append(
            item.clone(channel=item.channel, action=action, contentTitle=dict["title"], show=dict["title"],
                       infoLabels={"year": dict["release_year"]},
                       thumbnail="http://static.pelisfox.tv/static/movie/" + dict["cover"],
                       title=dict["title"] + " (" + dict["release_year"] + ")", url=host + dict["slug"]))

    tmdb.set_infoLabels(itemlist, seekTmdb=True)

    return itemlist
def listadoTemporadas(item):
    logger.info("pelisalacarta.channels.tremendaseries listadoTemporadas")
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", scrapertools.cache_page(item.url))
    patron = '<div class="tit_enlaces"><ul>(.*?)<div class="addthis_sharing_toolbox"'
    data = scrapertools.find_single_match(data, patron)
    # logger.debug(data)

    patron = '<a href="javascript:void\(\);">([^<]+)<br>'
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedtitle in matches:
        temporada = scrapertools.find_single_match(scrapedtitle, '(\d+)')
        newItem = item.clone(title=scrapedtitle, text_color=color1, action="listadoCapitulos", extra=temporada)
        newItem.infoLabels['season'] = temporada
        itemlist.append(newItem)

    # Get the data for all the seasons of the series using multiple threads
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    for i in itemlist:
        i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
        if i.infoLabels['title']:
            # If the season has its own name, add it to the item title
            i.title += " - %s" % (i.infoLabels['title'])
        if i.infoLabels.has_key('poster_path'):
            # If the season has its own poster, replace the series poster
            i.thumbnail = i.infoLabels['poster_path']

    '''
    if config.get_library_support():
        logger.debug(item.url)
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la biblioteca", url=item.url,
                             action="add_serie_to_library", extra="episodios###serie_add", show=item.show,
                             thumbnail=thumbnail_host, fanart=fanart, text_color=color3))'''

    return itemlist
def listado(item):
    logger.info()
    itemlist = []

    try:
        data_dict = jsontools.load_json(httptools.downloadpage(item.url).data)
    except:
        return itemlist  # Return an empty list

    # Filtering and search
    if item.filtro:
        for i in data_dict["result"][:]:
            if (item.filtro[0] == "genero" and item.filtro[1] not in i['genre'].lower()) or \
                    (item.filtro[0] == "search" and item.filtro[1] not in i['title'].lower()):
                data_dict["result"].remove(i)

    if not item.page:
        item.page = 0

    offset = int(item.page) * 30
    limit = offset + 30

    for i in data_dict["result"][offset:limit]:
        infoLabels = InfoLabels()
        idioma = ''

        if item.extra == "movie":
            action = "findvideos"
            # viewcontent = 'movies'
            infoLabels["title"] = i["title"]
            title = '%s (%s)' % (i["title"], i['year'])
            url = urlparse.urljoin(__url_base__, "ver-pelicula-online/" + str(i["id"]))
        elif item.extra == "series":
            action = "get_temporadas"
            # viewcontent = 'seasons'
            title = i["title"]
            infoLabels['tvshowtitle'] = i["title"]
            url = urlparse.urljoin(__url_base__, "episodio-online/" + str(i["id"]))
        else:  # item.extra == "series_novedades"
            action = "findvideos"
            # viewcontent = 'episodes'
            infoLabels['tvshowtitle'] = i["title"]
            infoLabels['season'] = i['season']
            infoLabels['episode'] = i['episode'].zfill(2)
            flag = scrapertools.find_single_match(i["label"], '(\s*\<img src=.*\>)')
            idioma = i["label"].replace(flag, "")
            title = '%s %sx%s (%s)' % (i["title"], infoLabels["season"], infoLabels["episode"], idioma)
            url = urlparse.urljoin(__url_base__, "episodio-online/" + str(i["id"]))

        if i.has_key("poster") and i["poster"]:
            thumbnail = re.compile("/w\d{3}/").sub("/w500/", i["poster"])
        else:
            thumbnail = item.thumbnail

        if i.has_key("background") and i["background"]:
            fanart = i["background"]
        else:
            fanart = item.fanart

        # Fill in the infoLabels dictionary
        infoLabels['title_id'] = i['id']  # title_id: identifier of the movie/series on pepecine.com
        if i['genre']:
            infoLabels['genre'] = i['genre']
        if i['year']:
            infoLabels['year'] = i['year']
        # if i['tagline']: infoLabels['plotoutline'] = i['tagline']
        if i['plot']:
            infoLabels['plot'] = i['plot']
        else:
            infoLabels['plot'] = ""
        if i['runtime']:
            infoLabels['duration'] = int(i['runtime']) * 60
        if i['imdb_rating']:
            infoLabels['rating'] = i['imdb_rating']
        elif i['tmdb_rating']:
            infoLabels['rating'] = i['tmdb_rating']
        if i['tmdb_id']:
            infoLabels['tmdb_id'] = i['tmdb_id']
        if i['imdb_id']:
            infoLabels['imdb_id'] = i['imdb_id']

        newItem = Item(channel=item.channel, action=action, title=title, url=url, extra=item.extra, fanart=fanart,
                       thumbnail=thumbnail, viewmode="movie_with_plot",
                       # viewcontent=viewcontent,
                       language=idioma, text_color="0xFFFFCE9C", infoLabels=infoLabels)
        newItem.year = i['year']
        newItem.contentTitle = i['title']
        if 'season' in infoLabels and infoLabels['season']:
            newItem.contentSeason = infoLabels['season']
        if 'episode' in infoLabels and infoLabels['episode']:
            newItem.contentEpisodeNumber = infoLabels['episode']
        itemlist.append(newItem)

    # Get the basic data using multiple threads
    tmdb.set_infoLabels(itemlist)

    # Pagination
    if len(data_dict["result"]) > limit:
        itemlist.append(item.clone(text_color="0xFF994D00", title=">> Pagina siguiente >>", page=item.page + 1))

    return itemlist
def listado(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = '<div class="MiniFicha">.*?'
    patron += '<img src="([^"]+).*?'
    patron += '<div class="MiniF_TitleSpecial">[^>]+>([^<]+).*?'
    patron += '<b>Categoria:\s*</b>([^&]+)&raquo;\s*([^<]+).*?'
    patron += '<div class="OpcionesDescargasMini">(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for thumbnail, title, cat_padres, cat_hijos, opciones in matches:
        # logger.debug(thumbnail + "\n" + title + "\n" + cat_padres + "\n" + cat_hijos + "\n" + opciones)

        # Get the year from the title and strip the leftovers
        patron = '\d{4}$'
        year = scrapertools.find_single_match(title, patron)
        if year:
            title = re.sub(patron, "", title)
            patron = '\s?-?\s?(line)?\s?-\s?$'
            title = re.sub(patron, "", title, flags=re.IGNORECASE)

        # Use the 'b' image because it is bigger
        thumbnail = HOST + thumbnail[:-5] + 'b' + thumbnail[-4:]

        # Look for the watch-online option
        patron = '<a href="http://estrenosly.org/ver-online-([^"]+)'
        url_ver = scrapertools.find_single_match(opciones, patron)
        if url_ver:
            new_item = Item(channel=item.channel, action="findvideos", title=title, thumbnail=thumbnail,
                            url=url_ver, infoLabels={"year": year}, text_color=color1)

            cat_padres = cat_padres.strip()
            if cat_padres in ["peliculas-dvdrip", "HDRIP", "cartelera"]:
                # if item.extra == 'movie':
                new_item.contentTitle = title
                new_item.extra = "movie"
                # Only keep valid names for the quality
                patron = ("rip|dvd|screener|hd|ts|Telesync")
                if re.search(patron, cat_hijos, flags=re.IGNORECASE):
                    new_item.contentQuality = cat_hijos
                    new_item.title = "%s [%s]" % (title, cat_hijos)
                elif cat_padres == "peliculas-dvdrip":
                    new_item.contentQuality = "DVDRIP"
                    new_item.title = "%s [DVDRIP]" % title
                elif cat_padres == "HDRIP":
                    new_item.contentQuality = "HDRIP"
                    new_item.title = "%s [HDRIP]" % title
            elif cat_padres == "series":
                new_item.contentSerieName = cat_hijos
                patron = re.compile('(\d+)x(\d+)')
                matches = patron.findall(title)
                if len(matches) == 1:
                    new_item.contentSeason = matches[0][0]
                    new_item.contentEpisodeNumber = matches[0][1].zfill(2)
                    new_item.extra = "episodie"
                else:
                    # matches == [('1', '01'), ('1', '02'), ('1', '03')]
                    new_item.extra = "multi-episodie"
            else:
                # Other categories that are not of interest for now
                continue

            ''' Optionally we could also get the torrent and direct-download links
            patron = '<a href="http://estrenosli.org/descarga-directa-([^"]+)'
            new_item.url_descarga = scrapertools.find_single_match(opciones, patron)
            patron = '<a href="http://estrenosli.org/descargar-torrent-([^"]+).*?'
            new_item.url_torrent = scrapertools.find_single_match(opciones, patron)'''

            itemlist.append(new_item)

    if itemlist:
        # Get the basic data for all the movies using multiple threads
        tmdb.set_infoLabels(itemlist)

        # Add pagination if needed
        patron = '<div class="sPages">.*?'
        patron += '<a href="([^"]+)">Siguiente'
        url_next_page = scrapertools.find_single_match(data, patron)
        if url_next_page:
            itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                                 thumbnail=thumbnail_host, url=HOST + url_next_page, folder=True,
                                 text_color=color3, text_blod=True))

    return itemlist
def listado(item): logger.info("pelisalacarta.channels.pelispedia listado") itemlist = [] action = "findvideos" if item.extra == 'serie': action = "temporadas" data = scrapertools.anti_cloudflare(item.url , host=CHANNEL_HOST , headers=CHANNEL_DEFAULT_HEADERS ) data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data) # logger.info("data -- {}".format(data)) patron = '<li[^>]+><a href="([^"]+)" alt="([^<]+).*?<img src="([^"]+).*?>.*?<span>\(([^)]+).*?' \ '<p class="font12">(.*?)</p>' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches[:28]: title = "{title} ({year})".format(title=scrapertools.unescape(scrapedtitle.strip()), year=scrapedyear) plot = scrapertools.entityunescape(scrapedplot) new_item= Item(channel=__channel__, title=title, url=urlparse.urljoin(CHANNEL_HOST, scrapedurl), action=action, thumbnail=scrapedthumbnail, plot=plot, context="", extra=item.extra, text_color= color3) if item.extra == 'serie': new_item.show = scrapertools.unescape(scrapedtitle.strip()) else: new_item.fulltitle = scrapertools.unescape(scrapedtitle.strip()) new_item.infoLabels = {'year':scrapedyear} #logger.debug(new_item.tostring()) itemlist.append(new_item) # Obtenemos los datos basicos de todas las peliculas mediante multihilos tmdb.set_infoLabels(itemlist, __modo_grafico__) # numero de registros que se muestran por página, se fija a 28 por cada paginación if len(matches) >= 28: file_php = "more" tipo_serie = "" if item.extra == "movies": anio = scrapertools.find_single_match(item.url, "(?:year=)(\w+)") letra = scrapertools.find_single_match(item.url, "(?:letra=)(\w+)") genero = scrapertools.find_single_match(item.url, "(?:gender=|genre=)(\w+)") params = "letra={letra}&year={year}&genre={genero}".format(letra=letra, year=anio, genero=genero) else: tipo2 = scrapertools.find_single_match(item.url, "(?:series/|tipo2=)(\w+)") tipo_serie = "&tipo=serie" if tipo2 != "all": file_php = "letra" tipo_serie += "&tipo2="+tipo2 genero = "" if tipo2 == "anio": genero = scrapertools.find_single_match(item.url, "(?:anio/|genre=)(\w+)") if tipo2 == "genero": genero = scrapertools.find_single_match(item.url, "(?:genero/|genre=)(\w+)") if tipo2 == "letra": genero = scrapertools.find_single_match(item.url, "(?:letra/|genre=)(\w+)") params = "genre={genero}".format(genero=genero) url = "http://www.pelispedia.tv/api/{file}.php?rangeStart=28&rangeEnd=28{tipo_serie}&{params}".\ format(file=file_php, tipo_serie=tipo_serie, params=params) if "rangeStart" in item.url: ant_inicio = scrapertools.find_single_match(item.url, "rangeStart=(\d+)&") inicio = str(int(ant_inicio)+28) url = item.url.replace("rangeStart="+ant_inicio, "rangeStart="+inicio) itemlist.append(Item(channel=__channel__, action="listado", title=">> Página siguiente", extra=item.extra, url=url, thumbnail=thumbnail_host, fanart= fanart_host, text_color= color2)) return itemlist
def findvideos(item): logger.info("pelisalacarta.channels.cinetux findvideos") itemlist = [] try: filtro_idioma = config.get_setting("filterlanguages", item.channel) filtro_enlaces = config.get_setting("filterlinks", item.channel) except: filtro_idioma = 3 filtro_enlaces = 2 dict_idiomas = {"Español": 2, "Latino": 1, "Subtitulado": 0} # Busca el argumento data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS, host=CHANNEL_HOST) year = scrapertools.find_single_match(data, '<h1><span>.*?rel="tag">([^<]+)</a>') if year != "" and item.extra != "library": item.infoLabels["year"] = int(year) # Ampliamos datos en tmdb if item.infoLabels["plot"] == "": try: tmdb.set_infoLabels(item, __modo_grafico__) except: pass if item.infoLabels.get("plot") == "": plot = scrapertools.find_single_match(data, '<div class="sinopsis"><p>(.*?)</p>') item.infoLabels["plot"] = plot if filtro_enlaces != 0: list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item) if list_enlaces: itemlist.append(item.clone(action="", title="Enlaces Online", text_color=color1, text_blod=True)) itemlist.extend(list_enlaces) if filtro_enlaces != 1: list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "descarga", item) if list_enlaces: itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color=color1, text_blod=True)) itemlist.extend(list_enlaces) if itemlist: itemlist.append( item.clone( channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", text_color="magenta" ) ) # Opción "Añadir esta película a la biblioteca de XBMC" if item.extra != "library": if config.get_library_support(): itemlist.append( Item( channel=item.channel, title="Añadir a la biblioteca", text_color="green", filtro=True, action="add_pelicula_to_library", url=item.url, infoLabels={"title": item.fulltitle}, fulltitle=item.fulltitle, extra="library", ) ) else: itemlist.append(item.clone(title="No hay enlaces disponibles", action="", text_color=color3)) return itemlist
def findvideostv(item):
    logger.info()
    itemlist = []

    # Fill in the language and quality dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="%s' \
             '" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)"' \
             % (str(item.infoLabels['episode']), str(item.infoLabels['season']))
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, language, url in matches:
        try:
            server = SERVERS[servidor_num]
            if server == "tusfiles" and "stormo.tv" in url:
                server = "stormo"
            if server != "tusfiles":
                servers_module = __import__("servers." + server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                url += "|" + item.url
            elif server == "tusfiles":
                url = "http://tusfiles.org/?%s" % url
                server = "directo"

        idioma = IDIOMAS.get(idiomas_videos.get(language))
        titulo = server.capitalize() + " [" + idioma + "] (" + calidad_videos.get(quality) + ")"
        itemlist.append(item.clone(action="play", title=titulo, url=url, contentType="episode",
                                   server=server))

    # Download links (the pattern captures quality, server id, language and url)
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="%s' \
             '" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)"' \
             % (str(item.infoLabels['episode']), str(item.infoLabels['season']))
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers." + server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                url += "|" + item.url
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(server)

        if mostrar_server:
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = server.capitalize() + " [" + idioma + "] (" + calidad_videos.get(quality) + ")"
            itemlist.append(item.clone(action="play", title=titulo, url=url, contentType="episode",
                                       server=server))

    itemlist.sort(key=lambda item: (int(item.infoLabels['episode']), item.title))

    try:
        from core import tmdb
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass

    return itemlist
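A self-contained sketch of the episode-specific pattern built above, using plain re instead of scrapertools; the HTML snippet and the "3"/"2" season and episode values are made up for illustration.

import re

html = ('<span class="movie-online-list" id_movies_types="720p" id_movies_servers="14" '
        'episode="3" season="2" id_lang="1" data-x="y" online-link="abc123">')
# The episode and season are interpolated into the pattern, so only four groups are captured.
patron = ('<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?'
          'episode="%s" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)"' % ("3", "2"))
for quality, servidor_num, language, url in re.findall(patron, html):
    print(quality, servidor_num, language, url)  # 720p 14 1 abc123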
def findvideos(item):
    logger.info()
    itemlist = []
    item.text_color = color3

    # Fill in the language and quality dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    if item.extra != "library":
        try:
            from core import tmdb
            tmdb.set_infoLabels(item, __modo_grafico__)
        except:
            pass

    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        try:
            server = SERVERS[servidor_num]
            if server == "tusfiles" and "stormo.tv" in url:
                server = "stormo"
            if server != "tusfiles":
                servers_module = __import__("servers." + server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                url += "|" + item.url
            elif server == "tusfiles":
                url = "http://tusfiles.org/?%s" % url
                server = "directo"

        idioma = IDIOMAS.get(idiomas_videos.get(language))
        titulo = server.capitalize() + " [" + idioma + "] [" + calidad_videos.get(calidad) + "]"
        itemlist.append(item.clone(action="play", title=titulo, url=url, extra=idioma, server=server))

    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers." + server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                url += "|" + item.url
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(server)

        if mostrar_server:
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = "[" + server.capitalize() + "] [" + idioma + "] [" + calidad_videos.get(calidad) + "]"
            itemlist.append(item.clone(action="play", title=titulo, url=url, extra=idioma, server=server))

    # Sort links by language (stored in item.extra) and then by server
    itemlist.sort(key=lambda item: (item.extra, item.server))

    if itemlist:
        if "trailer" not in item.infoLabels:
            trailer_url = scrapertools.find_single_match(data, 'class="n-movie-trailer">([^<]+)</span>')
            item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=")

        itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                   text_color="magenta", context=""))

        if item.extra != "library":
            if config.get_library_support():
                itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca",
                                     action="add_pelicula_to_library", url=item.url, text_color="green",
                                     infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
                                     extra="library"))

    return itemlist
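A tiny illustration of the final sort in findvideos() above: links are grouped first by the language stored in item.extra and then by server name. The link tuples are invented sample data.

links = [("Latino", "streamcloud"), ("Español", "powvideo"), ("Español", "openload")]
# Same key shape as itemlist.sort(key=lambda item: (item.extra, item.server))
links.sort(key=lambda link: (link[0], link[1]))
print(links)  # [('Español', 'openload'), ('Español', 'powvideo'), ('Latino', 'streamcloud')]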