def play(item):
    """Scrape playable links for *item* and return the first one that is alive.

    kt_player pages expose stream URLs as JS variables
    (video_url / video_alt_urlN); any other page is assumed to embed the
    player in an iframe.

    Fix: the original counter-based guard (`if a < 1: return []`) was
    unreachable and the function fell off the end returning None when no
    link was alive; it now always returns a list ([] on failure).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(
        data, '<div class="player">(.*?)<div class="media-info">')
    if "/kt_player." in data:
        # kt_player embeds: stream URLs live in JS vars
        patron = r'(?:video_url|video_alt_url[0-9]*):\s*\'([^\']+)\''
    else:
        patron = '<iframe src="([^"]+)"'
    for scrapedurl in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(item.clone(action="play", title="%s",
                                   contentTitle=item.title, url=scrapedurl))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    # Return the first link whose availability check reports 'green'
    # (presumably check_video_link embeds a color marker — confirmed only
    # by usage here).
    for i in itemlist:
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        if 'green' in res:
            return [i]
    return []
def play(item):
    """Collect embedded player URLs for *item* and return the first alive link.

    URLs pointing at the yuuk.net embed page are resolved one step further
    to obtain the real file URL.

    Fix: the original counter guard was unreachable and the function
    implicitly returned None when every link was dead; it now always
    returns a list ([] on failure).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    data = scrapertools.find_single_match(data, 'Streaming Server<(.*?)Screenshot<')
    patron = '(?:src|SRC)="([^"]+)"'
    for url in scrapertools.find_multiple_matches(data, patron):
        if "http://stream.yuuk.net/embeds.php" in url:
            # Intermediate embed page: fetch it and extract the real file URL.
            data = httptools.downloadpage(url).data
            url = scrapertools.find_single_match(data, '"file": "([^"]+)"')
        itemlist.append(Item(channel=item.channel, action="play", title="%s",
                             contentTitle=item.title, url=url))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    for i in itemlist:
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        if 'green' in res:
            return [i]
    return []
def play(item):
    """Scrape the video-embed section for playable links; return the first alive one.

    Obfuscated "strdef" URLs are decoded through the channel's decode_url
    helper. 'videoxseries' links keep the original behavior: they are
    neither checked nor returned.

    Fix: the original counter guard was unreachable and the function
    implicitly returned None when no link passed the check; it now always
    returns a list ([] on failure).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    data = scrapertools.find_single_match(
        data, '<div class="video-embed">(.*?)<div class="views-infos">')
    patron = 'src="([^"]+)"'
    for scrapedurl in scrapertools.find_multiple_matches(data, patron):
        if "strdef" in scrapedurl:
            url = decode_url(scrapedurl)
        else:
            url = scrapedurl
        itemlist.append(Item(channel=item.channel, action="play", title="%s",
                             contentTitle=item.title, url=url))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    for i in itemlist:
        if 'videoxseries' in i.url:
            # Original behavior: res was forced to "" so these never matched
            # 'green' — i.e. skipped, never returned.
            continue
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        if 'green' in res:
            return [i]
    return []
def play(item):
    """Extract iframe links from the item page and return the first alive one.

    Fix: the original counter guard was unreachable and the function
    implicitly returned None when every link was dead; it now always
    returns a list ([] on failure).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<iframe src="([^"]+)"'
    for url in scrapertools.find_multiple_matches(data, patron):
        itemlist.append(item.clone(action="play", title="%s",
                                   contentTitle=item.title, url=url))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    for i in itemlist:
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        if 'green' in res:
            return [i]
    return []
def play(item):
    """Collect Streaming/Download anchors (excluding ubiqfile) and return the
    first alive link, preferring the last scraped ones (list is reversed).

    Fix: the original counter guard was unreachable and the function
    implicitly returned None when no link was alive; it now always returns
    a list ([] on failure).
    """
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
    patron = '<a href="([^"]+)" rel="nofollow"[^<]+>(?:Streaming|Download)'
    for url in scrapertools.find_multiple_matches(data, patron):
        if "ubiqfile" not in url:
            itemlist.append(item.clone(action='play', title="%s",
                                       contentTitle=item.title, url=url))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    itemlist.reverse()
    for i in itemlist:
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        if 'green' in res:
            return [i]
    return []
def play(item):
    """Scrape ' - on <host>' links and return the first alive one.

    clipwatching and mangovideo keep the original behavior: skipped,
    never checked nor returned.

    Fix: the original counter guard was unreachable and the function
    implicitly returned None when no link was alive; it now always returns
    a list ([] on failure).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
    patron = ' - on ([^"]+)" href="([^"]+)"'
    for scrapedtitle, url in scrapertools.find_multiple_matches(data, patron):
        itemlist.append(Item(channel=item.channel, action="play", title="%s",
                             contentTitle=item.title, url=url))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    logger.debug(itemlist)
    for i in itemlist:
        if 'clipwatching' in i.url or 'mangovideo' in i.url:
            # Original behavior: res forced to "" — never 'green', so skipped.
            continue
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        if 'green' in res:
            return [i]
    return []
def play(item):
    """Scrape ' - on <host>' links, de-obfuscating base64-wrapped redirect
    URLs, and return the first alive one.

    mangovideo, rapidgator and clipwatching keep the original behavior:
    skipped, never checked nor returned.

    Fix: the original counter guard was unreachable and the function
    implicitly returned None when no link was alive; it now always returns
    a list ([] on failure).
    """
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
    patron = ' - on ([^"]+)" href="([^"]+)"'
    for scrapedtitle, url in scrapertools.find_multiple_matches(data, patron):
        if 'aHR0' in url:
            # 'aHR0' is base64 for 'htt': peel up to three layers of
            # redirect-gate prefix + base64 wrapping.
            for _ in range(3):
                url = url.replace("https://vshares.tk/goto/", "") \
                         .replace("https://waaws.tk/goto/", "") \
                         .replace("https://openloads.tk/goto/", "")
                url = base64.b64decode(url)
        itemlist.append(Item(channel=item.channel, action="play", title="%s",
                             contentTitle=item.title, url=url))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    for i in itemlist:
        if ('mangovideo' in i.url or 'rapidgator' in i.url
                or 'clipwatching' in i.url):
            # Original behavior: res forced to "" — never 'green', so skipped.
            continue
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        if 'green' in res:
            return [i]
    return []
def play(item):
    """Resolve known-server links for *item* and return the first alive one.

    Fix: the original counter guard was unreachable and the function
    implicitly returned None when no link was alive; it now always returns
    a list ([] on failure). The redundant `itemlist = []` before the
    immediate reassignment was dropped.
    """
    itemlist = servertools.find_video_items(item)
    for i in itemlist:
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        if 'green' in res:
            return [i]
    return []
def play(item):
    """Scrape 'Streaming' anchors from the item page; return the first alive link.

    Fix: the original counter guard was unreachable and the function
    implicitly returned None when no link was alive; it now always returns
    a list ([] on failure).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<a href="([^"]+)" [^<]+>Streaming'
    for scrapedurl in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(item.clone(action="play", title="%s",
                                   contentTitle=item.title, url=scrapedurl))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    for i in itemlist:
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        if 'green' in res:
            return [i]
    return []
def play(item):
    """Collect iframe src URLs via the channel's create_soup helper; return
    the first alive link.

    Fix: the original counter guard was unreachable and the function
    implicitly returned None when no link was alive; it now always returns
    a list ([] on failure).
    """
    logger.info()
    itemlist = []
    soup = create_soup(item.url).find_all('iframe')
    logger.debug(soup)
    for elem in soup:
        itemlist.append(item.clone(action="play", title="%s",
                                   contentTitle=item.title, url=elem['src']))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    for i in itemlist:
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        if 'green' in res:
            return [i]
    return []
def play(item):
    """Scrape the video_code section for embeds; return the first alive link.

    Mixdrop embeds need a second request (with Referer) to obtain the real,
    protocol-relative video source; those direct links are assumed alive
    because check_video_link cannot probe direct video files (original
    behavior). base64 embeds are netu.tv and get the original's hardcoded
    player URL.

    Fix: the original counter guard was unreachable and the function
    implicitly returned None when no link was alive; it now always returns
    a list ([] on failure).
    """
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<div class="video_code">(.*?)<h3')
    patron = '(?:src|SRC)="([^"]+)"'
    for scrapedurl in scrapertools.find_multiple_matches(data, patron):
        if 'mixdrop' in scrapedurl:
            url = "https:" + scrapedurl
            headers = {'Referer': item.url}
            data = httptools.downloadpage(url, headers=headers).data
            url = "https:" + scrapertools.find_single_match(data, 'vsrc = "([^"]+)"')
        else:
            url = scrapedurl
        if 'base64' in scrapedurl:  # base64 embeds are netu.tv
            url = "https://hqq.tv/player/embed_player.php?vid=RODE5Z2Hx3hO&autoplay=none"
        itemlist.append(item.clone(action="play", title="%s",
                                   contentTitle=item.title, url=url))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    for i in itemlist:
        if 'mixdrop' in i.url:
            # Original forced res = "green" here — i.e. returned immediately.
            return [i]
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        if 'green' in res:
            return [i]
    return []
def play(item):
    """Decode the base64 'extra_urls' JS array and return the first alive link.

    Fix: the original counter guard was unreachable and the function
    implicitly returned None when no link was alive; it now always returns
    a list ([] on failure). The regex is now a raw string (it relies on
    `\\[`, `\\d`, `\\'` escapes).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = r';extra_urls\[\d+\]=\'([^\']+)\''
    for scrapedurl in re.compile(patron, re.DOTALL).findall(data):
        scrapedurl = base64.b64decode(scrapedurl)
        itemlist.append(item.clone(action="play", title="%s", url=scrapedurl))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    for i in itemlist:
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        if 'green' in res:
            return [i]
    return []
def findvideos(item):
    """Build the list of playable links for *item* from item.url_enlaces.

    For each (url, server) pair it sizes .torrent links, renders a colored
    title, verifies non-torrent links against their server, and then applies
    the FilterTools language filter before handing the list to AutoPlay.
    Returns the rendered itemlist.
    """
    logger.info()
    itemlist = []
    itemlist_t = []  # full itemlist of links
    itemlist_f = []  # itemlist of filtered links
    matches = []
    #logger.debug(item)

    # Process the .torrent links with their different qualities
    for scrapedurl, scrapedserver in item.url_enlaces:
        # Work on a copy of Item
        item_local = item.clone()
        item_local.url = scrapedurl
        item_local.server = scrapedserver.lower()
        item_local.action = "play"

        # Look for the size inside the .torrent file
        size = ''
        if item_local.server == 'torrent' and not size and not item_local.url.startswith('magnet:'):
            size = generictools.get_torrent_size(item_local.url)  # look up the .torrent size from the web
        if size:
            size = size.replace('GB', 'G·B').replace('Gb', 'G·b').replace('MB', 'M·B')\
                       .replace('Mb', 'M·b').replace('.', ',')
            item_local.torrent_info = '%s, ' % size  # add size
        if item_local.url.startswith('magnet:') and not 'Magnet' in item_local.torrent_info:
            item_local.torrent_info += ' Magnet'
        if item_local.torrent_info:
            item_local.torrent_info = item_local.torrent_info.strip().strip(',')
            if not item.unify:
                item_local.torrent_info = '[%s]' % item_local.torrent_info

        # Now render the links
        item_local.title = '[[COLOR yellow]?[/COLOR]] [COLOR yellow][%s][/COLOR] ' %item_local.server.capitalize() \
                           + '[COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] %s' % \
                           (item_local.quality, str(item_local.language), \
                           item_local.torrent_info)

        # Verify links
        if item_local.server != 'torrent':
            if config.get_setting("hidepremium"):  # if premium servers are rejected, skip them
                if not servertools.is_server_enabled(item_local.server):
                    continue
            devuelve = servertools.findvideosbyserver(item_local.url, item_local.server)  # does the link exist?
            if not devuelve:
                continue
            item_local.url = devuelve[0][1]
            # NOTE(review): `timeout` is a name from module scope — confirm it is defined there.
            item_local.alive = servertools.check_video_link(item_local.url, item_local.server, timeout=timeout)  # is the link alive?
            if 'NO' in item_local.alive:
                continue
        else:
            if not size or 'Magnet' in size:
                item_local.alive = "??"  # link quality unverified
            elif 'ERROR' in size:
                item_local.alive = "no"  # link quality in error?
                continue
            else:
                item_local.alive = "ok"  # link quality verified

        itemlist_t.append(item_local.clone())  # render to screen, if languages are not filtered

        # Required for FilterTools
        if config.get_setting('filter_languages', channel) > 0:  # if a language is selected, filter
            itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language)  # render to screen, if not empty

    if len(itemlist_f) > 0:  # if there are filtered entries...
        itemlist.extend(itemlist_f)  # render the filtered list
    else:
        if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0:  # if there are no filtered entries...
            thumb_separador = get_thumb("next.png")  # ...render everything with a warning
            itemlist.append(Item(channel=item.channel, url=host,
                                 title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]",
                                 thumbnail=thumb_separador, folder=False))
        itemlist.extend(itemlist_t)  # render everything if there was no filtering

    # Required for AutoPlay
    autoplay.start(itemlist, item)  # launch AutoPlay

    return itemlist
def findvideos(item):
    """Scrape the descarga-torrent page of a Newpct1-style site for one
    .torrent link plus 'Ver' (watch) and 'Descargar' (download) server links.

    Builds a header item with the full video title, then appends the torrent
    item and one item per verified server link. Returns the itemlist.
    """
    logger.info()
    itemlist = []

    ## Any of the three options is valid
    # item.url = item.url.replace(".com/",".com/ver-online/")
    # item.url = item.url.replace(".com/",".com/descarga-directa/")
    item.url = item.url.replace(".com/", ".com/descarga-torrent/")

    # Get up-to-date Episode info
    if item.contentType == "episode":
        if not item.contentTitle and (not item.infoLabels['title'] or item.infoLabels['title'] == 'null' or item.infoLabels['title'] == "None"):
            tmdb.set_infoLabels_item(item, seekTmdb=True)
        if not item.contentTitle:
            item.contentTitle = item.infoLabels['title']

    # Download the page (Python 2 code: `unicode` is the py2 builtin)
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace(
        "//pictures", "/pictures")

    title = scrapertools.find_single_match(
        data, "<h1.*?<strong>([^<]+)<\/strong>.*?<\/h1>")  # adjusted for mispelisy.series.com
    title += scrapertools.find_single_match(
        data, "<h1.*?<strong>[^<]+<\/strong>([^<]+)<\/h1>")  # adjusted for mispelisy.series.com
    #caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
    caratula = scrapertools.find_single_match(data, '<h1.*?<img.*?src="([^"]+)')

    patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'  # scraped torrent url
    url = scrapertools.find_single_match(data, patron)

    if item.infoLabels['year']:  # add the year to the general title
        year = '[%s]' % str(item.infoLabels['year'])
    else:
        year = ""
    if item.infoLabels['aired'] and item.contentType == "episode":  # add the episode year for series
        year = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
        year = '[%s]' % year

    title_gen = title
    if item.contentType == "episode":  # strip info duplicated in Series titles
        title = re.sub(r'Temp.*?\[', '[', title)
        title = re.sub(r'\[Cap.*?\]', '', title)
        title_epi = '%sx%s - %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle)
        title_gen = '%s %s, %s' % (title_epi, year, title)
        title_torrent = '%s, %s' % (title_epi, item.contentSerieName)
    else:
        title_torrent = item.contentTitle
    if item.infoLabels['quality']:
        if not config.get_setting("unify"):  # if Smart Titles NOT selected:
            title_torrent = '%s [%s]' % (title_torrent, item.infoLabels['quality'])
        else:
            title_torrent = '%s (%s)' % (title_torrent, item.infoLabels['quality'])
    if not config.get_setting("unify"):  # if Smart Titles NOT selected:
        title_gen = '[COLOR gold]**- Título: [/COLOR]%s -**' % (title_gen)
    else:
        title_gen = '[COLOR gold]Título: [/COLOR]%s' % (title_gen)
    if config.get_setting("quit_channel_name", "videolibrary") == 1 and item.contentChannel == "videolibrary":
        title_gen = '%s: %s' % (item.channel.capitalize(), title_gen)
    itemlist.append(item.clone(title=title_gen, action="", folder=False))  # title with all the video's data

    title = title_torrent
    title_torrent = '[COLOR salmon]??[/COLOR], [COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)

    if url != "":  # Torrent
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent",
                 title=title_torrent, fulltitle=title, url=url,
                 thumbnail=caratula, plot=item.plot,
                 infoLabels=item.infoLabels, folder=False))
        logger.debug("TORRENT: url: " + url + " / title: " + title + " / calidad: "
                     + item.quality + " / context: " + str(item.context))

    # scraped watch videos, download videos: single link / multiple links
    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
    data = re.sub(
        r'javascript:;" onClick="popup\("http:\/\/(?:www.)?mispelisyseries.com\/\w{1,9}\/library\/include\/ajax\/get_modallinks.php\?links=',
        "", data)

    # New server-scraping scheme created by Torrentlocula, compatible with other Newpct1 clones
    patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
    patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
    enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
    enlaces_descargar = enlaces_ver
    #logger.debug(enlaces_ver)

    if len(enlaces_ver) > 0:
        if not config.get_setting("unify"):  # if Smart Titles NOT selected:
            itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Ver: -**[/COLOR]", action="", folder=False))
        else:
            itemlist.append(item.clone(title="[COLOR gold] Enlaces Ver: [/COLOR]", action="", folder=False))
    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        if "Ver" in titulo:
            servidor = servidor.replace("streamin", "streaminto")
            titulo = title
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            titulo = '[COLOR yellow][%s]-[/COLOR] %s [online]' % (servidor.capitalize(), titulo)
            logger.debug("VER: url: " + enlace + " / title: " + titulo
                         + " / servidor: " + servidor + " / idioma: " + idioma)
            if mostrar_server:
                try:
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        item.alive = servertools.check_video_link(enlace, servidor)
                        if item.alive.lower() == "ok":
                            titulo = '%s, %s' % (item.alive, titulo)
                        elif item.alive == "??":
                            titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, titulo)
                        else:
                            # dead link: log and bail out of this entry via the except
                            logger.debug(item.alive + ": / " + titulo + " / " + enlace)
                            raise
                    itemlist.append(
                        Item(fanart=item.fanart, channel=item.channel,
                             action="play", server=servidor, title=titulo,
                             fulltitle=title, url=enlace, thumbnail=logo,
                             plot=item.plot, infoLabels=item.infoLabels,
                             folder=False))
                except:
                    pass

    if len(enlaces_descargar) > 0:
        if not config.get_setting("unify"):  # if Smart Titles NOT selected:
            itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Descargar: -**[/COLOR]", action="", folder=False))
        else:
            itemlist.append(item.clone(title="[COLOR gold] Enlaces Descargar: [/COLOR]", action="", folder=False))
    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        if "Ver" not in titulo:
            servidor = servidor.replace("uploaded", "uploadedto")
            partes = enlace.split(" ")  # multi-part downloads are space-separated
            titulo = "Descarga "
            p = 1
            logger.debug("DESCARGAR: url: " + enlace + " / title: " + titulo + title
                         + " / servidor: " + servidor + " / idioma: " + idioma)
            for enlace in partes:
                parte_titulo = titulo + " (%s/%s)" % (p, len(partes))
                p += 1
                mostrar_server = True
                if config.get_setting("hidepremium"):
                    mostrar_server = servertools.is_server_enabled(servidor)
                parte_titulo = '[COLOR yellow][%s]-[/COLOR] %s' % (servidor.capitalize(), parte_titulo)
                if item.infoLabels['quality']:
                    if not config.get_setting("unify"):  # if Smart Titles NOT selected:
                        parte_titulo = '%s [%s]' % (parte_titulo, item.infoLabels['quality'])
                    else:
                        parte_titulo = '%s (%s)' % (parte_titulo, item.infoLabels['quality'])
                if mostrar_server:
                    try:
                        devuelve = servertools.findvideosbyserver(enlace, servidor)
                        if devuelve:
                            enlace = devuelve[0][1]
                            if p <= 2:  # only verify the first part of a multi-part link
                                item.alive = servertools.check_video_link(enlace, servidor)
                            if item.alive.lower() == "ok":
                                parte_titulo = '%s, %s' % (item.alive, parte_titulo)
                            elif item.alive == "??":
                                parte_titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, parte_titulo)
                            else:
                                logger.debug(item.alive + ": / " + parte_titulo + " / " + enlace)
                                break
                        itemlist.append(
                            Item(fanart=item.fanart, channel=item.channel,
                                 action="play", server=servidor,
                                 title=parte_titulo, fulltitle=title,
                                 url=enlace, thumbnail=logo, plot=item.plot,
                                 infoLabels=item.infoLabels, folder=False))
                    except:
                        pass
    return itemlist
def findvideos(item):
    """Scrape the item page for .torrent links (one per quality) plus one
    direct-server ('openload') link.

    On download or pattern failure it appends an error item (ERROR 01 / 02)
    and returns early. Titles and quality strings are post-processed via
    generictools. Returns the itemlist.
    """
    logger.info()
    itemlist = []

    # Download the page data
    data = ''
    try:
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    except:
        pass
    if not data:
        logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url + " / DATA: " + data)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
        return itemlist  # if there is no more data something is wrong; render what we have

    patron = 'id="modal-quality-\w+"><span>(.*?)</span>.*?class="quality-size">(.*?)</p>.*?href="([^"]+)"'  # grabs the .torrent links
    matches = re.compile(patron, re.DOTALL).findall(data)
    if not matches:  # error
        logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))
        return itemlist  # if there is no more data something is wrong; render what we have

    # Call the method that builds the video's general title with all the info obtained from TMDB
    item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)

    for scrapedquality, scrapedsize, scrapedtorrent in matches:  # read the torrents with the different qualities
        # Work on a copy of Item
        item_local = item.clone()
        item_local.quality = scrapedquality
        if item.infoLabels['duration']:
            item_local.quality += scrapertools.find_single_match(item.quality, '(\s\[.*?\])')  # copy the duration
        # Add the size for all of them
        item_local.quality = '%s [%s]' % (item_local.quality, scrapedsize)  # append size at the end of quality
        item_local.quality = item_local.quality.replace("G", "G ").replace("M", "M ")  # avoid the reserved word in Unify
        # Now render the Torrent link
        item_local.url = scrapedtorrent
        item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language))  # prepare the Torrent title
        item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title)  # strip empty tags
        item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title)  # strip empty colors
        item_local.alive = "??"  # link quality unverified
        item_local.action = "play"  # play the video
        item_local.server = "torrent"  # Torrent server
        itemlist.append(item_local.clone())  # render to screen
        #logger.debug("TORRENT: " + scrapedtorrent + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + scrapedsize + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
        #logger.debug(item_local)

    # Now handle the direct server
    item_local = item.clone()
    servidor = 'openload'
    item_local.quality = ''
    if item.infoLabels['duration']:
        item_local.quality = scrapertools.find_single_match(item.quality, '(\s\[.*?\])')  # copy the duration
    enlace = scrapertools.find_single_match(data, 'button-green-download-big".*?href="([^"]+)"><span class="icon-play">')
    if enlace:
        try:
            devuelve = servertools.findvideosbyserver(enlace, servidor)  # does the link exist?
            if devuelve:
                enlace = devuelve[0][1]  # save the link
                item_local.alive = "??"  # assume by default that the link is doubtful
                # Per-server link check (same as check_list_links does per link)
                item_local.alive = servertools.check_video_link(enlace, servidor, timeout=5)  # is the link alive?
                # If the link is not alive it is ignored
                if item_local.alive == "??":  # doubtful
                    item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (servidor.capitalize(), item_local.quality, str(item_local.language))
                elif item_local.alive.lower() == "no":  # not alive: prepare it but do not render it
                    item_local.title = '[COLOR red][%s][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.alive, servidor.capitalize(), item_local.quality, str(item_local.language))
                    # NOTE(review): `title` is not defined in this scope — this line
                    # raises NameError, which (like the bare `raise`) is swallowed
                    # by the enclosing except; the dead link is dropped either way.
                    logger.debug(item_local.alive + ": ALIVE / " + title + " / " + servidor + " / " + enlace)
                    raise
                else:  # it IS alive
                    item_local.title = '[COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (servidor.capitalize(), item_local.quality, str(item_local.language))
                # Prepare the remaining Item fields to play the video directly
                item_local.action = "play"
                item_local.server = servidor
                item_local.url = enlace
                item_local.title = item_local.title.replace("[]", "").strip()
                item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
                item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip()
                itemlist.append(item_local.clone())
                #logger.debug(item_local)
        except:
            pass

    return itemlist