def findvideos(item):
    """Build the list of playable video links for a movie page.

    Scrapes two sections of the page HTML: the main options list
    ("ver-mas-opciones"), whose rows carry language and quality columns,
    and the tab options list ("opciones-tab"), whose rows do not.

    :param item: Item pointing at the movie page (item.url is fetched).
    :return: list of playable Items, one per server option found.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data

    # First section: options including language and quality columns.
    listado1 = scrapertools.find_single_match(data, '<div class="links" id="ver-mas-opciones"><h2 class="h2"><i class="[^"]+"></i>[^<]+</h2><ul class="opciones">(.*?)</ul>')
    patron1 = '<li ><a id="([^"]+)" rel="nofollow" href="([^"]+)" title="[^"]+" alt="([^"]+)"><span class="opcion"><i class="[^"]+"></i><u>[^<]+</u>[^<]+</span><span class="ico"><img src="[^"]+" alt="[^"]+"/>[^<]+</span><span>([^"]+)</span><span>([^"]+)</span></a></li>'
    # Fixed: assignment was duplicated ("matches = matches = ...").
    matches = re.compile(patron1, re.DOTALL).findall(listado1)
    for vidId, vidUrl, vidServer, language, quality in matches:
        server = servertools.get_server_name(vidServer)
        # Collapse any subtitled variant ("Sub", "Subtitulado", ...) to 'sub'.
        if 'Sub' in language:
            language = 'sub'
        itemlist.append(Item(channel=item.channel, action='play', url=vidUrl, extra=vidId,
                             title='Ver en ' + vidServer + ' | ' + language + ' | ' + quality,
                             thumbnail=item.thumbnail, server=server, language=language,
                             quality=quality))

    # Second section: tab options without language/quality columns.
    listado2 = scrapertools.find_single_match(data, '<ul class="opciones-tab">(.*?)</ul>')
    patron2 = '<li ><a id="([^"]+)" rel="nofollow" href="([^"]+)" title="[^"]+" alt="([^"]+)"><img src="[^"]+" alt="[^"]+"/>[^<]+</a></li>'
    # Fixed: assignment was duplicated here as well.
    matches = re.compile(patron2, re.DOTALL).findall(listado2)
    for vidId, vidUrl, vidServer in matches:
        server = servertools.get_server_name(vidServer)
        itemlist.append(Item(channel=item.channel, action='play', url=vidUrl, extra=vidId,
                             title='Ver en ' + vidServer, thumbnail=item.thumbnail,
                             server=server))

    # Tag every found link with the movie title and mark it as a leaf entry.
    for videoitem in itemlist:
        videoitem.fulltitle = item.title
        videoitem.folder = False
    return itemlist
def findvideos(item):
    """List the playable links of a movie page.

    Extracts (synopsis, title) pairs, then one playable Item per row of
    every <tbody> link table, and finally appends trailer-search and
    video-library entries when anything was found.
    """
    logger.info(item.url)
    itemlist = []
    # Page is latin-1 encoded; re-encode to utf-8 (Python 2 style str handling).
    data = httptools.downloadpage(
        item.url).data.decode('iso-8859-1').encode('utf-8')
    patron = '<h2>Sinopsis</h2>.*?<p>(.*?)</p>.*?<div id="informacion".*?</h2>.*?<p>(.*?)</p>'  # title
    matches = scrapertools.find_multiple_matches(data, patron)
    for sinopsis, title in matches:
        title = "[COLOR white][B]" + title + "[/B][/COLOR]"
        patron = '<tbody>(.*?)</tbody>'
        matchesx = scrapertools.find_multiple_matches(data, patron)
        for bloq in matchesx:
            # Captures (url, server, language, quality) per table row.
            patron = 'href="(.*?)".*?0 0">(.*?)</.*?<td>(.*?)</.*?<td>(.*?)<'
            matches = scrapertools.find_multiple_matches(bloq, patron)
            for scrapedurl, scrapedserver, scrapedlang, scrapedquality in matches:
                url = scrapedurl
                # Host part of the link; presumably one hit per absolute URL.
                patronenlaces = '.*?://(.*?)/'
                matchesenlaces = scrapertools.find_multiple_matches(
                    scrapedurl, patronenlaces)
                scrapedtitle = ""
                # Normalise site server labels to identifiers servertools knows.
                if scrapedserver == 'Vimple':
                    scrapedserver = 'vimpleru'
                elif scrapedserver == 'Ok.ru':
                    scrapedserver = 'okru'
                server = servertools.get_server_name(scrapedserver)
                for scrapedenlace in matchesenlaces:
                    scrapedtitle = "[COLOR white][ [/COLOR][COLOR green]" + scrapedquality + "[/COLOR]" + "[COLOR white] ][/COLOR] [COLOR red] [" + scrapedlang + "][/COLOR] » " + scrapedserver
                    itemlist.append(
                        item.clone(action="play", title=scrapedtitle, extra=title,
                                   url=url, fanart=item.thumbnail,
                                   language=scrapedlang, quality=scrapedquality,
                                   server=server))
    # Enrich the collected items with TMDB metadata.
    tmdb.set_infoLabels(itemlist)
    if itemlist:
        itemlist.append(Item(channel=item.channel))
        itemlist.append(
            item.clone(channel="trailertools", title="Buscar Tráiler",
                       action="buscartrailer", text_color="magenta"))
        # "Add this movie to the KODI library" option
        if config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel, title="Añadir pelicula a la videoteca",
                     text_color="green", action="add_pelicula_to_library",
                     url=item.url, thumbnail=item.thumbnail,
                     fulltitle=item.fulltitle))
    return itemlist
def findvideos(item):
    """Return one playable Item per link row of the "opciones" table."""
    logger.info()
    # Download the page and keep only the options section.
    page = httptools.downloadpage(item.url).data
    page = scrapertools.find_single_match(
        page, '<div class="opciones">(.*?)<div id="sidebar"')
    itemlist = []
    # Each row yields (server label, language, quality, link).
    patron = ('<span class="infotx">([^<]+)</span></th[^<]+'
              '<th align="left"><img src="[^"]+" width="\d+" alt="([^"]+)"[^<]+</th[^<]+'
              '<th align="left"><img[^>]+>([^<]+)</th[^<]+'
              '<th class="slink" align="left"><div id="btnp"><a href="[^"]+" onClick="[^h]+([^\']+)\'')
    for srv, lang, quality, link in re.findall(patron, page, re.DOTALL):
        itemlist.append(Item(channel=item.channel,
                             action="play",
                             title=item.title,
                             fulltitle=item.fulltitle,
                             url=link,
                             thumbnail=item.thumbnail,
                             language=lang,
                             quality=quality,
                             server=servertools.get_server_name(srv)))
    return itemlist
def findvideos(item):
    """Return playable Items for the "opciones" table, plus trailer and
    video-library helper entries when any link was found."""
    logger.info()
    # Download the page and keep only the options section.
    page = httptools.downloadpage(item.url).data
    page = scrapertools.find_single_match(
        page, '<div class="opciones">(.*?)<div id="sidebar"')
    itemlist = []
    # Each row yields (server label, language, quality, link).
    patron = ('<span class="infotx">([^<]+)</span></th[^<]+'
              '<th align="left"><img src="[^"]+" width="\d+" alt="([^"]+)"[^<]+</th[^<]+'
              '<th align="left"><img[^>]+>([^<]+)</th[^<]+'
              '<th class="slink" align="left"><div id="btnp"><a href="[^"]+" onClick="[^h]+([^\']+)\'')
    for srv, lang, quality, link in re.findall(patron, page, re.DOTALL):
        server = servertools.get_server_name(srv)
        if lang == 'Ingles Subtitulado':
            lang = 'vose'
        itemlist.append(Item(channel=item.channel,
                             action="play",
                             title="Enlace encontrado en %s" % (server),
                             fulltitle=item.fulltitle,
                             url=link,
                             thumbnail=item.thumbnail,
                             language=lang,
                             quality=quality,
                             server=server))
    if itemlist:
        itemlist.append(Item(channel=item.channel))
        itemlist.append(item.clone(channel="trailertools",
                                   title="Buscar Tráiler",
                                   action="buscartrailer",
                                   text_color="magenta"))
        # "Add this movie to the KODI library" option.
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel,
                                 title="Añadir pelicula a la videoteca",
                                 text_color="green",
                                 action="add_pelicula_to_library",
                                 url=item.url,
                                 thumbnail=item.thumbnail,
                                 fulltitle=item.fulltitle))
    return itemlist
def findvideos(item):
    """List the playable links of a movie page.

    Builds a colour-decorated plot (info block + synopsis) and one Item
    per link row found in the page's <tbody> tables.
    """
    logger.info(item.url)
    itemlist = []
    # Page is latin-1 encoded; re-encode to utf-8 (Python 2 style str handling).
    data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
    # Captures (synopsis, title) from the info section.
    patron = '<h2>Sinopsis</h2>.*?<p>(.*?)</p>.*?<div id="informacion".*?</h2>.*?<p>(.*?)</p>'  # title
    matches = re.compile(patron, re.DOTALL).findall(data)
    for sinopsis, title in matches:
        title = "[COLOR white][B]" + title + "[/B][/COLOR]"
        # Information block used to build the decorated plot text.
        patron = '<div id="informacion".*?>(.*?)</div>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedplot in matches:
            splot = title + "\n\n"
            plot = scrapedplot
            # Turn HTML headings/paragraphs into Kodi colour markup.
            plot = re.sub('<h2>', "[COLOR red][B]", plot)
            plot = re.sub('</h2>', "[/B][/COLOR] : ", plot)
            plot = re.sub('<p>', "[COLOR green]", plot)
            plot = re.sub('</p>', "[/COLOR]\n", plot)
            # Strip any remaining tags.
            plot = re.sub('<[^>]+>', "", plot)
            splot += plot + "\n[COLOR red][B] Sinopsis[/B][/COLOR]\n " + sinopsis
            # link data
            '''
            <a rel="nofollow" href="(.*?)".*?<td><img.*?</td><td>(.*?)</td><td>(.*?)</td></tr>
            ">Vimple</td>
            '''
            patron = '<tbody>(.*?)</tbody>'
            matchesx = re.compile(patron, re.DOTALL).findall(data)
            for bloq in matchesx:
                # Captures (url, server, language, quality) per table row.
                patron = 'href="(.*?)".*?0 0">(.*?)</.*?<td>(.*?)</.*?<td>(.*?)<'
                matches = re.compile(patron, re.DOTALL).findall(bloq)
                for scrapedurl, scrapedserver, scrapedlang, scrapedquality in matches:
                    url = urlparse.urljoin(item.url, scrapedurl)
                    logger.info("Lang:[" + scrapedlang + "] Quality[" + scrapedquality + "] URL[" + url + "]")
                    # Host part of the link; presumably one hit per absolute URL.
                    patronenlaces = '.*?://(.*?)/'
                    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(scrapedurl)
                    scrapedtitle = ""
                    # Normalise site server labels to identifiers servertools knows.
                    if scrapedserver == 'Vimple':
                        scrapedserver = 'vimpleru'
                    elif scrapedserver == 'Ok.ru':
                        scrapedserver = 'okru'
                    server = servertools.get_server_name(scrapedserver)
                    for scrapedenlace in matchesenlaces:
                        scrapedtitle = title + " [COLOR white][ [/COLOR]" + "[COLOR green]" + scrapedquality + "[/COLOR]" + "[COLOR white] ][/COLOR]" + " [COLOR red] [" + scrapedlang + "][/COLOR] » " + scrapedserver
                        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle,
                                             extra=title, url=url, fanart=item.thumbnail, thumbnail=item.thumbnail, plot=splot, language=scrapedlang, quality=scrapedquality, server=server))
    return itemlist
def findvideos(item):
    """Return one playable Item per recognised server option of the title."""
    logger.info()
    itemlist = []

    # Lazily pull the option list from the page's __NEXT_DATA__ JSON when
    # it was not passed along with the item.
    if not item.urls:
        soup = get_source(item.url, soup=True)
        next_data = jsontools.load(soup.find('script', id='__NEXT_DATA__').text)
        item.urls = next_data['props']['pageProps']['data']['seasons'][0]['episodes'][0]['players']

    # Walk the server options.
    for option in item.urls:
        server = server_list.get(option['name'].lower())
        if not server:
            # Unknown or new server: nothing we can play.
            continue
        serv_name = servertools.get_server_name(server)
        label = '{}: {} {}'.format(config.get_localized_string(60335),
                                   serv_name.title(),
                                   unify.add_languages('', item.language))
        new_item = Item(action='play',
                        channel=item.channel,
                        infoLabels=item.infoLabels,
                        language=item.language,
                        server=server,
                        thumbnail=item.thumbnail,
                        title=label,
                        url='{}{}'.format(server_urls.get(server, ''), option['id']))
        # Carry over fanart/plot when missing and format non-movie titles.
        if item.fanart and not new_item.fanart:
            new_item.fanart = item.fanart
        if item.contentPlot and not new_item.contentPlot:
            new_item.contentPlot = item.contentPlot
        if item.contentType != 'movie':
            unify.title_format(new_item)
        itemlist.append(new_item)

    # For movies, offer the "add to video library" entry when supported.
    if itemlist and config.get_videolibrary_support() and item.contentType == 'movie' and not item.videolibrary:
        itemlist.append(Item(action="add_pelicula_to_library",
                             channel=item.channel,
                             contentType="movie",
                             contentTitle=item.contentTitle,
                             extra="findvideos",
                             infoLabels={'year': item.infoLabels.get('year')},
                             title="[COLOR yellow]{}[/COLOR]".format(config.get_localized_string(60353)),
                             url=item.url,
                             videolibrary=True))
    return itemlist
def findvideos(item):
    """Build playable Items from the server options attached to the item."""
    logger.info()
    itemlist = []
    if item.videolibrary:
        return seasons(item)

    # Drop empty values inside each option dict and discard options that
    # end up completely empty.
    servers = []
    for sub in item.urls:
        cleaned = {key: val for key, val in sub.items() if val}
        if cleaned:
            servers.append(cleaned)

    # Walk the server options.
    for option in servers:
        server = server_list.get(option['opcion'].lower())
        if not server:
            # Unknown or new server: nothing we can play.
            continue
        serv_name = servertools.get_server_name(server)
        label = unify.add_languages(
            '{}: {}'.format(config.get_localized_string(60335), serv_name.title()),
            item.language)
        new_item = Item(action='play',
                        channel=item.channel,
                        infoLabels=item.infoLabels,
                        language=item.language,
                        server=server,
                        thumbnail=item.thumbnail,
                        title=label,
                        url='{}{}'.format(server_urls.get(server, ''), option['url']))
        # Carry over fanart/plot when missing and format non-movie titles.
        if item.fanart and not new_item.fanart:
            new_item.fanart = item.fanart
        if item.contentPlot and not new_item.contentPlot:
            new_item.contentPlot = item.contentPlot
        if item.contentType != 'movie':
            unify.title_format(new_item)
        itemlist.append(new_item)

    # For movies, offer the "add to video library" entry when supported.
    if itemlist and config.get_videolibrary_support() and item.contentType == 'movie' and not item.videolibrary:
        itemlist.append(Item(action="add_pelicula_to_library",
                             channel=item.channel,
                             contentType="movie",
                             contentTitle=item.contentTitle,
                             extra="findvideos",
                             infoLabels={'year': item.infoLabels.get('year')},
                             title="[COLOR yellow]{}[/COLOR]".format(config.get_localized_string(60353)),
                             url=item.url,
                             videolibrary=True))
    return itemlist
def findvideos(item):
    """Build playable Items from the server options attached to the item.

    Changes vs. the previous version, aligning it with the sibling
    findvideos implementations:
    - the unknown-server guard now runs BEFORE the url is built, so no
      work is wasted on options we will skip;
    - the redundant ``url = ''`` initializer is gone;
    - ``hasattr(item, 'fanart')`` became a truthiness check (only copy a
      meaningful fanart value), matching the other variants;
    - ``not x == y`` became the idiomatic ``x != y``.
    """
    logger.info()
    itemlist = []
    if item.videolibrary:
        return seasons(item)

    for option in item.urls:
        server = server_list.get(option['opcion'].lower())
        # Skip unknown/new servers before building anything for them.
        if not server:
            continue
        url = '{}{}'.format(server_urls.get(server, ''), option['url'])
        serv_name = servertools.get_server_name(server)
        new_item = Item(
            action='play',
            channel=item.channel,
            infoLabels=item.infoLabels,
            language=item.language,
            server=server,
            thumbnail=item.thumbnail,
            title=unify.add_languages('{}: {}'.format(config.get_localized_string(60335), serv_name.title()), item.language),
            url=url
        )
        # Carry over fanart/plot only when they hold a value.
        if item.fanart:
            new_item.fanart = item.fanart
        if item.contentPlot:
            new_item.contentPlot = item.contentPlot
        if item.contentType != 'movie':
            unify.title_format(new_item)
        itemlist.append(new_item)

    # For movies, offer the "add to video library" entry when supported.
    if itemlist and config.get_videolibrary_support() and item.contentType == 'movie' and not item.videolibrary:
        itemlist.append(
            Item(
                action="add_pelicula_to_library",
                channel=item.channel,
                contentType="movie",
                contentTitle=item.contentTitle,
                extra="findvideos",
                infoLabels={'year': item.infoLabels.get('year')},
                title="[COLOR yellow]{}[/COLOR]".format(config.get_localized_string(60353)),
                url=item.url,
                videolibrary=True
            )
        )
    return itemlist
def findvideos(item):
    """List the playable links of a peliculasdk movie page.

    Two passes: first collect per-tab (JS function, video id, language,
    quality) tuples from the page, then resolve each JS function name to
    its embed URL template via the site's videod.js and build the Items.
    """
    logger.info()
    itemlist = []
    data = scrapertools.cache_page(item.url)
    # Strip HTML comments and whitespace noise before matching.
    data = re.sub(r"<!--.*?-->", "", data)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    # Tab headers: (anchor, css class, label). Stringified below to look
    # up the language/quality attached to each tab id.
    bloque_tab = scrapertools.find_single_match(
        data, '<div id="verpelicula">(.*?)<div class="tab_container">')
    patron = '<li><a href="#([^<]+)"><span class="re">\d<\/span><span class="([^<]+)"><\/span><span class=.*?>([^<]+)<\/span>'
    check = re.compile(patron, re.DOTALL).findall(bloque_tab)
    servers_data_list = []
    # Tab bodies: (tab id, JS function name, video id). The second pattern
    # is a fallback for pages without the rocketscript wrapper.
    patron = '<div id="(tab\d+)" class="tab_content"><script type="text/rocketscript">(\w+)\("([^"]+)"\)</script></div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) == 0:
        patron = '<div id="(tab\d+)" class="tab_content"><script>(\w+)\("([^"]+)"\)</script></div>'
        matches = re.compile(patron, re.DOTALL).findall(data)
    for check_tab, server, id in matches:
        # Decorated plot text (computed from `data`, which does not change
        # here, so this is loop-invariant work).
        scrapedplot = scrapertools.get_match(
            data, '<span class="clms">(.*?)</div></div>')
        plotformat = re.compile('(.*?:) </span>', re.DOTALL).findall(scrapedplot)
        scrapedplot = scrapedplot.replace(
            scrapedplot, bbcode_kodi2html("[COLOR white]" + scrapedplot + "[/COLOR]"))
        for plot in plotformat:
            scrapedplot = scrapedplot.replace(
                plot, bbcode_kodi2html("[COLOR red][B]" + plot + "[/B][/COLOR]"))
        scrapedplot = scrapedplot.replace("</span>", "[CR]")
        scrapedplot = scrapedplot.replace(":", "")
        if check_tab in str(check):
            # Pull the (language, quality) that follow this tab id in the
            # stringified header tuples.
            idioma, calidad = scrapertools.find_single_match(
                str(check), "" + check_tab + "', '(.*?)', '(.*?)'")
            servers_data_list.append([server, id, idioma, calidad])
    # videod.js maps each JS function name to its embed URL template.
    url = "http://www.peliculasdk.com/Js/videod.js"
    data = scrapertools.cachePage(url)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    data = data.replace(
        '<iframe width="100%" height="400" scrolling="no" frameborder="0"', '')
    patron = 'function (\w+)\(id\).*?'
    patron += 'data-src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for server, url in matches:
        for enlace, id, idioma, calidad in servers_data_list:
            if server == enlace:
                # Clean the URL template and append the video id.
                video_url = re.sub(r"embed\-|\-.*?x.*?\.html|u\'|\'\(", "", str(url))
                video_url = re.sub(r"'\+codigo\+'", "", video_url)
                video_url = video_url.replace('embed//', 'embed/')
                video_url = video_url + id
                if "goo.gl" in video_url:
                    # Resolve goo.gl shorteners; skip this link on failure.
                    try:
                        from unshortenit import unshorten
                        url = unshorten(video_url)
                        video_url = scrapertools.get_match(
                            str(url), "u'([^']+)'")
                    except:
                        continue
                # Derive the server name from the host part of the URL,
                # undoing common subdomain/alias prefixes.
                servertitle = scrapertools.get_match(video_url, 'http.*?://(.*?)/')
                servertitle = servertitle.replace("embed.", "")
                servertitle = servertitle.replace("player.", "")
                servertitle = servertitle.replace("api.video.", "")
                servertitle = re.sub(r"hqq.tv|hqq.watch", "netutv", servertitle)
                servertitle = servertitle.replace("anonymouse.org", "netu")
                title = servertitle
                logger.debug('servertitle: %s' % servertitle)
                server = servertools.get_server_name(servertitle)
                logger.debug('server: %s' % server)
                itemlist.append(
                    Item(channel=item.channel, title=title, url=video_url,
                         action="play", thumbnail=item.category, plot=scrapedplot,
                         fanart=item.show, server=server, language=idioma,
                         quality=calidad))
    if item.library and config.get_videolibrary_support(
    ) and len(itemlist) > 0:
        infoLabels = {
            'tmdb_id': item.infoLabels['tmdb_id'],
            'title': item.fulltitle
        }
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta película a la videoteca",
                 action="add_pelicula_to_library",
                 url=item.url,
                 infoLabels=infoLabels,
                 text_color="0xFFff6666",
                 thumbnail='http://imgur.com/0gyYvuC.png'))
    return itemlist